summaryrefslogtreecommitdiffstats
path: root/ansible_collections/azure/azcollection/plugins
diff options
context:
space:
mode:
Diffstat (limited to 'ansible_collections/azure/azcollection/plugins')
-rw-r--r--ansible_collections/azure/azcollection/plugins/doc_fragments/azure.py2
-rw-r--r--ansible_collections/azure/azcollection/plugins/inventory/azure_rm.py3
-rw-r--r--ansible_collections/azure/azcollection/plugins/lookup/azure_keyvault_secret.py24
-rw-r--r--ansible_collections/azure/azcollection/plugins/module_utils/azure_rm_common.py31
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_accesstoken_info.py2
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication.py326
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication_info.py44
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup.py92
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup_info.py46
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser.py78
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser_info.py33
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_aks.py103
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool.py656
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool_info.py131
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_backupazurevm.py10
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_cognitivesearch.py18
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_datalakestore.py807
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_datalakestore_info.py468
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_loadbalancer.py208
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_localnetworkgateway.py427
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_localnetworkgateway_info.py267
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_networkinterface.py64
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexiblefirewallrule.py2
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexibleserver.py102
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexibleserver_info.py29
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_rediscache_info.py4
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidatabase.py392
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidatabase_info.py305
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidblongtermretentionpolicy.py277
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidblongtermretentionpolicy_info.py228
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidbshorttermretentionpolicy.py244
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidbshorttermretentionpolicy_info.py206
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageaccountmanagementpolicy.py672
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageaccountmanagementpolicy_info.py245
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachine.py89
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetinstance.py43
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetinstance_info.py52
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetworkgatewaynatrule.py354
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetworkgatewaynatrule_info.py236
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_vmsku_info.py311
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp.py309
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp_info.py25
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction.py141
-rw-r--r--ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction_info.py52
44 files changed, 6344 insertions, 1814 deletions
diff --git a/ansible_collections/azure/azcollection/plugins/doc_fragments/azure.py b/ansible_collections/azure/azcollection/plugins/doc_fragments/azure.py
index 74c1286ac..9fc975084 100644
--- a/ansible_collections/azure/azcollection/plugins/doc_fragments/azure.py
+++ b/ansible_collections/azure/azcollection/plugins/doc_fragments/azure.py
@@ -134,7 +134,7 @@ options:
requirements:
- python >= 2.7
- The host that executes this module must have the azure.azcollection collection installed via galaxy
- - All python packages listed in collection's requirements-azure.txt must be installed via pip on the host that executes modules from azure.azcollection
+ - All python packages listed in collection's requirements.txt must be installed via pip on the host that executes modules from azure.azcollection
- Full installation instructions may be found https://galaxy.ansible.com/azure/azcollection
notes:
diff --git a/ansible_collections/azure/azcollection/plugins/inventory/azure_rm.py b/ansible_collections/azure/azcollection/plugins/inventory/azure_rm.py
index 12970dec3..0da9d4fcb 100644
--- a/ansible_collections/azure/azcollection/plugins/inventory/azure_rm.py
+++ b/ansible_collections/azure/azcollection/plugins/inventory/azure_rm.py
@@ -602,7 +602,8 @@ class AzureHost(object):
# set nic-related values from the primary NIC first
for nic in sorted(self.nics, key=lambda n: n.is_primary, reverse=True):
# and from the primary IP config per NIC first
- for ipc in sorted(nic._nic_model['properties']['ipConfigurations'], key=lambda i: i['properties'].get('primary', False), reverse=True):
+ for ipc in sorted(nic._nic_model.get('properties', {}).get('ipConfigurations', []),
+ key=lambda i: i.get('properties', {}).get('primary', False), reverse=True):
try:
subnet = ipc['properties'].get('subnet')
if subnet:
diff --git a/ansible_collections/azure/azcollection/plugins/lookup/azure_keyvault_secret.py b/ansible_collections/azure/azcollection/plugins/lookup/azure_keyvault_secret.py
index 5e693e4b3..6a6dd8f10 100644
--- a/ansible_collections/azure/azcollection/plugins/lookup/azure_keyvault_secret.py
+++ b/ansible_collections/azure/azcollection/plugins/lookup/azure_keyvault_secret.py
@@ -34,6 +34,8 @@ options:
description: Tenant id of service principal.
use_msi:
description: MSI token autodiscover, default is true.
+ cloud_type:
+ description: Specify which cloud, such as C(azure), C(usgovcloudapi).
notes:
- If version is not provided, this plugin will return the latest version of the secret.
- If ansible is running on Azure Virtual Machine with MSI enabled, client_id, secret and tenant isn't required.
@@ -51,6 +53,10 @@ EXAMPLE = """
debug:
msg: msg: "{{ lookup('azure.azcollection.azure_keyvault_secret', 'testsecret', vault_url=key_vault_uri)}}"
+- name: Look up secret with cloud type
+ debug:
+    msg: "{{ lookup('azure.azcollection.azure_keyvault_secret', 'testsecret', cloud_type='usgovcloudapi', vault_url=key_vault_uri)}}"
+
- name: Look up secret when ansible host is MSI enabled Azure VM
debug:
msg: "the value of this secret is {{
@@ -133,15 +139,6 @@ TOKEN_ACQUIRED = False
logger = logging.getLogger("azure.identity").setLevel(logging.ERROR)
-token_params = {
- 'api-version': '2018-02-01',
- 'resource': 'https://vault.azure.net'
-}
-
-token_headers = {
- 'Metadata': 'true'
-}
-
def lookup_secret_non_msi(terms, vault_url, kwargs):
@@ -178,6 +175,15 @@ class LookupModule(LookupBase):
TOKEN_ACQUIRED = False
token = None
+ token_params = {
+ 'api-version': '2018-02-01',
+ 'resource': 'https://vault.{0}.net'.format(kwargs.get('cloud_type', 'azure'))
+ }
+
+ token_headers = {
+ 'Metadata': 'true'
+ }
+
if use_msi:
try:
token_res = requests.get('http://169.254.169.254/metadata/identity/oauth2/token',
diff --git a/ansible_collections/azure/azcollection/plugins/module_utils/azure_rm_common.py b/ansible_collections/azure/azcollection/plugins/module_utils/azure_rm_common.py
index 79b5167b1..c747fc72c 100644
--- a/ansible_collections/azure/azcollection/plugins/module_utils/azure_rm_common.py
+++ b/ansible_collections/azure/azcollection/plugins/module_utils/azure_rm_common.py
@@ -279,8 +279,6 @@ try:
except ImportError:
import azure.mgmt.recoveryservicesbackup.activestamp.models as RecoveryServicesBackupModels
from azure.mgmt.search import SearchManagementClient
- from azure.mgmt.datalake.store import DataLakeStoreAccountManagementClient
- import azure.mgmt.datalake.store.models as DataLakeStoreAccountModel
from azure.mgmt.notificationhubs import NotificationHubsManagementClient
from azure.mgmt.eventhub import EventHubManagementClient
from azure.mgmt.datafactory import DataFactoryManagementClient
@@ -704,6 +702,12 @@ class AzureRMModuleBase(object):
account = self.storage_client.storage_accounts.get_properties(resource_group_name=resource_group_name, account_name=storage_account_name)
if auth_mode == 'login' and self.azure_auth.credentials.get('credential'):
credential = self.azure_auth.credentials['credential']
+ elif (auth_mode == 'login' and self.azure_auth.credentials.get('tenant')
+ and self.azure_auth.credentials.get('client_id')
+ and self.azure_auth.credentials.get('secret')):
+ credential = client_secret.ClientSecretCredential(tenant_id=self.azure_auth.credentials.get('tenant'),
+ client_id=self.azure_auth.credentials.get('client_id'),
+ client_secret=self.azure_auth.credentials.get('secret'))
else:
account_keys = self.storage_client.storage_accounts.list_keys(resource_group_name=resource_group_name, account_name=storage_account_name)
credential = account_keys.keys[0].value
@@ -947,11 +951,17 @@ class AzureRMModuleBase(object):
def _ansible_get_models(self, *arg, **kwarg):
return self._ansible_models
- setattr(client, '_ansible_models', importlib.import_module(client_type.__module__).models)
+ try:
+ setattr(client, '_ansible_models', importlib.import_module(client_type.__module__).models)
+ except AttributeError:
+ setattr(client, '_ansible_models', importlib.import_module(client_type.__module__)._models)
client.models = types.MethodType(_ansible_get_models, client)
if self.azure_auth._cert_validation_mode == 'ignore':
- client._config.session_configuration_callback = self._validation_ignore_callback
+ if hasattr(client, '_config'):
+ client._config.session_configuration_callback = self._validation_ignore_callback
+ else:
+ client.config.session_configuration_callback = self._validation_ignore_callback
return client
@@ -1361,19 +1371,6 @@ class AzureRMModuleBase(object):
return self._search_client
@property
- def datalake_store_client(self):
- self.log('Getting datalake store client...')
- if not self._datalake_store_client:
- self._datalake_store_client = self.get_mgmt_svc_client(DataLakeStoreAccountManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version='2016-11-01')
- return self._datalake_store_client
-
- @property
- def datalake_store_models(self):
- return DataLakeStoreAccountModel
-
- @property
def notification_hub_client(self):
self.log('Getting notification hub client')
if not self._notification_hub_client:
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_accesstoken_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_accesstoken_info.py
index cf9569868..c020d4dd1 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_accesstoken_info.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_accesstoken_info.py
@@ -95,7 +95,7 @@ class AzureRMAccessToken(AzureRMModuleBase):
super(AzureRMAccessToken, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=False,
- is_ad_resource=False)
+ is_ad_resource=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication.py
index b428463aa..33270da27 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication.py
@@ -146,33 +146,101 @@ options:
optional_claims:
description:
- Declare the optional claims for the application.
- type: list
- elements: dict
+ type: dict
suboptions:
- name:
- description:
- - The name of the optional claim.
- type: str
- required: True
- source:
+        access_token_claims:
description:
- - The source (directory object) of the claim.
- - There are predefined claims and user-defined claims from extension properties.
- - If the source value is null, the claim is a predefined optional claim.
- - If the source value is user, the value in the name property is the extension property from the user object.
- type: str
- essential:
+ - The optional claims returned in the JWT access token
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the optional claim.
+ type: str
+ required: True
+ source:
+ description:
+ - The source (directory object) of the claim.
+ - There are predefined claims and user-defined claims from extension properties.
+ - If the source value is null, the claim is a predefined optional claim.
+ - If the source value is user, the value in the name property is the extension property from the user object.
+ type: str
+ essential:
+ description:
+ - If the value is true, the claim specified by the client is necessary to ensure a smooth authorization experience\
+ for the specific task requested by the end user.
+ - The default value is false.
+ default: false
+ type: bool
+ additional_properties:
+ description:
+ - Additional properties of the claim.
+ - If a property exists in this collection, it modifies the behavior of the optional claim specified in the name property.
+ type: list
+ elements: str
+ id_token_claims:
description:
- - If the value is true, the claim specified by the client is necessary to ensure a smooth authorization experience
- for the specific task requested by the end user.
- - The default value is false.
- default: false
- type: bool
- additional_properties:
+ - The optional claims returned in the JWT ID token
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the optional claim.
+ type: str
+ required: True
+ source:
+ description:
+ - The source (directory object) of the claim.
+ - There are predefined claims and user-defined claims from extension properties.
+ - If the source value is null, the claim is a predefined optional claim.
+ - If the source value is user, the value in the name property is the extension property from the user object.
+ type: str
+ essential:
+ description:
+ - If the value is true, the claim specified by the client is necessary to ensure a smooth authorization experience\
+ for the specific task requested by the end user.
+ - The default value is false.
+ default: false
+ type: bool
+ additional_properties:
+ description:
+ - Additional properties of the claim.
+ - If a property exists in this collection, it modifies the behavior of the optional claim specified in the name property.
+ type: list
+ elements: str
+ saml2_token_claims:
description:
- - Additional properties of the claim.
- - If a property exists in this collection, it modifies the behavior of the optional claim specified in the name property.
- type: str
+ - The optional claims returned in the SAML token
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the optional claim.
+ type: str
+ required: True
+ source:
+ description:
+ - The source (directory object) of the claim.
+ - There are predefined claims and user-defined claims from extension properties.
+ - If the source value is null, the claim is a predefined optional claim.
+                        - If the source value is user, the value in the name property is the extension property from the user object.
+ type: str
+ essential:
+ description:
+ - If the value is true, the claim specified by the client is necessary to ensure a smooth authorization experience\
+ for the specific task requested by the end user.
+ - The default value is false.
+ default: false
+ type: bool
+ additional_properties:
+ description:
+ - Additional properties of the claim.
+ - If a property exists in this collection, it modifies the behavior of the optional claim specified in the name property.
+ type: list
+ elements: str
password:
description:
- App password, aka 'client secret'.
@@ -293,87 +361,99 @@ EXAMPLES = '''
'''
RETURN = '''
-output:
+display_name:
+ description:
+ - Object's display name or its prefix.
+ type: str
+ returned: always
+ sample: fredAKSCluster
+app_id:
+ description:
+ - The application ID.
+ returned: always
+ type: str
+ sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+object_id:
+ description:
+ - Object ID of the application
+ returned: always
+ type: str
+ sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+sign_in_audience:
description:
- - Current state of the adapplication.
+ - The application can be used from any Azure AD tenants.
+ returned: always
+ type: str
+ sample: AzureADandPersonalMicrosoftAccount
+available_to_other_tenants:
+ description:
+ - The application can be used from any Azure AD tenants.
+ returned: always
+ type: str
+ sample: AzureADandPersonalMicrosoftAccount
+homepage:
+ description:
+ - The url where users can sign in and use your app.
+ returned: always
+ type: str
+ sample: null
+identifier_uris:
+ description:
+ - Space-separated unique URIs that Azure AD can use for this app.
+ returned: always
+ type: list
+ sample: []
+oauth2_allow_implicit_flow:
+ description:
+ - Whether to allow implicit grant flow for OAuth2.
+ returned: always
+ type: bool
+ sample: false
+public_client_reply_urls:
+ description:
+ - The public client redirect urls.
+ - Space-separated URIs to which Azure AD will redirect in response to an OAuth 2.0 request.
+ returned: always
+ type: list
+ sample: []
+web_reply_urls:
+ description:
+ - The web redirect urls.
+ - Space-separated URIs to which Azure AD will redirect in response to an OAuth 2.0 request.
+ returned: always
+ type: list
+ sample: []
+spa_reply_urls:
+ description:
+ - The spa redirect urls.
+ - Space-separated URIs to which Azure AD will redirect in response to an OAuth 2.0 request.
+ returned: always
+ type: list
+ sample: []
+optional_claims:
+ description:
+ - Declare the optional claims for the application.
type: complex
- returned: awalys
+ returned: always
contains:
- display_name:
- description:
- - Object's display name or its prefix.
- type: str
- returned: always
- sample: fredAKSCluster
- app_id:
- description:
- - The application ID.
- returned: always
- type: str
- sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- object_id:
- description:
- - Object ID of the application
- returned: always
- type: str
- sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- sign_in_audience:
- description:
- - The application can be used from any Azure AD tenants.
- returned: always
- type: str
- sample: AzureADandPersonalMicrosoftAccount
- available_to_other_tenants:
- description:
- - The application can be used from any Azure AD tenants.
- returned: always
- type: str
- sample: AzureADandPersonalMicrosoftAccount
- homepage:
- description:
- - The url where users can sign in and use your app.
- returned: always
- type: str
- sample: null
- identifier_uris:
+        access_token_claims:
description:
- - Space-separated unique URIs that Azure AD can use for this app.
- returned: always
+ - The optional claims returned in the JWT access token
type: list
- sample: []
- oauth2_allow_implicit_flow:
- description:
- - Whether to allow implicit grant flow for OAuth2.
returned: always
- type: bool
- sample: false
- optional_claims:
+ sample: ['name': 'aud', 'source': null, 'essential': false, 'additional_properties': []]
+ id_token_claims:
description:
- - The optional claims for the application.
- returned: always
+ - The optional claims returned in the JWT ID token
type: list
- sample: []
- public_client_reply_urls:
- description:
- - The public client redirect urls.
- - Space-separated URIs to which Azure AD will redirect in response to an OAuth 2.0 request.
returned: always
- type: list
- sample: []
- web_reply_urls:
+ sample: ['name': 'acct', 'source': null, 'essential': false, 'additional_properties': []]
+ saml2_token_claims:
description:
- - The web redirect urls.
- - Space-separated URIs to which Azure AD will redirect in response to an OAuth 2.0 request.
- returned: always
+ - The optional claims returned in the SAML token
type: list
- sample: []
- spa_reply_urls:
- description:
- - The spa redirect urls.
- - Space-separated URIs to which Azure AD will redirect in response to an OAuth 2.0 request.
returned: always
- type: list
- sample: []
+ sample: ['name': 'acct', 'source': null, 'essential': false, 'additional_properties': []]
'''
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
@@ -395,6 +475,8 @@ try:
from msgraph.generated.models.spa_application import SpaApplication
from msgraph.generated.models.public_client_application import PublicClientApplication
from msgraph.generated.models.implicit_grant_settings import ImplicitGrantSettings
+ from msgraph.generated.models.optional_claim import OptionalClaim
+ from msgraph.generated.models.optional_claims import OptionalClaims
except ImportError:
# This is handled in azure_rm_common
pass
@@ -419,7 +501,7 @@ app_role_spec = dict(
)
)
-optional_claims_spec = dict(
+claims_spec = dict(
name=dict(
type='str',
required=True
@@ -432,9 +514,11 @@ optional_claims_spec = dict(
default=False
),
additional_properties=dict(
- type='str'
+ type='list',
+ elements='str'
)
)
+
required_resource_accesses_spec = dict(
resource_app_id=dict(
type='str'
@@ -481,7 +565,14 @@ class AzureRMADApplication(AzureRMModuleBaseExt):
key_value=dict(type='str', no_log=True),
native_app=dict(type='bool'),
oauth2_allow_implicit_flow=dict(type='bool'),
- optional_claims=dict(type='list', elements='dict', options=optional_claims_spec),
+ optional_claims=dict(
+ type='dict',
+ options=dict(
+ access_token_claims=dict(type='list', elements='dict', no_log=True, options=claims_spec),
+ id_token_claims=dict(type='list', elements='dict', no_log=True, options=claims_spec),
+ saml2_token_claims=dict(type='list', elements='dict', no_log=True, options=claims_spec),
+ )
+ ),
password=dict(type='str', no_log=True),
public_client_reply_urls=dict(type='list', elements='str'),
web_reply_urls=dict(type='list', elements='str', aliases=['reply_urls']),
@@ -559,6 +650,9 @@ class AzureRMADApplication(AzureRMModuleBaseExt):
if self.app_roles:
app_roles = self.build_app_roles(self.app_roles)
+ if self.optional_claims:
+ optional_claims = self.build_optional_claims(self.optional_claims)
+
create_app = Application(
sign_in_audience=self.sign_in_audience,
web=WebApplication(
@@ -576,7 +670,7 @@ class AzureRMADApplication(AzureRMModuleBaseExt):
password_credentials=password_creds,
required_resource_access=required_accesses,
app_roles=app_roles,
- optional_claims=self.optional_claims
+ optional_claims=optional_claims
# allow_guests_sign_in=self.allow_guests_sign_in,
)
response = asyncio.get_event_loop().run_until_complete(self.create_application(create_app))
@@ -603,6 +697,9 @@ class AzureRMADApplication(AzureRMModuleBaseExt):
if self.app_roles:
app_roles = self.build_app_roles(self.app_roles)
+ if self.optional_claims:
+ optional_claims = self.build_optional_claims(self.optional_claims)
+
app_update_param = Application(
sign_in_audience=self.sign_in_audience,
web=WebApplication(
@@ -621,7 +718,7 @@ class AzureRMADApplication(AzureRMModuleBaseExt):
required_resource_access=required_accesses,
# allow_guests_sign_in=self.allow_guests_sign_in,
app_roles=app_roles,
- optional_claims=self.optional_claims)
+ optional_claims=optional_claims)
asyncio.get_event_loop().run_until_complete(self.update_application(
obj_id=old_response['object_id'], update_app=app_update_param))
@@ -665,6 +762,15 @@ class AzureRMADApplication(AzureRMModuleBaseExt):
return True
return False
+ def serialize_claims(self, claims):
+ if claims is None:
+ return None
+ return [{
+ "additional_properties": claim.additional_properties,
+ "essential": claim.essential,
+ "name": claim.name,
+ "source": claim.source} for claim in claims]
+
def to_dict(self, object):
app_roles = [{
'id': app_role.id,
@@ -673,6 +779,11 @@ class AzureRMADApplication(AzureRMModuleBaseExt):
'value': app_role.value,
"description": app_role.description
} for app_role in object.app_roles]
+ optional_claims = {
+ "access_token": self.serialize_claims(object.optional_claims.access_token),
+ "id_token": self.serialize_claims(object.optional_claims.id_token),
+ "saml2_token": self.serialize_claims(object.optional_claims.saml2_token)
+ } if object.optional_claims is not None else object.optional_claims
return dict(
app_id=object.app_id,
object_id=object.id,
@@ -683,7 +794,7 @@ class AzureRMADApplication(AzureRMModuleBaseExt):
homepage=object.web.home_page_url,
identifier_uris=object.identifier_uris,
oauth2_allow_implicit_flow=object.web.implicit_grant_settings.enable_access_token_issuance,
- optional_claims=object.optional_claims,
+ optional_claims=optional_claims,
# allow_guests_sign_in=object.allow_guests_sign_in,
web_reply_urls=object.web.redirect_uris,
spa_reply_urls=object.spa.redirect_uris,
@@ -762,6 +873,25 @@ class AzureRMADApplication(AzureRMModuleBaseExt):
result.append(role)
return result
+ def build_optional_claims(self, optional_claims):
+
+ def build_claims(claims_dict):
+ if claims_dict is None:
+ return None
+ return [OptionalClaim(
+ essential=claim.get("essential"),
+ name=claim.get("name"),
+ source=claim.get("source"),
+ additional_properties=claim.get("additional_properties")
+ ) for claim in claims_dict]
+
+ claims = OptionalClaims(
+ access_token=build_claims(optional_claims.get("access_token_claims")),
+ id_token=build_claims(optional_claims.get("id_token_claims")),
+ saml2_token=build_claims(optional_claims.get("saml2_token_claims"))
+ )
+ return claims
+
async def create_application(self, creat_app):
return await self._client.applications.post(body=creat_app)
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication_info.py
index 167b82552..e3eb53aac 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication_info.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adapplication_info.py
@@ -129,6 +129,30 @@ applications:
returned: always
type: list
sample: []
+ optional_claims:
+ description:
+ - Declare the optional claims for the application.
+ type: complex
+ returned: always
+ contains:
+            access_token_claims:
+ description:
+ - The optional claims returned in the JWT access token
+ type: list
+ returned: always
+ sample: ['name': 'aud', 'source': null, 'essential': false, 'additional_properties': []]
+ id_token_claims:
+ description:
+ - The optional claims returned in the JWT ID token
+ type: list
+ returned: always
+ sample: ['name': 'acct', 'source': null, 'essential': false, 'additional_properties': []]
+ saml2_token_claims:
+ description:
+ - The optional claims returned in the SAML token
+ type: list
+ returned: always
+ sample: ['name': 'acct', 'source': null, 'essential': false, 'additional_properties': []]
'''
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBase
@@ -191,8 +215,17 @@ class AzureRMADApplicationInfo(AzureRMModuleBase):
return self.results
+ def serialize_claims(self, claims):
+ if claims is None:
+ return None
+ return [{
+ "additional_properties": claim.additional_properties,
+ "essential": claim.essential,
+ "name": claim.name,
+ "source": claim.source} for claim in claims]
+
def to_dict(self, object):
- return dict(
+ response = dict(
app_id=object.app_id,
object_id=object.id,
app_display_name=object.display_name,
@@ -201,9 +234,16 @@ class AzureRMADApplicationInfo(AzureRMModuleBase):
sign_in_audience=object.sign_in_audience,
web_reply_urls=object.web.redirect_uris,
spa_reply_urls=object.spa.redirect_uris,
- public_client_reply_urls=object.public_client.redirect_uris
+ public_client_reply_urls=object.public_client.redirect_uris,
+ optional_claims=dict(access_token=[], id_token=[], saml2_token=[])
)
+ if object.optional_claims is not None:
+ response['optional_claims']['id_token'] = self.serialize_claims(object.optional_claims.id_token)
+ response['optional_claims']['saml2_token'] = self.serialize_claims(object.optional_claims.saml2_token)
+ response['optional_claims']['access_token'] = self.serialize_claims(object.optional_claims.access_token)
+ return response
+
async def get_application(self, obj_id):
return await self._client.applications.by_application_id(obj_id).get()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup.py
index 1693794a7..4f7f3ed5a 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup.py
@@ -63,6 +63,12 @@ options:
- The azure ad objects asserted to not be owners of the group.
type: list
elements: str
+ raw_membership:
+ description:
+ - By default the group_members return property is flattened and partially filtered of non-User objects before return. \
+ This argument disables those transformations.
+ default: false
+ type: bool
description:
description:
- An optional description for the group.
@@ -109,6 +115,15 @@ EXAMPLES = '''
- "{{ ad_object_1_object_id }}"
- "{{ ad_object_2_object_id }}"
+- name: Ensure Users are Members of a Group using object_id. Specify the group_membership return should be unfiltered
+ azure_rm_adgroup:
+ object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ state: 'present'
+ present_members:
+ - "{{ ad_object_1_object_id }}"
+ - "{{ ad_object_2_object_id }}"
+ raw_membership: true
+
- name: Ensure Users are not Members of a Group using display_name and mail_nickname
azure_rm_adgroup:
display_name: "Group-Name"
@@ -117,7 +132,7 @@ EXAMPLES = '''
absent_members:
- "{{ ad_object_1_object_id }}"
-- name: Ensure Users are Members of a Group using object_id
+- name: Ensure Users are not Members of a Group using object_id
azure_rm_adgroup:
object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
state: 'present'
@@ -150,7 +165,7 @@ EXAMPLES = '''
- "{{ ad_object_1_object_id }}"
- "{{ ad_object_2_object_id }}"
-- name: Ensure Users are Owners of a Group using object_id
+- name: Ensure Users are not Owners of a Group using object_id
azure_rm_adgroup:
object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
state: 'present'
@@ -203,7 +218,7 @@ group_owners:
type: list
group_members:
description:
- - The members of the group.
+ - The members of the group. If raw_membership is false, this contains the transitive members property. Otherwise, it contains the members property.
returned: always
type: list
description:
@@ -222,6 +237,7 @@ try:
from msgraph.generated.models.group import Group
from msgraph.generated.groups.item.transitive_members.transitive_members_request_builder import \
TransitiveMembersRequestBuilder
+ from msgraph.generated.groups.item.group_item_request_builder import GroupItemRequestBuilder
from msgraph.generated.models.reference_create import ReferenceCreate
except ImportError:
# This is handled in azure_rm_common
@@ -239,6 +255,7 @@ class AzureRMADGroup(AzureRMModuleBase):
present_owners=dict(type='list', elements='str'),
absent_members=dict(type='list', elements='str'),
absent_owners=dict(type='list', elements='str'),
+ raw_membership=dict(type='bool', default=False),
description=dict(type='str'),
state=dict(
type='str',
@@ -257,6 +274,7 @@ class AzureRMADGroup(AzureRMModuleBase):
self.state = None
self.results = dict(changed=False)
self._client = None
+ self.raw_membership = False
super(AzureRMADGroup, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=False,
@@ -267,9 +285,6 @@ class AzureRMADGroup(AzureRMModuleBase):
for key in list(self.module_arg_spec.keys()):
setattr(self, key, kwargs[key])
- # TODO remove ad_groups return. Returns as one object always
- ad_groups = []
-
try:
self._client = self.get_msgraph_client()
ad_groups = []
@@ -280,12 +295,38 @@ class AzureRMADGroup(AzureRMModuleBase):
if ad_groups:
self.object_id = ad_groups[0].id
- elif self.object_id:
+ if self.object_id:
ad_groups = [asyncio.get_event_loop().run_until_complete(self.get_group(self.object_id))]
if ad_groups:
if self.state == "present":
self.results["changed"] = False
+
+ if self.description is not None and self.description != ad_groups[0].description:
+ self.results["changed"] = True
+ else:
+ self.description = ad_groups[0].description
+ if self.display_name is not None and self.display_name != ad_groups[0].display_name:
+ self.results["changed"] = True
+ else:
+ self.display_name = ad_groups[0].display_name
+ if self.mail_nickname is not None and self.mail_nickname != ad_groups[0].mail_nickname:
+ self.results["changed"] = True
+ else:
+ self.mail_nickname = ad_groups[0].mail_nickname
+ if self.results["changed"]:
+ group = Group(
+ mail_enabled=False,
+ security_enabled=True,
+ group_types=[],
+ display_name=self.display_name,
+ mail_nickname=self.mail_nickname,
+ description=self.description
+ )
+
+ asyncio.get_event_loop().run_until_complete(self.update_group(ad_groups[0].id, group))
+ ad_groups = [asyncio.get_event_loop().run_until_complete(self.get_group(self.object_id))]
+
elif self.state == "absent":
asyncio.get_event_loop().run_until_complete(self.delete_group(self.object_id))
ad_groups = []
@@ -325,7 +366,7 @@ class AzureRMADGroup(AzureRMModuleBase):
if self.present_members or self.absent_members:
ret = asyncio.get_event_loop().run_until_complete(self.get_group_members(group_id))
- current_members = [object.id for object in ret.value]
+ current_members = [object.id for object in ret]
if self.present_members:
present_members_by_object_id = self.dictionary_from_object_urls(self.present_members)
@@ -361,7 +402,7 @@ class AzureRMADGroup(AzureRMModuleBase):
if owners_to_add:
for owner_object_id in owners_to_add:
asyncio.get_event_loop().run_until_complete(
- self.add_gropup_owner(group_id, present_owners_by_object_id[owner_object_id]))
+ self.add_group_owner(group_id, present_owners_by_object_id[owner_object_id]))
self.results["changed"] = True
if self.absent_owners:
@@ -369,7 +410,7 @@ class AzureRMADGroup(AzureRMModuleBase):
if owners_to_remove:
for owner in owners_to_remove:
- asyncio.get_event_loop().run_until_complete(self.remove_gropup_owner(group_id, owner))
+ asyncio.get_event_loop().run_until_complete(self.remove_group_owner(group_id, owner))
self.results["changed"] = True
def dictionary_from_object_urls(self, object_urls):
@@ -439,10 +480,13 @@ class AzureRMADGroup(AzureRMModuleBase):
if results["object_id"] and (self.present_members or self.absent_members):
ret = asyncio.get_event_loop().run_until_complete(self.get_group_members(results["object_id"]))
- results["group_members"] = [self.result_to_dict(object) for object in ret.value]
+ results["group_members"] = [self.result_to_dict(object) for object in ret]
return results
+ async def update_group(self, group_id, group):
+ return await self._client.groups.by_group_id(group_id).patch(body=group)
+
async def create_group(self, create_group):
return await self._client.groups.post(body=create_group)
@@ -469,6 +513,12 @@ class AzureRMADGroup(AzureRMModuleBase):
return []
async def get_group_members(self, group_id, filters=None):
+ if self.raw_membership:
+ return await self.get_raw_group_members(group_id, filters)
+ else:
+ return await self.get_transitive_group_members(group_id, filters)
+
+ async def get_transitive_group_members(self, group_id, filters=None):
request_configuration = TransitiveMembersRequestBuilder.TransitiveMembersRequestBuilderGetRequestConfiguration(
query_parameters=TransitiveMembersRequestBuilder.TransitiveMembersRequestBuilderGetQueryParameters(
count=True,
@@ -476,8 +526,22 @@ class AzureRMADGroup(AzureRMModuleBase):
)
if filters:
request_configuration.query_parameters.filter = filters
- return await self._client.groups.by_group_id(group_id).transitive_members.get(
+ response = await self._client.groups.by_group_id(group_id).transitive_members.get(
request_configuration=request_configuration)
+ return response.value
+
+ async def get_raw_group_members(self, group_id, filters=None):
+ request_configuration = GroupItemRequestBuilder.GroupItemRequestBuilderGetRequestConfiguration(
+ query_parameters=GroupItemRequestBuilder.GroupItemRequestBuilderGetQueryParameters(
+ # this ensures service principals are returned
+ # see https://learn.microsoft.com/en-us/graph/api/group-list-members?view=graph-rest-1.0&tabs=http
+ expand=["members"]
+ ),
+ )
+ if filters:
+ request_configuration.query_parameters.filter = filters
+ group = await self._client.groups.by_group_id(group_id).get(request_configuration=request_configuration)
+ return group.members
async def add_group_member(self, group_id, obj_id):
request_body = ReferenceCreate(
@@ -496,13 +560,13 @@ class AzureRMADGroup(AzureRMModuleBase):
)
return await self._client.groups.by_group_id(group_id).owners.get(request_configuration=request_configuration)
- async def add_gropup_owner(self, group_id, obj_id):
+ async def add_group_owner(self, group_id, obj_id):
request_body = ReferenceCreate(
odata_id="https://graph.microsoft.com/v1.0/users/{0}".format(obj_id),
)
await self._client.groups.by_group_id(group_id).owners.ref.post(body=request_body)
- async def remove_gropup_owner(self, group_id, obj_id):
+ async def remove_group_owner(self, group_id, obj_id):
await self._client.groups.by_group_id(group_id).owners.by_directory_object_id(obj_id).ref.delete()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup_info.py
index 3525bdf1b..04393c02e 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup_info.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_adgroup_info.py
@@ -55,6 +55,12 @@ options:
- Indicate whether the groups in which a groups is a member should be returned with the returned groups.
default: False
type: bool
+ raw_membership:
+ description:
+ - By default the group_members return property is flattened and partially filtered of non-User objects before return.\
+ This argument disables those transformations.
+ default: false
+ type: bool
all:
description:
- If True, will return all groups in tenant.
@@ -84,6 +90,12 @@ EXAMPLES = '''
object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
return_owners: true
return_group_members: true
+- name: Return a specific group using object_id and return the owners and members of the group. Return service principals and nested groups.
+ azure_rm_adgroup_info:
+ object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ return_owners: true
+ return_group_members: true
+ raw_membership: true
- name: Return a specific group using object_id and return the groups the group is a member of
azure_rm_adgroup_info:
@@ -153,7 +165,7 @@ group_owners:
type: list
group_members:
description:
- - The members of the group.
+ - The members of the group. If raw_membership is set, this field may contain non-user objects (groups, service principals, etc)
returned: always
type: list
description:
@@ -173,6 +185,7 @@ try:
TransitiveMembersRequestBuilder
from msgraph.generated.groups.item.get_member_groups.get_member_groups_post_request_body import \
GetMemberGroupsPostRequestBody
+ from msgraph.generated.groups.item.group_item_request_builder import GroupItemRequestBuilder
except ImportError:
# This is handled in azure_rm_common
pass
@@ -190,6 +203,7 @@ class AzureRMADGroupInfo(AzureRMModuleBase):
return_owners=dict(type='bool', default=False),
return_group_members=dict(type='bool', default=False),
return_member_groups=dict(type='bool', default=False),
+ raw_membership=dict(type='bool', default=False),
all=dict(type='bool', default=False),
)
@@ -201,6 +215,7 @@ class AzureRMADGroupInfo(AzureRMModuleBase):
self.return_owners = False
self.return_group_members = False
self.return_member_groups = False
+ self.raw_membership = False
self.all = False
self.results = dict(changed=False)
@@ -301,7 +316,7 @@ class AzureRMADGroupInfo(AzureRMModuleBase):
if results["object_id"] and self.return_group_members:
ret = asyncio.get_event_loop().run_until_complete(self.get_group_members(results["object_id"]))
- results["group_members"] = [self.result_to_dict(object) for object in ret.value]
+ results["group_members"] = [self.result_to_dict(object) for object in ret]
if results["object_id"] and self.return_member_groups:
ret = asyncio.get_event_loop().run_until_complete(self.get_member_groups(results["object_id"]))
@@ -310,7 +325,7 @@ class AzureRMADGroupInfo(AzureRMModuleBase):
if results["object_id"] and self.check_membership:
filter = "id eq '{0}' ".format(self.check_membership)
ret = asyncio.get_event_loop().run_until_complete(self.get_group_members(results["object_id"], filter))
- results["is_member_of"] = True if ret.value and len(ret.value) != 0 else False
+ results["is_member_of"] = True if ret and len(ret) != 0 else False
return results
@@ -352,17 +367,34 @@ class AzureRMADGroupInfo(AzureRMModuleBase):
return await self._client.groups.by_group_id(group_id).owners.get(request_configuration=request_configuration)
async def get_group_members(self, group_id, filters=None):
+ if self.raw_membership:
+ return await self.get_raw_group_members(group_id, filters)
+ else:
+ return await self.get_transitive_group_members(group_id, filters)
+
+ async def get_transitive_group_members(self, group_id, filters=None):
request_configuration = TransitiveMembersRequestBuilder.TransitiveMembersRequestBuilderGetRequestConfiguration(
query_parameters=TransitiveMembersRequestBuilder.TransitiveMembersRequestBuilderGetQueryParameters(
count=True,
- select=['id', 'displayName', 'userPrincipalName', 'mailNickname', 'mail', 'accountEnabled', 'userType',
- 'appId', 'appRoleAssignmentRequired']
-
),
)
if filters:
request_configuration.query_parameters.filter = filters
- return await self._client.groups.by_group_id(group_id).transitive_members.get(
+ response = await self._client.groups.by_group_id(group_id).transitive_members.get(
+ request_configuration=request_configuration)
+ return response.value
+
+ async def get_raw_group_members(self, group_id, filters=None):
+ request_configuration = GroupItemRequestBuilder.GroupItemRequestBuilderGetRequestConfiguration(
+ query_parameters=GroupItemRequestBuilder.GroupItemRequestBuilderGetQueryParameters(
+ # this ensures service principals are returned
+ # see https://learn.microsoft.com/en-us/graph/api/group-list-members?view=graph-rest-1.0&tabs=http
+ expand=["members"]
+ ),
+ )
+ if filters:
+ request_configuration.query_parameters.filter = filters
+ return await self._client.groups.by_group_id(group_id).members.get(
request_configuration=request_configuration)
async def get_member_groups(self, obj_id):
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser.py
index 1e0a238c0..e1c792649 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser.py
@@ -119,6 +119,18 @@ options:
- The maximum length is 64 characters.Returned only on $select.
- Supports $filter (eq, ne, not, ge, le, in, startsWith, and eq on null values).
type: str
+ on_premises_extension_attributes:
+ description:
+ - Contains extensionAttributes1-15 for the user.
+ - These extension attributes are also known as Exchange custom attributes 1-15.
+ - For an onPremisesSyncEnabled user, the source of authority for this set of properties is the on-premises and is read-only.
+ - For a cloud-only user (where onPremisesSyncEnabled is false), these properties can be set during the creation or update of a user object.
+ - For a cloud-only user previously synced from on-premises Active Directory, these properties are read-only in Microsoft Graph\
+ but can be fully managed through the Exchange Admin Center or the Exchange Online V2 module in PowerShell.
+ type: dict
+ aliases:
+ - extension_attributes
+
extends_documentation_fragment:
- azure.azcollection.azure
@@ -143,6 +155,10 @@ EXAMPLES = '''
usage_location: "US"
mail: "{{ user_principal_name }}@contoso.com"
company_name: 'Test Company'
+ on_premises_extension_attributes:
+ extension_attribute1: "test_extension_attribute1"
+ extension_attribute2: "test_extension_attribute2"
+ extension_attribute11: "test_extension_attribute11"
- name: Update user with new value for account_enabled
azure_rm_aduser:
@@ -205,6 +221,17 @@ company_name:
type: str
returned: always
sample: 'Test Company'
+on_premises_extension_attributes:
+ description:
+ - Contains extensionAttributes1-15 for the user.
+ - These extension attributes are also known as Exchange custom attributes 1-15.
+ - For an onPremisesSyncEnabled user, the source of authority for this set of properties is the on-premises and is read-only.
+ - For a cloud-only user (where onPremisesSyncEnabled is false), these properties can be set during the creation or update of a user object.
+ - For a cloud-only user previously synced from on-premises Active Directory, these properties are read-only in Microsoft Graph\
+ but can be fully managed through the Exchange Admin Center or the Exchange Online V2 module in PowerShell.
+ type: dict
+ returned: always
+ sample: {}
'''
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBase
@@ -212,6 +239,7 @@ from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common
try:
import asyncio
from msgraph.generated.models.password_profile import PasswordProfile
+ from msgraph.generated.models.on_premises_extension_attributes import OnPremisesExtensionAttributes
from msgraph.generated.models.user import User
from msgraph.generated.users.users_request_builder import UsersRequestBuilder
except ImportError:
@@ -239,7 +267,8 @@ class AzureRMADUser(AzureRMModuleBase):
surname=dict(type='str'),
user_type=dict(type='str'),
mail=dict(type='str'),
- company_name=dict(type='str')
+ company_name=dict(type='str'),
+ on_premises_extension_attributes=dict(type='dict', aliases=['extension_attributes'])
)
self.user_principal_name = None
@@ -259,6 +288,7 @@ class AzureRMADUser(AzureRMModuleBase):
self.user_type = None
self.mail = None
self.company_name = None
+ self.on_premises_extension_attributes = None
self.log_path = None
self.log_mode = None
@@ -288,6 +318,13 @@ class AzureRMADUser(AzureRMModuleBase):
if self.state == 'present':
+ extension_attributes = None
+
+ if self.on_premises_extension_attributes:
+ extension_attributes = OnPremisesExtensionAttributes(
+ **self.on_premises_extension_attributes
+ )
+
if ad_user: # Update, changed
password = None
@@ -298,7 +335,6 @@ class AzureRMADUser(AzureRMModuleBase):
)
should_update = False
-
if self.on_premises_immutable_id and ad_user.on_premises_immutable_id != self.on_premises_immutable_id:
should_update = True
if should_update or self.usage_location and ad_user.usage_location != self.usage_location:
@@ -321,9 +357,12 @@ class AzureRMADUser(AzureRMModuleBase):
should_update = True
if should_update or self.company_name and ad_user.company_name != self.company_name:
should_update = True
-
+ if should_update or (
+ self.on_premises_extension_attributes and
+ self.on_premises_extension_attributes_to_dict(ad_user.on_premises_extension_attributes) != self.on_premises_extension_attributes):
+ should_update = True
if should_update:
- asyncio.get_event_loop().run_until_complete(self.update_user(ad_user, password))
+ asyncio.get_event_loop().run_until_complete(self.update_user(ad_user, password, extension_attributes))
self.results['changed'] = True
@@ -335,7 +374,7 @@ class AzureRMADUser(AzureRMModuleBase):
self.results['changed'] = False
else: # Create, changed
- asyncio.get_event_loop().run_until_complete(self.create_user())
+ asyncio.get_event_loop().run_until_complete(self.create_user(extension_attributes))
self.results['changed'] = True
ad_user = self.get_exisiting_user()
@@ -391,6 +430,16 @@ class AzureRMADUser(AzureRMModuleBase):
raise
return ad_user
+ def on_premises_extension_attributes_to_dict(self, on_premises_extension_attributes):
+ extension_attributes = {}
+ for index in range(1, 15 + 1):
+ attribute_name = f'extension_attribute{index}'
+ if hasattr(on_premises_extension_attributes, attribute_name):
+ attr_value = getattr(on_premises_extension_attributes, attribute_name)
+ if attr_value is not None:
+ extension_attributes[attribute_name] = attr_value
+ return extension_attributes
+
def to_dict(self, object):
return dict(
object_id=object.id,
@@ -400,10 +449,11 @@ class AzureRMADUser(AzureRMModuleBase):
mail=object.mail,
account_enabled=object.account_enabled,
user_type=object.user_type,
- company_name=object.company_name
+ company_name=object.company_name,
+ on_premises_extension_attributes=self.on_premises_extension_attributes_to_dict(object.on_premises_extension_attributes)
)
- async def update_user(self, ad_user, password):
+ async def update_user(self, ad_user, password, extension_attributes):
request_body = User(
on_premises_immutable_id=self.on_premises_immutable_id,
usage_location=self.usage_location,
@@ -415,11 +465,12 @@ class AzureRMADUser(AzureRMModuleBase):
password_profile=password,
user_principal_name=self.user_principal_name,
mail_nickname=self.mail_nickname,
- company_name=self.company_name
+ company_name=self.company_name,
+ on_premises_extension_attributes=extension_attributes
)
return await self._client.users.by_user_id(ad_user.id).patch(body=request_body)
- async def create_user(self):
+ async def create_user(self, extension_attributes):
password = PasswordProfile(
password=self.password_profile
)
@@ -435,7 +486,8 @@ class AzureRMADUser(AzureRMModuleBase):
surname=self.surname,
user_type=self.user_type,
mail=self.mail,
- company_name=self.company_name
+ company_name=self.company_name,
+ on_premises_extension_attributes=extension_attributes
)
return await self._client.users.post(body=request_body)
@@ -446,7 +498,8 @@ class AzureRMADUser(AzureRMModuleBase):
request_configuration = UsersRequestBuilder.UsersRequestBuilderGetRequestConfiguration(
query_parameters=UsersRequestBuilder.UsersRequestBuilderGetQueryParameters(
select=["accountEnabled", "displayName", "mail", "mailNickname", "id", "userPrincipalName", "userType",
- "onPremisesImmutableId", "usageLocation", "givenName", "surname", "companyName"]
+ "onPremisesImmutableId", "usageLocation", "givenName", "surname", "companyName",
+ "onPremisesExtensionAttributes"]
),
)
return await self._client.users.by_user_id(object).get(request_configuration=request_configuration)
@@ -457,7 +510,8 @@ class AzureRMADUser(AzureRMModuleBase):
query_parameters=UsersRequestBuilder.UsersRequestBuilderGetQueryParameters(
filter=filter,
select=["accountEnabled", "displayName", "mail", "mailNickname", "id", "userPrincipalName",
- "userType", "onPremisesImmutableId", "usageLocation", "givenName", "surname", "companyName"],
+ "userType", "onPremisesImmutableId", "usageLocation", "givenName", "surname", "companyName",
+ "onPremisesExtensionAttributes"],
count=True
),
headers={'ConsistencyLevel': "eventual", }
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser_info.py
index 98c30be57..e71066a89 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser_info.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aduser_info.py
@@ -143,6 +143,17 @@ company_name:
type: str
returned: always
sample: "Test Company"
+on_premises_extension_attributes:
+ description:
+ - Contains extensionAttributes1-15 for the user.
+ - These extension attributes are also known as Exchange custom attributes 1-15.
+ - For an onPremisesSyncEnabled user, the source of authority for this set of properties is the on-premises and is read-only.
+ - For a cloud-only user (where onPremisesSyncEnabled is false), these properties can be set during the creation or update of a user object.
+ - For a cloud-only user previously synced from on-premises Active Directory, these properties are read-only in Microsoft Graph\
+ but can be fully managed through the Exchange Admin Center or the Exchange Online V2 module in PowerShell.
+ type: dict
+ returned: always
+ sample: {}
'''
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBase
@@ -224,7 +235,6 @@ class AzureRMADUserInfo(AzureRMModuleBase):
elif self.all:
# this returns as a list, since we parse multiple pages
ad_users = asyncio.get_event_loop().run_until_complete(self.get_users())
-
self.results['ad_users'] = [self.to_dict(user) for user in ad_users]
except Exception as e:
@@ -232,6 +242,16 @@ class AzureRMADUserInfo(AzureRMModuleBase):
return self.results
+ def on_premises_extension_attributes_to_dict(self, on_premises_extension_attributes):
+ extension_attributes = {}
+ for index in range(1, 15 + 1):
+ attribute_name = f'extension_attribute{index}'
+ if hasattr(on_premises_extension_attributes, attribute_name):
+ attr_value = getattr(on_premises_extension_attributes, attribute_name)
+ if attr_value is not None:
+ extension_attributes[attribute_name] = attr_value
+ return extension_attributes
+
def to_dict(self, object):
return dict(
object_id=object.id,
@@ -241,13 +261,15 @@ class AzureRMADUserInfo(AzureRMModuleBase):
mail=object.mail,
account_enabled=object.account_enabled,
user_type=object.user_type,
- company_name=object.company_name
+ company_name=object.company_name,
+ on_premises_extension_attributes=self.on_premises_extension_attributes_to_dict(object.on_premises_extension_attributes)
)
async def get_user(self, object):
request_configuration = UsersRequestBuilder.UsersRequestBuilderGetRequestConfiguration(
query_parameters=UsersRequestBuilder.UsersRequestBuilderGetQueryParameters(
- select=["accountEnabled", "displayName", "mail", "mailNickname", "id", "userPrincipalName", "userType", "companyName"]
+ select=["accountEnabled", "displayName", "mail", "mailNickname", "id", "userPrincipalName",
+ "userType", "companyName", "onPremisesExtensionAttributes"]
),
)
return await self._client.users.by_user_id(object).get(request_configuration=request_configuration)
@@ -255,7 +277,8 @@ class AzureRMADUserInfo(AzureRMModuleBase):
async def get_users(self):
request_configuration = UsersRequestBuilder.UsersRequestBuilderGetRequestConfiguration(
query_parameters=UsersRequestBuilder.UsersRequestBuilderGetQueryParameters(
- select=["accountEnabled", "displayName", "mail", "mailNickname", "id", "userPrincipalName", "userType", "companyName"]
+ select=["accountEnabled", "displayName", "mail", "mailNickname", "id", "userPrincipalName",
+ "userType", "companyName", "onPremisesExtensionAttributes"]
),
)
users = []
@@ -276,7 +299,7 @@ class AzureRMADUserInfo(AzureRMModuleBase):
query_parameters=UsersRequestBuilder.UsersRequestBuilderGetQueryParameters(
filter=filter,
select=["accountEnabled", "displayName", "mail", "mailNickname", "id", "userPrincipalName",
- "userType", "companyName"],
+ "userType", "companyName", "onPremisesExtensionAttributes"],
count=True
),
))
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aks.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aks.py
index 0fb5095fe..0e1565a2c 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aks.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aks.py
@@ -15,6 +15,8 @@ version_added: "0.1.2"
short_description: Manage a managed Azure Container Service (AKS) instance
description:
- Create, update and delete a managed Azure Container Service (AKS) instance.
+ - You can only specify C(identity) or C(service_principal), not both. If you don't specify either it will
+ default to identity->type->SystemAssigned.
options:
resource_group:
@@ -170,7 +172,7 @@ options:
type: str
service_principal:
description:
- - The service principal suboptions. If not provided - use system-assigned managed identity.
+ - The service principal suboptions.
type: dict
suboptions:
client_id:
@@ -182,6 +184,25 @@ options:
description:
- The secret password associated with the service principal.
type: str
+ identity:
+ description:
+ - Identity for the Server.
+ type: dict
+ version_added: '2.4.0'
+ suboptions:
+ type:
+ description:
+ - Type of the managed identity
+ required: false
+ choices:
+ - UserAssigned
+ - SystemAssigned
+ default: SystemAssigned
+ type: str
+ user_assigned_identities:
+ description:
+ - User Assigned Managed Identity
+ type: str
enable_rbac:
description:
- Enable RBAC.
@@ -247,6 +268,8 @@ options:
choices:
- loadBalancer
- userDefinedRouting
+ - managedNATGateway
+ - userAssignedNATGateway
api_server_access_profile:
description:
- Profile of API Access configuration.
@@ -590,6 +613,9 @@ state:
provisioning_state: Succeeded
service_principal_profile:
client_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ identity:
+ "type": "UserAssigned"
+ "user_assigned_identities": {}
pod_identity_profile: {
"allow_network_plugin_kubenet": false,
"user_assigned_identities": [
@@ -633,6 +659,7 @@ def create_aks_dict(aks):
kubernetes_version=aks.kubernetes_version,
tags=aks.tags,
linux_profile=create_linux_profile_dict(aks.linux_profile),
+ identity=aks.identity.as_dict() if aks.identity else None,
service_principal_profile=create_service_principal_profile_dict(
aks.service_principal_profile),
provisioning_state=aks.provisioning_state,
@@ -810,7 +837,7 @@ network_profile_spec = dict(
dns_service_ip=dict(type='str'),
docker_bridge_cidr=dict(type='str'),
load_balancer_sku=dict(type='str', choices=['standard', 'basic']),
- outbound_type=dict(type='str', default='loadBalancer', choices=['userDefinedRouting', 'loadBalancer'])
+ outbound_type=dict(type='str', default='loadBalancer', choices=['userDefinedRouting', 'loadBalancer', 'userAssignedNATGateway', 'managedNATGateway'])
)
@@ -830,6 +857,19 @@ api_server_access_profile_spec = dict(
)
+managed_identity_spec = dict(
+ type=dict(type='str', choices=['SystemAssigned', 'UserAssigned'], default='SystemAssigned'),
+ user_assigned_identities=dict(type='str'),
+)
+
+
+class dotdict(dict):
+ """dot.notation access to dictionary attributes"""
+ __getattr__ = dict.get
+ __setattr__ = dict.__setitem__
+ __delattr__ = dict.__delitem__
+
+
class AzureRMManagedCluster(AzureRMModuleBaseExt):
"""Configuration class for an Azure RM container service (AKS) resource"""
@@ -870,6 +910,14 @@ class AzureRMManagedCluster(AzureRMModuleBaseExt):
type='dict',
options=service_principal_spec
),
+ identity=dict(
+ type='dict',
+ options=managed_identity_spec,
+ required_if=[
+ ('type', 'UserAssigned', [
+ 'user_assigned_identities']),
+ ]
+ ),
enable_rbac=dict(
type='bool',
default=False
@@ -930,6 +978,7 @@ class AzureRMManagedCluster(AzureRMModuleBaseExt):
self.linux_profile = None
self.agent_pool_profiles = None
self.service_principal = None
+ self.identity = None
self.enable_rbac = False
self.network_profile = None
self.aad_profile = None
@@ -938,6 +987,8 @@ class AzureRMManagedCluster(AzureRMModuleBaseExt):
self.node_resource_group = None
self.pod_identity_profile = None
+ mutually_exclusive = [('identity', 'service_principal')]
+
required_if = [
('state', 'present', [
'dns_prefix', 'agent_pool_profiles'])
@@ -948,7 +999,8 @@ class AzureRMManagedCluster(AzureRMModuleBaseExt):
super(AzureRMManagedCluster, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True,
- required_if=required_if)
+ required_if=required_if,
+ mutually_exclusive=mutually_exclusive)
def exec_module(self, **kwargs):
"""Main module execution method"""
@@ -972,6 +1024,11 @@ class AzureRMManagedCluster(AzureRMModuleBaseExt):
available_versions = self.get_all_versions()
if not response:
to_be_updated = True
+ # Default to SystemAssigned if service_principal is not specified
+ if not self.service_principal and not self.identity:
+ self.identity = dotdict({'type': 'SystemAssigned'})
+ if self.identity:
+ changed, self.identity = self.update_identity(self.identity, {})
if self.kubernetes_version not in available_versions.keys():
self.fail("Unsupported kubernetes version. Expected one of {0} but got {1}".format(available_versions.keys(), self.kubernetes_version))
else:
@@ -1118,6 +1175,14 @@ class AzureRMManagedCluster(AzureRMModuleBaseExt):
else:
self.pod_identity_profile = response['pod_identity_profile']
+ # Default to SystemAssigned if service_principal is not specified
+ if not self.service_principal and not self.identity:
+ self.identity = dotdict({'type': 'SystemAssigned'})
+ if self.identity:
+ changed, self.identity = self.update_identity(self.identity, response['identity'])
+ if changed:
+ to_be_updated = True
+
if update_agentpool:
self.log("Need to update agentpool")
if not self.check_mode:
@@ -1177,12 +1242,12 @@ class AzureRMManagedCluster(AzureRMModuleBaseExt):
if self.agent_pool_profiles:
agentpools = [self.create_agent_pool_profile_instance(profile) for profile in self.agent_pool_profiles]
+ # Only service_principal or identity can be specified, but default to SystemAssigned if none specified.
if self.service_principal:
service_principal_profile = self.create_service_principal_profile_instance(self.service_principal)
identity = None
else:
service_principal_profile = None
- identity = self.managedcluster_models.ManagedClusterIdentity(type='SystemAssigned')
if self.linux_profile:
linux_profile = self.create_linux_profile_instance(self.linux_profile)
@@ -1206,7 +1271,7 @@ class AzureRMManagedCluster(AzureRMModuleBaseExt):
service_principal_profile=service_principal_profile,
agent_pool_profiles=agentpools,
linux_profile=linux_profile,
- identity=identity,
+ identity=self.identity,
enable_rbac=self.enable_rbac,
network_profile=self.create_network_profile_instance(self.network_profile),
aad_profile=self.create_aad_profile_instance(self.aad_profile),
@@ -1386,6 +1451,34 @@ class AzureRMManagedCluster(AzureRMModuleBaseExt):
result[name] = self.managedcluster_models.ManagedClusterAddonProfile(config=config, enabled=config['enabled'])
return result
+ # AKS only supports a single UserAssigned Identity
+ def update_identity(self, param_identity, curr_identity):
+ user_identity = None
+ changed = False
+ current_managed_type = curr_identity.get('type', 'SystemAssigned')
+ current_managed_identity = curr_identity.get('user_assigned_identities', {})
+ param_managed_identity = param_identity.get('user_assigned_identities')
+
+ # If type set to SystemAssigned, and Resource has SystemAssigned, nothing to do
+ if 'SystemAssigned' in param_identity.get('type') and current_managed_type == 'SystemAssigned':
+ pass
+ # If type set to SystemAssigned, and Resource has current identity, remove UserAssigned identity
+ elif param_identity.get('type') == 'SystemAssigned':
+ changed = True
+ # If type in module args contains 'UserAssigned'
+ elif 'UserAssigned' in param_identity.get('type'):
+ if param_managed_identity not in current_managed_identity.keys():
+ user_identity = {param_managed_identity: {}}
+ changed = True
+
+ new_identity = self.managedcluster_models.ManagedClusterIdentity(
+ type=param_identity.get('type'),
+ )
+ if user_identity:
+ new_identity.user_assigned_identities = user_identity
+
+ return changed, new_identity
+
def main():
"""Main execution"""
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool.py
index f84362e95..aaf4f9876 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool.py
@@ -102,6 +102,337 @@ options:
description:
- Maximum number of pods that can run on a node.
type: int
+ kubelet_disk_type:
+ description:
+ - Determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage.
+ type: str
+ choices:
+ - OS
+ - Temporary
+ workload_runtime:
+ description:
+ - Determines the type of workload a node can run.
+ type: str
+ choices:
+ - OCIContainer
+ - WasmWasi
+ os_sku:
+ description:
+ - Specifies an OS SKU.
+ - This value must not be specified if OSType is Windows.
+ type: str
+ choices:
+ - Ubuntu
+ - CBLMariner
+ scale_down_mode:
+ description:
+ - This also affects the cluster autoscaler behavior.
+ - If not specified, it defaults to C(Delete).
+ type: str
+ default: Delete
+ choices:
+ - Delete
+ - Deallocate
+ upgrade_settings:
+ description:
+ - Settings for upgrading the agentpool.
+ type: dict
+ suboptions:
+ max_surge:
+ description:
+ - This can either be set to an integer, such as C(5) or percentage C(50%).
+ - If a percentage is specified, it is the percentage of the total agent pool size at the time of the upgrade.
+ - For percentages, fractional nodes are rounded up.
+ - If not specified, the default is C(1).
+ type: str
+ power_state:
+ description:
+ - When an Agent Pool is first created it is initially C(Running).
+ - The Agent Pool can be stopped by setting this field to C(Stopped).
+ - A stopped Agent Pool stops all of its VMs and does not accrue billing charges.
+ - An Agent Pool can only be stopped if it is Running and provisioning state is Succeeded.
+ type: dict
+ suboptions:
+ code:
+ description:
+ - Tells whether the cluster is C(Running) or C(Stopped).
+ type: str
+ choices:
+ - Running
+ - Stopped
+ enable_node_public_ip:
+ description:
+ - Some scenarios may require nodes in a node pool to receive their own dedicated public IP addresses.
+ - A common scenario is for gaming workloads, where a console needs to make a direct connection to a cloud virtual machine to minimize hops.
+ type: bool
+ scale_set_priority:
+ description:
+ - The Virtual Machine Scale Set priority.
+ - If not specified, the default is C(Regular).
+ type: str
+ choices:
+ - Spot
+ - Regular
+ node_public_ip_prefix_id:
+ description:
+ - The Azure Public IP prefix's ID.
+ type: str
+ scale_set_eviction_policy:
+ description:
+ - This cannot be specified unless the I(scale_set_priority=Spot).
+ - If not specified, the default is C(Delete).
+ type: str
+ choices:
+ - Delete
+ - Deallocate
+ spot_max_price:
+ description:
+ - Possible values are any decimal value greater than zero or -1.
+ - Indicates the willingness to pay any on-demand price.
+ type: float
+ proximity_placement_group_id:
+ description:
+ - The ID for Proximity Placement Group.
+ type: str
+ kubelet_config:
+ description:
+ - The Kubelet configuration on the agent pool nodes.
+ type: dict
+ suboptions:
+ cpu_manager_policy:
+ description:
+ - Kubernetes CPU management policies.
+ - The default is C(none).
+ type: str
+ default: none
+ choices:
+ - none
+ - static
+ cpu_cfs_quota:
+ description:
+ - The default is C(true).
+ type: bool
+ default: true
+ cpu_cfs_quota_period:
+ description:
+ - The default is C(100ms).
+ - Valid values are a sequence of decimal numbers with an optional fraction and a unit suffix.
+ type: str
+ default: 100ms
+ image_gc_high_threshold:
+ description:
+ - To disable image garbage collection, set to C(100).
+ - The default is C(85).
+ type: int
+ default: 85
+ image_gc_low_threshold:
+ description:
+ - This cannot be set higher than imageGcHighThreshold.
+ - The default is C(80).
+ type: int
+ default: 80
+ topology_manager_policy:
+ description:
+ - Kubernetes Topology Manager policies.
+ - The default is C(none).
+ type: str
+ default: none
+ choices:
+ - none
+ - best-effort
+ - restricted
+ - single-numa-node
+ allowed_unsafe_sysctls:
+ description:
+ - Allowed list of unsafe sysctls or unsafe sysctl patterns.
+ type: list
+ elements: str
+ fail_swap_on:
+ description:
+ - If set to true it will make the Kubelet fail to start if swap is enabled on the node.
+ type: bool
+ container_log_max_size_mb:
+ description:
+ - The maximum size of container log file before it is rotated.
+ type: int
+ container_log_max_files:
+ description:
+ - The maximum number of container log files that can be present for a container. The number must be ≥ 2.
+ type: int
+ pod_max_pids:
+ description:
+ - The maximum number of processes per pod.
+ type: int
+ linux_os_config:
+ description:
+ - The OS configuration of Linux agent nodes.
+ type: dict
+ suboptions:
+ sysctls:
+ description:
+ - Sysctl settings for Linux agent nodes.
+ type: dict
+ suboptions:
+ net_core_somaxconn:
+ description:
+ - Sysctl setting net.core.somaxconn.
+ type: int
+ net_core_netdev_max_backlog:
+ description:
+ - Sysctl setting net.core.netdev_max_backlog.
+ type: int
+ net_core_rmem_default:
+ description:
+ - Sysctl setting net.core.rmem_default.
+ type: int
+ net_core_rmem_max:
+ description:
+ - Sysctl setting net.core.rmem_max.
+ type: int
+ net_core_wmem_default:
+ description:
+ - Sysctl setting net.core.wmem_default.
+ type: int
+ net_core_wmem_max:
+ description:
+ - Sysctl setting net.core.wmem_max.
+ type: int
+ net_core_optmem_max:
+ description:
+ - Sysctl setting net.core.optmem_max.
+ type: int
+ net_ipv4_tcp_max_syn_backlog:
+ description:
+ - Sysctl setting net.ipv4.tcp_max_syn_backlog.
+ type: int
+ net_ipv4_tcp_max_tw_buckets:
+ description:
+ - Sysctl setting net.ipv4.tcp_max_tw_buckets.
+ type: int
+ net_ipv4_tcp_fin_timeout:
+ description:
+ - Sysctl setting net.ipv4.tcp_fin_timeout.
+ type: int
+ net_ipv4_tcp_keepalive_time:
+ description:
+ - Sysctl setting net.ipv4.tcp_keepalive_time.
+ type: int
+ net_ipv4_tcp_keepalive_probes:
+ description:
+ - Sysctl setting net.ipv4.tcp_keepalive_probes.
+ type: int
+ net_ipv4_tcpkeepalive_intvl:
+ description:
+ - Sysctl setting net.ipv4.tcp_keepalive_intvl.
+ type: int
+ net_ipv4_tcp_tw_reuse:
+ description:
+ - Sysctl setting net.ipv4.tcp_tw_reuse.
+ type: bool
+ net_ipv4_ip_local_port_range:
+ description:
+ - Sysctl setting net.ipv4.ip_local_port_range.
+ type: str
+ net_ipv4_neigh_default_gc_thresh1:
+ description:
+ - Sysctl setting net.ipv4.neigh.default.gc_thresh1.
+ type: int
+ net_ipv4_neigh_default_gc_thresh2:
+ description:
+ - Sysctl setting net.ipv4.neigh.default.gc_thresh2.
+ type: int
+ net_ipv4_neigh_default_gc_thresh3:
+ description:
+ - Sysctl setting net.ipv4.neigh.default.gc_thresh3.
+ type: int
+ fs_inotify_max_user_watches:
+ description:
+ - Sysctl setting fs.inotify.max_user_watches.
+ type: int
+ fs_file_max:
+ description:
+ - Sysctl setting fs.file-max.
+ type: int
+ fs_aio_max_nr:
+ description:
+ - Sysctl setting fs.aio-max-nr.
+ type: int
+ fs_nr_open:
+ description:
+ - Sysctl setting fs.nr_open.
+ type: int
+ kernel_threads_max:
+ description:
+ - Sysctl setting kernel.threads-max.
+ type: int
+ vm_max_map_count:
+ description:
+ - Sysctl setting vm.max_map_count.
+ type: int
+ vm_swappiness:
+ description:
+ - Sysctl setting vm.swappiness.
+ type: int
+ vm_vfs_cache_pressure:
+ description:
+ - Sysctl setting vm.vfs_cache_pressure.
+ type: int
+ net_netfilter_nf_conntrack_max:
+ description:
+ - sysctl setting net.netfilter.nf_conntrack_max.
+ type: int
+ net_netfilter_nf_conntrack_buckets:
+ description:
+ - Sysctl setting net.netfilter.nf_conntrack_buckets.
+ type: int
+ transparent_huge_page_enabled:
+ description:
+ - The node agent pool transparent hugepage.
+ - The default is C(always).
+ type: str
+ default: always
+ choices:
+ - always
+ - madvise
+ - never
+ transparent_huge_page_defrag:
+ description:
+ - The node agent pool transparent huge page defrag.
+ - The default is C(madvise).
+ type: str
+ default: madvise
+ choices:
+ - always
+ - defer
+ - defer+madvise
+ - madvise
+ - never
+ swap_file_size_mb:
+ description:
+ - The size in MB of a swap file that will be created on each node.
+ type: int
+ enable_encryption_at_host:
+ description:
+ - This is only supported on certain VM sizes and in certain Azure regions.
+ type: bool
+ enable_ultra_ssd:
+ description:
+ - Whether to enable UltraSSD.
+ type: bool
+ enable_fips:
+ description:
+ - Whether enable FIPS node pool.
+ type: bool
+ gpu_instance_profile:
+ description:
+ - GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU.
+ type: str
+ choices:
+ - MIG1g
+ - MIG2g
+ - MIG3g
+ - MIG4g
+ - MIG7g
state:
description:
- State of the automation runbook. Use C(present) to create or update a automation runbook and use C(absent) to delete.
@@ -137,6 +468,31 @@ EXAMPLES = '''
availability_zones:
- 1
- 2
+- name: Create a node agent pool with custom config
+ azure_rm_aksagentpool:
+ resource_group: "{{ resource_group }}"
+ cluster_name: "min{{ rpfx }}"
+ name: default-new2
+ count: 1
+ vm_size: Standard_B2s
+ type_properties_type: VirtualMachineScaleSets
+ mode: System
+ node_labels: {"release":"stable"}
+ max_pods: 42
+ enable_auto_scaling: true
+ min_count: 1
+ max_count: 10
+ orchestrator_version: 1.23.5
+ availability_zones:
+ - 1
+ kubelet_config:
+ cpu_manager_policy: static
+ cpu_cfs_quota: true
+ fail_swap_on: false
+ linux_os_config:
+ transparent_huge_page_enabled: madvise
+ swap_file_size_mb: 1500
+ transparent_huge_page_defrag: defer+madvise
- name: Delete node agent pool
azure_rm_aksagentpool:
resource_group: "{{ resource_group }}"
@@ -313,6 +669,97 @@ aks_agent_pools:
type: str
returned: always
sample: null
+ kubelet_config:
+ description:
+ - The Kubelet configuration on the agent pool nodes.
+ type: dict
+ returned: always
+ sample: {
+ cpu_cfs_quota: true,
+ cpu_cfs_quota_period: 100ms,
+ cpu_manager_policy: static,
+ fail_swap_on: false,
+ image_gc_high_threshold: 85,
+ image_gc_low_threshold: 80,
+ topology_manager_policy: none
+ }
+ linux_os_config:
+ description:
+ - The OS configuration of Linux agent nodes.
+ type: dict
+ returned: always
+ sample: {
+ swap_file_size_mb: 1500,
+ sysctls: {},
+ transparent_huge_page_defrag: defer+madvise,
+ transparent_huge_page_enabled: madvise
+ }
+ power_state:
+ description:
+ - The agent pool's power state.
+ type: dict
+ returned: always
+ sample: {code: Running}
+ os_sku:
+ description:
+ - The node agent pool's SKU.
+ type: str
+ returned: always
+ sample: Ubuntu
+ tags:
+ description:
+ - The tags of the node agent pool.
+ type: dict
+ returned: always
+ sample: {key1: value1, key2: value2}
+ kubelet_disk_type:
+ description:
+ - Determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage.
+ type: str
+ returned: always
+ sample: OS
+ workload_runtime:
+ description:
+ - Determines the type of workload a node can run.
+ type: str
+ returned: always
+ sample: OCIContainer
+ scale_down_mode:
+ description:
+ - This also affects the cluster autoscaler behavior.
+ type: str
+ returned: always
+ sample: Delete
+ node_public_ip_prefix_id:
+ description:
+ - The Azure Public IP prefix's ID.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Network/publicIPPrefixes/pip01"
+ proximity_placement_group_id:
+ description:
+ - The ID for Proximity Placement Group.
+ type: str
+ returned: always
+ sample: /subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Compute/proximityPlacementGroups/proxi01
+ enable_encryption_at_host:
+ description:
+ - This is only supported on certain VM sizes and in certain Azure regions.
+ type: bool
+ returned: always
+ sample: false
+ enable_ultra_ssd:
+ description:
+ - Whether to enable UltraSSD.
+ type: bool
+ returned: always
+ sample: false
+ gpu_instance_profile:
+ description:
+ - GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU.
+ type: str
+ returned: always
+ sample: MIG1g
'''
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
@@ -387,6 +834,144 @@ class AzureRMAksAgentPool(AzureRMModuleBase):
max_pods=dict(
type='int'
),
+ kubelet_disk_type=dict(
+ type='str', choices=['OS', 'Temporary']
+ ),
+ workload_runtime=dict(
+ type='str', choices=['OCIContainer', 'WasmWasi']
+ ),
+ os_sku=dict(
+ type='str', choices=["Ubuntu", "CBLMariner"]
+ ),
+ scale_down_mode=dict(
+ type='str',
+ choices=['Delete', 'Deallocate'],
+ default='Delete'
+ ),
+ upgrade_settings=dict(
+ type='dict',
+ options=dict(
+ max_surge=dict(
+ type='str'
+ )
+ )
+ ),
+ power_state=dict(
+ type='dict',
+ options=dict(
+ code=dict(
+ type='str',
+ choices=['Running', 'Stopped']
+ )
+ )
+ ),
+ enable_node_public_ip=dict(
+ type='bool'
+ ),
+ scale_set_priority=dict(
+ type='str',
+ choices=["Spot", "Regular"],
+ ),
+ node_public_ip_prefix_id=dict(
+ type='str'
+ ),
+ scale_set_eviction_policy=dict(
+ type='str',
+ choices=['Delete', 'Deallocate'],
+ ),
+ spot_max_price=dict(
+ type='float'
+ ),
+ proximity_placement_group_id=dict(
+ type='str'
+ ),
+ kubelet_config=dict(
+ type='dict',
+ options=dict(
+ cpu_manager_policy=dict(type='str', choices=['none', 'static'], default='none'),
+ cpu_cfs_quota=dict(type='bool', default=True),
+ cpu_cfs_quota_period=dict(type='str', default='100ms'),
+ image_gc_high_threshold=dict(type='int', default=85),
+ image_gc_low_threshold=dict(type='int', default=80),
+ topology_manager_policy=dict(
+ type='str',
+ default='none',
+ choices=['none', 'best-effort', 'restricted', 'single-numa-node']
+ ),
+ allowed_unsafe_sysctls=dict(
+ type='list',
+ elements='str'
+ ),
+ fail_swap_on=dict(type='bool'),
+ container_log_max_size_mb=dict(type='int'),
+ container_log_max_files=dict(type='int'),
+ pod_max_pids=dict(type='int')
+ )
+ ),
+ linux_os_config=dict(
+ type='dict',
+ options=dict(
+ sysctls=dict(
+ type='dict',
+ options=dict(
+ net_core_somaxconn=dict(type='int'),
+ net_core_netdev_max_backlog=dict(type='int'),
+ net_core_rmem_default=dict(type='int'),
+ net_core_rmem_max=dict(type='int'),
+ net_core_wmem_default=dict(type='int'),
+ net_core_wmem_max=dict(type='int'),
+ net_core_optmem_max=dict(type='int'),
+ net_ipv4_tcp_max_syn_backlog=dict(type='int'),
+ net_ipv4_tcp_max_tw_buckets=dict(type='int'),
+ net_ipv4_tcp_fin_timeout=dict(type='int'),
+ net_ipv4_tcp_keepalive_time=dict(type='int'),
+ net_ipv4_tcp_keepalive_probes=dict(type='int'),
+ net_ipv4_tcpkeepalive_intvl=dict(type='int'),
+ net_ipv4_tcp_tw_reuse=dict(type='bool'),
+ net_ipv4_ip_local_port_range=dict(type='str'),
+ net_ipv4_neigh_default_gc_thresh1=dict(type='int'),
+ net_ipv4_neigh_default_gc_thresh2=dict(type='int'),
+ net_ipv4_neigh_default_gc_thresh3=dict(type='int'),
+ net_netfilter_nf_conntrack_max=dict(type='int'),
+ net_netfilter_nf_conntrack_buckets=dict(type='int'),
+ fs_inotify_max_user_watches=dict(type='int'),
+ fs_file_max=dict(type='int'),
+ fs_aio_max_nr=dict(type='int'),
+ fs_nr_open=dict(type='int'),
+ kernel_threads_max=dict(type='int'),
+ vm_max_map_count=dict(type='int'),
+ vm_swappiness=dict(type='int'),
+ vm_vfs_cache_pressure=dict(type='int')
+ )
+ ),
+ transparent_huge_page_enabled=dict(
+ type='str',
+ choices=['always', 'madvise', 'never'],
+ default='always'
+ ),
+ swap_file_size_mb=dict(
+ type='int'
+ ),
+ transparent_huge_page_defrag=dict(
+ type='str',
+ default='madvise',
+ choices=['always', 'defer', 'defer+madvise', 'madvise', 'never']
+ )
+ )
+ ),
+ enable_encryption_at_host=dict(
+ type='bool'
+ ),
+ enable_ultra_ssd=dict(
+ type='bool'
+ ),
+ enable_fips=dict(
+ type='bool'
+ ),
+ gpu_instance_profile=dict(
+ type='str',
+ choices=["MIG1g", "MIG2g", "MIG3g", "MIG4g", "MIG7g"]
+ ),
state=dict(
type='str',
choices=['present', 'absent'],
@@ -413,13 +998,32 @@ class AzureRMAksAgentPool(AzureRMModuleBase):
self.node_labels = None
self.min_count = None
self.max_pods = None
+ self.tags = None
+ self.kubelet_disk_type = None
+ self.workload_runtime = None
+ self.os_sku = None
+ self.scale_down_mode = None
+ self.upgrade_settings = None
+ self.power_state = None
+ self.enable_node_public_ip = None
+ self.scale_set_priority = None
+ self.node_public_ip_prefix_id = None
+ self.scale_set_eviction_policy = None
+ self.spot_max_price = None
+ self.proximity_placement_group_id = None
+ self.kubelet_config = None
+ self.linux_os_config = None
+ self.enable_encryption_at_host = None
+ self.enable_ultra_ssd = None
+ self.enable_fips = None
+ self.gpu_instance_profile = None
self.body = dict()
super(AzureRMAksAgentPool, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=True)
def exec_module(self, **kwargs):
- for key in list(self.module_arg_spec):
+ for key in list(self.module_arg_spec) + ['tags']:
setattr(self, key, kwargs[key])
if key not in ['resource_group', 'cluster_name', 'name', 'state']:
self.body[key] = kwargs[key]
@@ -430,8 +1034,16 @@ class AzureRMAksAgentPool(AzureRMModuleBase):
if self.state == 'present':
if agent_pool:
+ update_tags, self.body['tags'] = self.update_tags(agent_pool.get('tags'))
for key in self.body.keys():
- if self.body[key] is not None and self.body[key] != agent_pool[key]:
+ if key == 'tags':
+ if update_tags:
+ changed = True
+ elif self.body[key] is not None and isinstance(self.body[key], dict):
+ for item in self.body[key].keys():
+ if self.body[key][item] is not None and self.body[key][item] != agent_pool[key].get(item):
+ changed = True
+ elif self.body[key] is not None and self.body[key] != agent_pool[key] and key not in ['scale_set_priority', 'spot_max_price']:
changed = True
else:
self.body[key] = agent_pool[key]
@@ -509,14 +1121,54 @@ class AzureRMAksAgentPool(AzureRMModuleBase):
spot_max_price=agent_pool.spot_max_price,
node_labels=agent_pool.node_labels,
node_taints=agent_pool.node_taints,
+ tags=agent_pool.tags,
+ kubelet_disk_type=agent_pool.kubelet_disk_type,
+ workload_runtime=agent_pool.workload_runtime,
+ os_sku=agent_pool.os_sku,
+ scale_down_mode=agent_pool.scale_down_mode,
+ power_state=dict(),
+ node_public_ip_prefix_id=agent_pool.node_public_ip_prefix_id,
+ proximity_placement_group_id=agent_pool.proximity_placement_group_id,
+ kubelet_config=dict(),
+ linux_os_config=dict(),
+ enable_encryption_at_host=agent_pool.enable_encryption_at_host,
+ enable_ultra_ssd=agent_pool.enable_ultra_ssd,
+ enable_fips=agent_pool.enable_fips,
+ gpu_instance_profile=agent_pool.gpu_instance_profile
)
if agent_pool.upgrade_settings is not None:
agent_pool_dict['upgrade_settings']['max_surge'] = agent_pool.upgrade_settings.max_surge
+ else:
+ agent_pool_dict['upgrade_settings'] = None
if agent_pool.availability_zones is not None:
for key in agent_pool.availability_zones:
agent_pool_dict['availability_zones'].append(int(key))
+ else:
+ agent_pool_dict['availability_zones'] = None
+
+ if agent_pool.kubelet_config is not None:
+ agent_pool_dict['kubelet_config'] = agent_pool.kubelet_config.as_dict()
+ else:
+ agent_pool_dict['kubelet_config'] = None
+
+ if agent_pool.power_state is not None:
+ agent_pool_dict['power_state']['code'] = agent_pool.power_state.code
+ else:
+ agent_pool_dict['power_state'] = None
+
+ if agent_pool.linux_os_config is not None:
+ agent_pool_dict['linux_os_config']['transparent_huge_page_enabled'] = agent_pool.linux_os_config.transparent_huge_page_enabled
+ agent_pool_dict['linux_os_config']['transparent_huge_page_defrag'] = agent_pool.linux_os_config.transparent_huge_page_defrag
+ agent_pool_dict['linux_os_config']['swap_file_size_mb'] = agent_pool.linux_os_config.swap_file_size_mb
+ agent_pool_dict['linux_os_config']['sysctls'] = dict()
+ if agent_pool.linux_os_config.sysctls is not None:
+ agent_pool_dict['linux_os_config']['sysctls'] = agent_pool.linux_os_config.sysctls.as_dict()
+ else:
+ agent_pool_dict['linux_os_config']['sysctls'] = None
+ else:
+ agent_pool_dict['linux_os_config'] = None
return agent_pool_dict
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool_info.py
index 59f3b696d..99ba08254 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool_info.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_aksagentpool_info.py
@@ -168,6 +168,12 @@ aks_agent_pools:
type: str
returned: always
sample: Linux
+ os_sku:
+ description:
+ - OS SKU to be used to specify os type.
+ type: str
+ returned: always
+ sample: Windows2022
provisioning_state:
description:
- The current deployment or provisioning state, which only appears in the response.
@@ -222,6 +228,91 @@ aks_agent_pools:
type: str
returned: always
sample: null
+ kubelet_config:
+ description:
+ - The Kubelet configuration on the agent pool nodes.
+ type: dict
+ returned: always
+ sample: {
+ cpu_cfs_quota: true,
+ cpu_cfs_quota_period: 100ms,
+ cpu_manager_policy: static,
+ fail_swap_on: false,
+ image_gc_high_threshold: 85,
+ image_gc_low_threshold: 80,
+ topology_manager_policy: none
+ }
+ linux_os_config:
+ description:
+ - The OS configuration of Linux agent nodes.
+ type: dict
+ returned: always
+ sample: {
+ swap_file_size_mb: 1500,
+ sysctls: {},
+ transparent_huge_page_defrag: defer+madvise,
+ transparent_huge_page_enabled: madvise
+ }
+ power_state:
+ description:
+ - The agent pool's power state.
+ type: dict
+ returned: always
+ sample: {code: Running}
+ tags:
+ description:
+ - The tags of the node agent pool.
+ type: dict
+ returned: always
+ sample: {key1: value1, key2: value2}
+ kubelet_disk_type:
+ description:
+ - Determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage.
+ type: str
+ returned: always
+ sample: OS
+ workload_runtime:
+ description:
+ - Determines the type of workload a node can run.
+ type: str
+ returned: always
+ sample: OCIContainer
+ scale_down_mode:
+ description:
+ - This also affects the cluster autoscaler behavior.
+ type: str
+ returned: always
+ sample: Delete
+ node_public_ip_prefix_id:
+ description:
+ - The Azure Public IP prefix's ID.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Network/publicIPPrefixes/pip01"
+ proximity_placement_group_id:
+ description:
+ - The ID for Proximity Placement Group.
+ type: str
+ returned: always
+ sample: /subscriptions/xxx-xxx/resourceGroups/myRG/providers/Microsoft.Compute/proximityPlacementGroups/proxi01
+ enable_encryption_at_host:
+ description:
+ - This is only supported on certain VM sizes and in certain Azure regions.
+ type: bool
+ returned: always
+ sample: false
+ enable_ultra_ssd:
+ description:
+ - Whether to enable UltraSSD.
+ type: bool
+ returned: always
+ sample: false
+ gpu_instance_profile:
+ description:
+ - GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU.
+ type: str
+ returned: always
+ sample: MIG1g
'''
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
@@ -301,6 +392,7 @@ class AzureRMAgentPoolInfo(AzureRMModuleBase):
vnet_subnet_id=agent_pool.vnet_subnet_id,
max_pods=agent_pool.max_pods,
os_type=agent_pool.os_type,
+ os_sku=agent_pool.os_sku,
max_count=agent_pool.max_count,
min_count=agent_pool.min_count,
enable_auto_scaling=agent_pool.enable_auto_scaling,
@@ -317,14 +409,53 @@ class AzureRMAgentPoolInfo(AzureRMModuleBase):
spot_max_price=agent_pool.spot_max_price,
node_labels=agent_pool.node_labels,
node_taints=agent_pool.node_taints,
+ tags=agent_pool.tags,
+ kubelet_disk_type=agent_pool.kubelet_disk_type,
+ workload_runtime=agent_pool.workload_runtime,
+ scale_down_mode=agent_pool.scale_down_mode,
+ power_state=dict(),
+ node_public_ip_prefix_id=agent_pool.node_public_ip_prefix_id,
+ proximity_placement_group_id=agent_pool.proximity_placement_group_id,
+ kubelet_config=dict(),
+ linux_os_config=dict(),
+ enable_encryption_at_host=agent_pool.enable_encryption_at_host,
+ enable_ultra_ssd=agent_pool.enable_ultra_ssd,
+ enable_fips=agent_pool.enable_fips,
+ gpu_instance_profile=agent_pool.gpu_instance_profile
)
if agent_pool.upgrade_settings is not None:
agent_pool_dict['upgrade_settings']['max_surge'] = agent_pool.upgrade_settings.max_surge
+ else:
+ agent_pool_dict['upgrade_settings'] = None
if agent_pool.availability_zones is not None:
for key in agent_pool.availability_zones:
agent_pool_dict['availability_zones'].append(int(key))
+ else:
+ agent_pool_dict['availability_zones'] = None
+
+ if agent_pool.kubelet_config is not None:
+ agent_pool_dict['kubelet_config'] = agent_pool.kubelet_config.as_dict()
+ else:
+ agent_pool_dict['kubelet_config'] = None
+
+ if agent_pool.linux_os_config is not None:
+ agent_pool_dict['linux_os_config']['transparent_huge_page_enabled'] = agent_pool.linux_os_config.transparent_huge_page_enabled
+ agent_pool_dict['linux_os_config']['transparent_huge_page_defrag'] = agent_pool.linux_os_config.transparent_huge_page_defrag
+ agent_pool_dict['linux_os_config']['swap_file_size_mb'] = agent_pool.linux_os_config.swap_file_size_mb
+ agent_pool_dict['linux_os_config']['sysctls'] = dict()
+ if agent_pool.linux_os_config.sysctls is not None:
+ agent_pool_dict['linux_os_config']['sysctls'] = agent_pool.linux_os_config.sysctls.as_dict()
+ else:
+ agent_pool_dict['linux_os_config']['sysctls'] = None
+ else:
+ agent_pool_dict['linux_os_config'] = None
+
+ if agent_pool.power_state is not None:
+ agent_pool_dict['power_state']['code'] = agent_pool.power_state.code
+ else:
+ agent_pool_dict['power_state'] = None
return agent_pool_dict
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_backupazurevm.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_backupazurevm.py
index 863839329..eb6c297d4 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_backupazurevm.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_backupazurevm.py
@@ -379,14 +379,8 @@ class BackupAzureVM(AzureRMModuleBaseExt):
self.log('Error attempting to backup azure vm.')
self.fail(
'Error while taking on-demand backup: {0}'.format(str(e)))
-
- if hasattr(response, 'body'):
- response = json.loads(response.body())
- elif hasattr(response, 'context'):
- response = response.context['deserialized_data']
- else:
- self.fail("Create or Updating fail, no match message return, return info as {0}".format(response))
-
+ # The return value is None, which only triggers the backup. Backups also take some time to complete.
+ response = dict(msg='The backup has been successfully triggered, please monitor the backup process on the Backup Jobs page')
return response
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cognitivesearch.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cognitivesearch.py
index a8852c583..01dda868e 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cognitivesearch.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_cognitivesearch.py
@@ -395,31 +395,45 @@ class AzureRMSearch(AzureRMModuleBase):
self.results['changed'] = True
search_update_model.identity = self.search_client.services.models.Identity(type=self.identity)
+ network_update = False
if self.network_rule_set:
for rule in self.network_rule_set:
if len(self.network_rule_set) != len(self.account_dict.get('network_rule_set')) or rule not in self.account_dict.get('network_rule_set'):
self.results['changed'] = True
+ network_update = True
self.firewall_list.append(self.search_client.services.models.IpRule(value=rule))
search_update_model.network_rule_set = dict(ip_rules=self.firewall_list)
+ elif not network_update:
+ firewall_list = []
+ for rule in self.account_dict.get('network_rule_set', []):
+ firewall_list.append(self.search_client.services.models.IpRule(value=rule))
+ search_update_model.network_rule_set = dict(ip_rules=firewall_list)
if self.partition_count and self.account_dict.get('partition_count') != self.partition_count:
self.results['changed'] = True
search_update_model.partition_count = self.partition_count
+ else:
+ search_update_model.partition_count = self.account_dict.get('partition_count')
if self.public_network_access and self.account_dict.get('public_network_access').lower() != self.public_network_access.lower():
self.results['changed'] = True
search_update_model.public_network_access = self.public_network_access
+ else:
+ search_update_model.public_network_access = self.account_dict.get('public_network_access')
if self.replica_count and self.account_dict.get('replica_count') != self.replica_count:
self.results['changed'] = True
search_update_model.replica_count = self.replica_count
+ else:
+ search_update_model.replica_count = self.account_dict.get('replica_count')
if self.sku and self.account_dict.get('sku') != self.sku:
self.fail("Updating sku of an existing search service is not allowed.")
- if self.tags and self.account_dict.get('tags') != self.tags:
+ update_tags, new_tags = self.update_tags(self.account_dict.get('tags'))
+ if update_tags:
self.results['changed'] = True
- search_update_model.tags = self.tags
+ search_update_model.tags = new_tags
self.log('Updating search {0}'.format(self.name))
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_datalakestore.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_datalakestore.py
deleted file mode 100644
index b46907339..000000000
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_datalakestore.py
+++ /dev/null
@@ -1,807 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2020 David Duque Hernández, (@next-davidduquehernandez)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: azure_rm_datalakestore
-version_added: "1.4.0"
-short_description: Manage Azure data lake store
-description:
- - Create, update or delete a data lake store.
-options:
- default_group:
- description:
- - The default owner group for all new folders and files created in the Data Lake Store account.
- type: str
- encryption_config:
- description:
- - The Key Vault encryption configuration.
- type: dict
- suboptions:
- type:
- description:
- - The type of encryption configuration being used.
- choices:
- - UserManaged
- - ServiceManaged
- required: true
- type: str
- key_vault_meta_info:
- description:
- - The Key Vault information for connecting to user managed encryption keys.
- type: dict
- suboptions:
- key_vault_resource_id:
- description:
- - The resource identifier for the user managed Key Vault being used to encrypt.
- type: str
- required: true
- encryption_key_name:
- description:
- - The name of the user managed encryption key.
- type: str
- required: true
- encryption_key_version:
- description:
- - The version of the user managed encryption key.
- type: str
- required: true
- encryption_state:
- description:
- - The current state of encryption for this Data Lake Store account.
- choices:
- - Enabled
- - Disabled
- type: str
- firewall_allow_azure_ips:
- description:
- - The current state of allowing or disallowing IPs originating within Azure through the firewall.
- - If the firewall is disabled, this is not enforced.
- choices:
- - Enabled
- - Disabled
- type: str
- firewall_rules:
- description:
- - The list of firewall rules associated with this Data Lake Store account.
- type: list
- elements: dict
- suboptions:
- name:
- description:
- - The unique name of the firewall rule to create.
- type: str
- required: true
- start_ip_address:
- description:
- - The start IP address for the firewall rule.
- - This can be either ipv4 or ipv6.
- - Start and End should be in the same protocol.
- type: str
- required: true
- end_ip_address:
- description:
- - The end IP address for the firewall rule.
- - This can be either ipv4 or ipv6.
- - Start and End should be in the same protocol.
- type: str
- required: true
- firewall_state:
- description:
- - The current state of the IP address firewall for this Data Lake Store account.
- choices:
- - Enabled
- - Disabled
- type: str
- identity:
- description:
- - The Key Vault encryption identity, if any.
- choices:
- - SystemAssigned
- type: str
- location:
- description:
- - The resource location.
- type: str
- name:
- description:
- - The name of the Data Lake Store account.
- type: str
- required: true
- new_tier:
- description:
- - The commitment tier to use for next month.
- choices:
- - Consumption
- - Commitment_1TB
- - Commitment_10TB
- - Commitment_100TB
- - Commitment_500TB
- - Commitment_1PB
- - Commitment_5PB
- type: str
- resource_group:
- description:
- - The name of the Azure resource group to use.
- required: true
- type: str
- aliases:
- - resource_group_name
- state:
- description:
- - State of the data lake store. Use C(present) to create or update a data lake store and use C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
- type: str
- virtual_network_rules:
- description:
- - The list of virtual network rules associated with this Data Lake Store account.
- type: list
- elements: dict
- suboptions:
- name:
- description:
- - The unique name of the virtual network rule to create.
- type: str
- required: true
- subnet_id:
- description:
- - The resource identifier for the subnet.
- type: str
- required: true
-
-extends_documentation_fragment:
- - azure.azcollection.azure
- - azure.azcollection.azure_tags
-
-author:
- - David Duque Hernández (@next-davidduquehernandez)
-'''
-
-EXAMPLES = '''
-- name: Create Azure Data Lake Store
- azure_rm_datalakestore:
- resource_group: myResourceGroup
- name: myDataLakeStore
-'''
-
-RETURN = '''
-state:
- description:
- - Facts for Azure Data Lake Store created/updated.
- returned: always
- type: complex
- contains:
- account_id:
- description:
- - The unique identifier associated with this Data Lake Store account.
- returned: always
- type: str
- sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- creation_time:
- description:
- - The account creation time.
- returned: always
- type: str
- sample: '2020-01-01T00:00:00.000000+00:00'
- current_tier:
- description:
- - The commitment tier in use for the current month.
- type: str
- returned: always
- sample: Consumption
- default_group:
- description:
- - The default owner group for all new folders and files created in the Data Lake Store account.
- type: str
- sample: null
- encryption_config:
- description:
- - The Key Vault encryption configuration.
- type: complex
- contains:
- type:
- description:
- - The type of encryption configuration being used.
- type: str
- returned: always
- sample: ServiceManaged
- key_vault_meta_info:
- description:
- - The Key Vault information for connecting to user managed encryption keys.
- type: complex
- contains:
- key_vault_resource_id:
- description:
- - The resource identifier for the user managed Key Vault being used to encrypt.
- type: str
- returned: always
- sample: /subscriptions/{subscriptionId}/resourceGroups/myResourceGroup/providers/Microsoft.KeyVault/vaults/tstkv
- encryption_key_name:
- description:
- - The name of the user managed encryption key.
- type: str
- returned: always
- sample: KeyName
- encryption_key_version:
- description:
- - The version of the user managed encryption key.
- type: str
- returned: always
- sample: 86a1e3b7406f45afa0d54e21eff47e39
- encryption_provisioning_state:
- description:
- - The current state of encryption provisioning for this Data Lake Store account.
- type: str
- sample: Succeeded
- encryption_state:
- description:
- - The current state of encryption for this Data Lake Store account.
- type: str
- returned: always
- sample: Enabled
- endpoint:
- description:
- - The full CName endpoint for this account.
- returned: always
- type: str
- sample: testaccount.azuredatalakestore.net
- firewall_allow_azure_ips:
- description:
- - The current state of allowing or disallowing IPs originating within Azure through the firewall.
- - If the firewall is disabled, this is not enforced.
- type: str
- returned: always
- sample: Disabled
- firewall_rules:
- description:
- - The list of firewall rules associated with this Data Lake Store account.
- type: list
- returned: always
- contains:
- name:
- description:
- - The resource name.
- type: str
- returned: always
- sample: Example Name
- start_ip_address:
- description:
- - The start IP address for the firewall rule.
- - This can be either ipv4 or ipv6.
- - Start and End should be in the same protocol.
- type: str
- returned: always
- sample: 192.168.1.1
- end_ip_address:
- description:
- - The end IP address for the firewall rule.
- - This can be either ipv4 or ipv6.
- - Start and End should be in the same protocol.
- type: str
- returned: always
- sample: 192.168.1.254
- firewall_state:
- description:
- - The current state of the IP address firewall for this Data Lake Store account.
- type: str
- returned: always
- sample: Enabled
- id:
- description:
- - The resource identifier.
- returned: always
- type: str
- sample: /subscriptions/{subscriptionId}/resourceGroups/myResourceGroup/providers/Microsoft.DataLakeStore/accounts/testaccount
- identity:
- description:
- - The Key Vault encryption identity, if any.
- type: complex
- contains:
- type:
- description:
- - The type of encryption being used.
- type: str
- sample: SystemAssigned
- principal_id:
- description:
- - The principal identifier associated with the encryption.
- type: str
- sample: 00000000-0000-0000-0000-000000000000
- tenant_id:
- description:
- - The tenant identifier associated with the encryption.
- type: str
- sample: 00000000-0000-0000-0000-000000000000
- last_modified_time:
- description:
- - The account last modified time.
- returned: always
- type: str
- sample: '2020-01-01T00:00:00.000000+00:00'
- location:
- description:
- - The resource location.
- returned: always
- type: str
- sample: westeurope
- name:
- description:
- - The resource name.
- returned: always
- type: str
- sample: testaccount
- new_tier:
- description:
- - The commitment tier to use for next month.
- type: str
- returned: always
- sample: Consumption
- provisioning_state:
- description:
- - The provisioning status of the Data Lake Store account.
- returned: always
- type: str
- sample: Succeeded
- state:
- description:
- - The state of the Data Lake Store account.
- returned: always
- type: str
- sample: Active
- tags:
- description:
- - The resource tags.
- returned: always
- type: dict
- sample: { "tag1":"abc" }
- trusted_id_providers:
- description:
- - The current state of the trusted identity provider feature for this Data Lake Store account.
- type: list
- returned: always
- contains:
- id:
- description:
- - The resource identifier.
- type: str
- name:
- description:
- - The resource name.
- type: str
- type:
- description:
- - The resource type.
- type: str
- id_provider:
- description:
- - The URL of this trusted identity provider.
- type: str
- trusted_id_provider_state:
- description:
- - The list of trusted identity providers associated with this Data Lake Store account.
- type: str
- returned: always
- sample: Enabled
- type:
- description:
- - The resource type.
- returned: always
- type: str
- sample: Microsoft.DataLakeStore/accounts
- virtual_network_rules:
- description:
- - The list of virtual network rules associated with this Data Lake Store account.
- type: list
- returned: always
- contains:
- name:
- description:
- - The resource name.
- type: str
- sample: Rule Name
- subnet_id:
- description:
- - The resource identifier for the subnet.
- type: str
- sample: /subscriptions/{subscriptionId}/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/vnet/subnets/default
-
-'''
-
-from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from azure.core.exceptions import ResourceNotFoundError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-firewall_rules_item = dict(
- name=dict(type='str', required=True),
- start_ip_address=dict(type='str', required=True),
- end_ip_address=dict(type='str', required=True)
-)
-
-virtual_network_rules_item = dict(
- name=dict(type='str', required=True),
- subnet_id=dict(type='str', required=True)
-)
-
-
-class AzureRMDatalakeStore(AzureRMModuleBase):
- def __init__(self):
-
- self.module_arg_spec = dict(
- default_group=dict(type='str'),
- encryption_config=dict(
- type='dict',
- options=dict(
- type=dict(type='str', choices=['UserManaged', 'ServiceManaged'], required=True),
- key_vault_meta_info=dict(
- type='dict',
- no_log=True,
- options=dict(
- key_vault_resource_id=dict(type='str', required=True),
- encryption_key_name=dict(type='str', required=True),
- encryption_key_version=dict(type='str', no_log=True, required=True)
- )
- ),
- )
- ),
- encryption_state=dict(type='str', choices=['Enabled', 'Disabled']),
- firewall_allow_azure_ips=dict(type='str', choices=['Enabled', 'Disabled']),
- firewall_rules=dict(
- type='list',
- elements='dict',
- options=firewall_rules_item
- ),
- firewall_state=dict(type='str', choices=['Enabled', 'Disabled']),
- identity=dict(type='str', choices=['SystemAssigned']),
- location=dict(type='str'),
- name=dict(type='str', required=True),
- new_tier=dict(type='str', choices=['Consumption', 'Commitment_1TB', 'Commitment_10TB', 'Commitment_100TB',
- 'Commitment_500TB', 'Commitment_1PB', 'Commitment_5PB']),
- resource_group=dict(type='str', required=True, aliases=['resource_group_name']),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- virtual_network_rules=dict(
- type='list',
- elements='dict',
- options=virtual_network_rules_item
- ),
- )
-
- self.state = None
- self.name = None
- self.resource_group = None
- self.location = None
- self.new_tier = None
- self.default_group = None
- self.encryption_config = dict()
- self.encryption_config_model = None
- self.encryption_state = None
- self.firewall_state = None
- self.firewall_allow_azure_ips = None
- self.firewall_rules = None
- self.firewall_rules_model = None
- self.virtual_network_rules = None
- self.virtual_network_rules_model = None
- self.identity = None
- self.identity_model = None
-
- self.results = dict(changed=False)
- self.account_dict = None
-
- super(AzureRMDatalakeStore, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=False,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- self.module.deprecate("The azure_rm_datalakestore.py will deprecated. Azure Data Lake Storage Gen1 retired on February 29,2024", version=(2.3, ))
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- if self.encryption_config:
- key_vault_meta_info_model = None
- if self.encryption_config.get('key_vault_meta_info'):
- key_vault_meta_info_model = self.datalake_store_models.KeyVaultMetaInfo(
- key_vault_resource_id=self.encryption_config.get('key_vault_meta_info').get('key_vault_resource_id'),
- encryption_key_name=self.encryption_config.get('key_vault_meta_info').get('encryption_key_name'),
- encryption_key_version=self.encryption_config.get('key_vault_meta_info').get('encryption_key_version')
- )
- self.encryption_config_model = self.datalake_store_models.EncryptionConfig(type=self.encryption_config.get('type'),
- key_vault_meta_info=key_vault_meta_info_model)
-
- if self.identity is not None:
- self.identity_model = self.datalake_store_models.EncryptionIdentity(
- type=self.identity
- )
-
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- self.location = resource_group.location
-
- self.account_dict = self.get_datalake_store()
-
- if self.account_dict is not None:
- self.results['state'] = self.account_dict
- else:
- self.results['state'] = dict()
-
- if self.state == 'present':
- if not self.account_dict:
- self.results['state'] = self.create_datalake_store()
- else:
- self.results['state'] = self.update_datalake_store()
- else:
- self.delete_datalake_store()
- self.results['state'] = dict(state='Deleted')
-
- return self.results
-
- def check_name_availability(self):
- self.log('Checking name availability for {0}'.format(self.name))
- try:
- response = self.datalake_store_client.accounts.check_name_availability(self.location, parameters={'name': self.name})
- except Exception as e:
- self.log('Error attempting to validate name.')
- self.fail("Error checking name availability: {0}".format(str(e)))
- if not response.name_available:
- self.log('Error name not available.')
- self.fail("{0} - {1}".format(response.message, response.reason))
-
- def create_datalake_store(self):
- self.log("Creating datalake store {0}".format(self.name))
-
- if not self.location:
- self.fail('Parameter error: location required when creating a datalake store account.')
-
- self.check_name_availability()
- self.results['changed'] = True
-
- if self.check_mode:
- account_dict = dict(
- name=self.name,
- resource_group=self.resource_group,
- location=self.location
- )
- return account_dict
-
- if self.firewall_rules is not None:
- self.firewall_rules_model = list()
- for rule in self.firewall_rules:
- rule_model = self.datalake_store_models.CreateFirewallRuleWithAccountParameters(
- name=rule.get('name'),
- start_ip_address=rule.get('start_ip_address'),
- end_ip_address=rule.get('end_ip_address'))
- self.firewall_rules_model.append(rule_model)
-
- if self.virtual_network_rules is not None:
- self.virtual_network_rules_model = list()
- for vnet_rule in self.virtual_network_rules:
- vnet_rule_model = self.datalake_store_models.CreateVirtualNetworkRuleWithAccountParameters(
- name=vnet_rule.get('name'),
- subnet_id=vnet_rule.get('subnet_id'))
- self.virtual_network_rules_model.append(vnet_rule_model)
-
- parameters = self.datalake_store_models.CreateDataLakeStoreAccountParameters(
- default_group=self.default_group,
- encryption_config=self.encryption_config_model,
- encryption_state=self.encryption_state,
- firewall_allow_azure_ips=self.firewall_allow_azure_ips,
- firewall_rules=self.firewall_rules_model,
- firewall_state=self.firewall_state,
- identity=self.identity_model,
- location=self.location,
- new_tier=self.new_tier,
- tags=self.tags,
- virtual_network_rules=self.virtual_network_rules_model
- )
-
- self.log(str(parameters))
- try:
- poller = self.datalake_store_client.accounts.begin_create(self.resource_group, self.name, parameters)
- self.get_poller_result(poller)
- except Exception as e:
- self.log('Error creating datalake store.')
- self.fail("Failed to create datalake store: {0}".format(str(e)))
-
- return self.get_datalake_store()
-
- def update_datalake_store(self):
- self.log("Updating datalake store {0}".format(self.name))
-
- parameters = self.datalake_store_models.UpdateDataLakeStoreAccountParameters()
-
- if self.tags:
- update_tags, self.account_dict['tags'] = self.update_tags(self.account_dict['tags'])
- if update_tags:
- self.results['changed'] = True
- parameters.tags = self.account_dict['tags']
-
- if self.new_tier and self.account_dict.get('new_tier') != self.new_tier:
- self.results['changed'] = True
- parameters.new_tier = self.new_tier
-
- if self.default_group and self.account_dict.get('default_group') != self.default_group:
- self.results['changed'] = True
- parameters.default_group = self.default_group
-
- if self.encryption_state and self.account_dict.get('encryption_state') != self.encryption_state:
- self.fail("Encryption type cannot be updated.")
-
- if self.encryption_config:
- if (
- self.encryption_config.get('type') == 'UserManaged'
- and self.encryption_config.get('key_vault_meta_info') != self.account_dict.get('encryption_config').get('key_vault_meta_info')
- ):
- self.results['changed'] = True
- key_vault_meta_info_model = self.datalake_store_models.UpdateKeyVaultMetaInfo(
- encryption_key_version=self.encryption_config.get('key_vault_meta_info').get('encryption_key_version')
- )
- encryption_config_model = self.datalake_store_models.UpdateEncryptionConfig = key_vault_meta_info_model
- parameters.encryption_config = encryption_config_model
-
- if self.firewall_state and self.account_dict.get('firewall_state') != self.firewall_state:
- self.results['changed'] = True
- parameters.firewall_state = self.firewall_state
-
- if self.firewall_allow_azure_ips and self.account_dict.get('firewall_allow_azure_ips') != self.firewall_allow_azure_ips:
- self.results['changed'] = True
- parameters.firewall_allow_azure_ips = self.firewall_allow_azure_ips
-
- if self.firewall_rules is not None:
- if not self.compare_lists(self.firewall_rules, self.account_dict.get('firewall_rules')):
- self.firewall_rules_model = list()
- for rule in self.firewall_rules:
- rule_model = self.datalake_store_models.UpdateFirewallRuleWithAccountParameters(
- name=rule.get('name'),
- start_ip_address=rule.get('start_ip_address'),
- end_ip_address=rule.get('end_ip_address'))
- self.firewall_rules_model.append(rule_model)
- self.results['changed'] = True
- parameters.firewall_rules = self.firewall_rules_model
-
- if self.virtual_network_rules is not None:
- if not self.compare_lists(self.virtual_network_rules, self.account_dict.get('virtual_network_rules')):
- self.virtual_network_rules_model = list()
- for vnet_rule in self.virtual_network_rules:
- vnet_rule_model = self.datalake_store_models.UpdateVirtualNetworkRuleWithAccountParameters(
- name=vnet_rule.get('name'),
- subnet_id=vnet_rule.get('subnet_id'))
- self.virtual_network_rules_model.append(vnet_rule_model)
- self.results['changed'] = True
- parameters.virtual_network_rules = self.virtual_network_rules_model
-
- if self.identity_model is not None:
- self.results['changed'] = True
- parameters.identity = self.identity_model
-
- self.log(str(parameters))
- if self.results['changed']:
- try:
- poller = self.datalake_store_client.accounts.begin_update(self.resource_group, self.name, parameters)
- self.get_poller_result(poller)
- except Exception as e:
- self.log('Error creating datalake store.')
- self.fail("Failed to create datalake store: {0}".format(str(e)))
-
- return self.get_datalake_store()
-
- def delete_datalake_store(self):
- self.log('Delete datalake store {0}'.format(self.name))
-
- self.results['changed'] = True if self.account_dict is not None else False
- if not self.check_mode and self.account_dict is not None:
- try:
- status = self.datalake_store_client.accounts.begin_delete(self.resource_group, self.name)
- self.log("delete status: ")
- self.log(str(status))
- except Exception as e:
- self.fail("Failed to delete datalake store: {0}".format(str(e)))
-
- return True
-
- def get_datalake_store(self):
- self.log('Get properties for datalake store {0}'.format(self.name))
- datalake_store_obj = None
- account_dict = None
-
- try:
- datalake_store_obj = self.datalake_store_client.accounts.get(self.resource_group, self.name)
- except ResourceNotFoundError:
- pass
-
- if datalake_store_obj:
- account_dict = self.account_obj_to_dict(datalake_store_obj)
-
- return account_dict
-
- def account_obj_to_dict(self, datalake_store_obj):
- account_dict = dict(
- account_id=datalake_store_obj.account_id,
- creation_time=datalake_store_obj.creation_time,
- current_tier=datalake_store_obj.current_tier,
- default_group=datalake_store_obj.default_group,
- encryption_config=None,
- encryption_provisioning_state=datalake_store_obj.encryption_provisioning_state,
- encryption_state=datalake_store_obj.encryption_state,
- endpoint=datalake_store_obj.endpoint,
- firewall_allow_azure_ips=datalake_store_obj.firewall_allow_azure_ips,
- firewall_rules=None,
- firewall_state=datalake_store_obj.firewall_state,
- id=datalake_store_obj.id,
- identity=None,
- last_modified_time=datalake_store_obj.last_modified_time,
- location=datalake_store_obj.location,
- name=datalake_store_obj.name,
- new_tier=datalake_store_obj.new_tier,
- provisioning_state=datalake_store_obj.provisioning_state,
- state=datalake_store_obj.state,
- tags=datalake_store_obj.tags,
- trusted_id_providers=datalake_store_obj.trusted_id_providers,
- trusted_id_provider_state=datalake_store_obj.trusted_id_provider_state,
- type=datalake_store_obj.type,
- virtual_network_rules=None
- )
-
- account_dict['firewall_rules'] = list()
- if datalake_store_obj.firewall_rules:
- for rule in datalake_store_obj.firewall_rules:
- rule_item = dict(
- name=rule.name,
- start_ip_address=rule.start_ip_address,
- end_ip_address=rule.end_ip_address
- )
- account_dict['firewall_rules'].append(rule_item)
-
- account_dict['virtual_network_rules'] = list()
- if datalake_store_obj.virtual_network_rules:
- for vnet_rule in datalake_store_obj.virtual_network_rules:
- vnet_rule_item = dict(
- name=vnet_rule.name,
- subnet_id=vnet_rule.subnet_id
- )
- account_dict['virtual_network_rules'].append(vnet_rule_item)
-
- if datalake_store_obj.identity:
- account_dict['identity'] = dict(
- type=datalake_store_obj.identity.type,
- principal_id=datalake_store_obj.identity.principal_id,
- tenant_id=datalake_store_obj.identity.tenant_id
- )
-
- if datalake_store_obj.encryption_config:
- if datalake_store_obj.encryption_config.key_vault_meta_info:
- account_dict['encryption_config'] = dict(
- key_vault_meta_info=dict(
- key_vault_resource_id=datalake_store_obj.encryption_config.key_vault_meta_info.key_vault_resource_id,
- encryption_key_name=datalake_store_obj.encryption_config.key_vault_meta_info.encryption_key_name,
- encryption_key_version=datalake_store_obj.encryption_config.key_vault_meta_info.encryption_key_version
- )
- )
-
- return account_dict
-
- def compare_lists(self, list1, list2):
- if len(list1) != len(list2):
- return False
- for element in list1:
- if element not in list2:
- return False
- return True
-
-
-def main():
- AzureRMDatalakeStore()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_datalakestore_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_datalakestore_info.py
deleted file mode 100644
index 8444a4c1c..000000000
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_datalakestore_info.py
+++ /dev/null
@@ -1,468 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2020 David Duque Hernández, (@next-davidduquehernandez)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_datalakestore_info
-version_added: "1.4.0"
-short_description: Get Azure Data Lake Store info
-description:
- - Get Azure Data Lake Store info.
-
-options:
- resource_group:
- description:
- - The name of the Azure resource group.
- type: str
- aliases:
- - resource_group_name
- name:
- description:
- - The name of the Data Lake Store account.
- type: str
-
-extends_documentation_fragment:
- - azure.azcollection.azure
-
-author:
- - David Duque Hernández (@next-davidduquehernandez)
-
-'''
-
-EXAMPLES = '''
-- name: Get Azure Data Lake Store info from resource group 'myResourceGroup' and name 'myDataLakeStore'
- azure_rm_datalakestore_info:
- resource_group: myResourceGroup
- name: myDataLakeStore
-
-- name: Get Azure Data Lake Store info from resource group 'myResourceGroup'
- azure_rm_datalakestore_info:
- resource_group: myResourceGroup
-
-- name: Get Azure Data Lake Store info
- azure_rm_datalakestore_info:
-'''
-
-RETURN = '''
-datalake:
- description:
- - A list of dictionaries containing facts for Azure Data Lake Store.
- returned: always
- type: complex
- contains:
- account_id:
- description:
- - The unique identifier associated with this Data Lake Store account.
- returned: always
- type: str
- sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- creation_time:
- description:
- - The account creation time.
- returned: always
- type: str
- sample: '2020-01-01T00:00:00.000000+00:00'
- current_tier:
- description:
- - The commitment tier in use for the current month.
- type: str
- sample: Consumption
- default_group:
- description:
- - The default owner group for all new folders and files created in the Data Lake Store account.
- type: str
- sample: null
- encryption_config:
- description:
- - The Key Vault encryption configuration.
- type: complex
- contains:
- type:
- description:
- - The type of encryption configuration being used.
- type: str
- returned: always
- sample: ServiceManaged
- key_vault_meta_info:
- description:
- - The Key Vault information for connecting to user managed encryption keys.
- type: complex
- contains:
- key_vault_resource_id:
- description:
- - The resource identifier for the user managed Key Vault being used to encrypt.
- type: str
- returned: always
- sample: /subscriptions/{subscriptionId}/resourceGroups/myRG/providers/Microsoft.KeyVault/vaults/testkv
- encryption_key_name:
- description:
- - The name of the user managed encryption key.
- type: str
- returned: always
- sample: KeyName
- encryption_key_version:
- description:
- - The version of the user managed encryption key.
- type: str
- returned: always
- sample: 86a1e3b7406f45afa0d54e21eff47e39
- encryption_provisioning_state:
- description:
- - The current state of encryption provisioning for this Data Lake Store account.
- type: str
- sample: Succeeded
- encryption_state:
- description:
- - The current state of encryption for this Data Lake Store account.
- type: str
- sample: Enabled
- endpoint:
- description:
- - The full CName endpoint for this account.
- returned: always
- type: str
- sample: testaccount.azuredatalakestore.net
- firewall_allow_azure_ips:
- description:
- - The current state of allowing or disallowing IPs originating within Azure through the firewall.
- type: str
- sample: Disabled
- firewall_rules:
- description:
- - The list of firewall rules associated with this Data Lake Store account.
- type: list
- contains:
- name:
- description:
- - The resource name.
- type: str
- returned: always
- sample: Example Name
- start_ip_address:
- description:
- - The start IP address for the firewall rule.
- - This can be either ipv4 or ipv6.
- - Start and End should be in the same protocol.
- type: str
- returned: always
- sample: 192.168.1.1
- end_ip_address:
- description:
- - The end IP address for the firewall rule.
- - This can be either ipv4 or ipv6.
- - Start and End should be in the same protocol.
- type: str
- returned: always
- sample: 192.168.1.254
- firewall_state:
- description:
- - The current state of the IP address firewall for this Data Lake Store account.
- type: str
- sample: Enabled
- id:
- description:
- - The resource identifier.
- returned: always
- type: str
- sample: /subscriptions/{subscriptionId}/resourceGroups/myResourceGroup/providers/Microsoft.DataLakeStore/accounts/testaccount
- identity:
- description:
- - The Key Vault encryption identity, if any.
- type: complex
- contains:
- type:
- description:
- - The type of encryption being used.
- type: str
- sample: SystemAssigned
- principal_id:
- description:
- - The principal identifier associated with the encryption.
- type: str
- sample: 00000000-0000-0000-0000-000000000000
- tenant_id:
- description:
- - The tenant identifier associated with the encryption.
- type: str
- sample: 00000000-0000-0000-0000-000000000000
- last_modified_time:
- description:
- - The account last modified time.
- returned: always
- type: str
- sample: '2020-01-01T00:00:00.000000+00:00'
- location:
- description:
- - The resource location.
- returned: always
- type: str
- sample: westeurope
- name:
- description:
- - The resource name.
- returned: always
- type: str
- sample: testaccount
- new_tier:
- description:
- - The commitment tier to use for next month.
- type: str
- sample: Consumption
- provisioning_state:
- description:
- - The provisioning status of the Data Lake Store account.
- returned: always
- type: str
- sample: Succeeded
- state:
- description:
- - The state of the Data Lake Store account.
- returned: always
- type: str
- sample: Active
- tags:
- description:
- - The resource tags.
- returned: always
- type: dict
- sample: { "tag1":"abc" }
- trusted_id_providers:
- description:
- - The current state of the trusted identity provider feature for this Data Lake Store account.
- type: list
- contains:
- id:
- description:
- - The resource identifier.
- type: str
- name:
- description:
- - The resource name.
- type: str
- type:
- description:
- - The resource type.
- type: str
- id_provider:
- description:
- - The URL of this trusted identity provider.
- type: str
- trusted_id_provider_state:
- description:
- - The list of trusted identity providers associated with this Data Lake Store account.
- type: str
- sample: Enabled
- type:
- description:
- - The resource type.
- returned: always
- type: str
- sample: Microsoft.DataLakeStore/accounts
- virtual_network_rules:
- description:
- - The list of virtual network rules associated with this Data Lake Store account.
- type: list
- contains:
- name:
- description:
- - The resource name.
- type: str
- sample: Rule Name
- subnet_id:
- description:
- - The resource identifier for the subnet.
- type: str
- sample: /subscriptions/{subscriptionId}/resourceGroups/myRG/providers/Microsoft.Network/virtualNetworks/vnet/subnets/default
-'''
-
-try:
- from azure.core.exceptions import ResourceNotFoundError
-except Exception:
- # This is handled in azure_rm_common
- pass
-
-from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
-
-
-class AzureRMDatalakeStoreInfo(AzureRMModuleBase):
- def __init__(self):
-
- self.module_arg_spec = dict(
- name=dict(type='str'),
- resource_group=dict(type='str', aliases=['resource_group_name'])
- )
-
- self.results = dict(
- changed=False,
- datalake=[]
- )
-
- self.name = None
- self.resource_group = None
-
- super(AzureRMDatalakeStoreInfo, self).__init__(self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=False)
-
- def exec_module(self, **kwargs):
- self.module.deprecate("The azure_rm_datalakestore_info.py will deprecated. Azure Data Lake Storage Gen1 retired on February 29,2024", version=(2.3, ))
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if self.name and not self.resource_group:
- self.fail("Parameter error: resource group required when filtering by name.")
-
- results = []
- if self.name:
- results = self.get_datalake_store()
- elif self.resource_group:
- results = self.list_resource_group()
- else:
- results = self.list_all()
-
- self.results['datalake'] = results
- return self.results
-
- def get_datalake_store(self):
- self.log('Get properties for datalake store {0}'.format(self.name))
- datalake_store_obj = None
-
- try:
- datalake_store_obj = self.datalake_store_client.accounts.get(self.resource_group, self.name)
- except ResourceNotFoundError:
- pass
-
- if datalake_store_obj:
- return [self.account_obj_to_dict(datalake_store_obj)]
-
- return list()
-
- def list_resource_group(self):
- self.log('Get basic properties for datalake store in resource group {0}'.format(self.resource_group))
- datalake_store_obj = None
- results = list()
-
- try:
- datalake_store_obj = self.datalake_store_client.accounts.list_by_resource_group(self.resource_group)
- except Exception:
- pass
-
- if datalake_store_obj:
- for datalake_item in datalake_store_obj:
- results.append(self.account_obj_to_dict_basic(datalake_item))
- return results
-
- return list()
-
- def list_all(self):
- self.log('Get basic properties for all datalake store')
- datalake_store_obj = None
- results = list()
-
- try:
- datalake_store_obj = self.datalake_store_client.accounts.list()
- except Exception:
- pass
-
- if datalake_store_obj:
- for datalake_item in datalake_store_obj:
- results.append(self.account_obj_to_dict_basic(datalake_item))
- return results
-
- return list()
-
- def account_obj_to_dict(self, datalake_store_obj):
- account_dict = dict(
- account_id=datalake_store_obj.account_id,
- creation_time=datalake_store_obj.creation_time,
- current_tier=datalake_store_obj.current_tier,
- default_group=datalake_store_obj.default_group,
- encryption_config=dict(type=datalake_store_obj.encryption_config.type,
- key_vault_meta_info=None),
- encryption_provisioning_state=datalake_store_obj.encryption_provisioning_state,
- encryption_state=datalake_store_obj.encryption_state,
- endpoint=datalake_store_obj.endpoint,
- firewall_allow_azure_ips=datalake_store_obj.firewall_allow_azure_ips,
- firewall_rules=None,
- firewall_state=datalake_store_obj.firewall_state,
- id=datalake_store_obj.id,
- identity=None,
- last_modified_time=datalake_store_obj.last_modified_time,
- location=datalake_store_obj.location,
- name=datalake_store_obj.name,
- new_tier=datalake_store_obj.new_tier,
- provisioning_state=datalake_store_obj.provisioning_state,
- state=datalake_store_obj.state,
- tags=datalake_store_obj.tags,
- trusted_id_providers=datalake_store_obj.trusted_id_providers,
- trusted_id_provider_state=datalake_store_obj.trusted_id_provider_state,
- type=datalake_store_obj.type,
- virtual_network_rules=None
- )
-
- account_dict['firewall_rules'] = list()
- for rule in datalake_store_obj.firewall_rules:
- rule_item = dict(
- name=rule.name,
- start_ip_address=rule.start_ip_address,
- end_ip_address=rule.end_ip_address
- )
- account_dict['firewall_rules'].append(rule_item)
-
- account_dict['virtual_network_rules'] = list()
- for vnet_rule in datalake_store_obj.virtual_network_rules:
- vnet_rule_item = dict(
- name=vnet_rule.name,
- subnet_id=vnet_rule.subnet_id
- )
- account_dict['virtual_network_rules'].append(vnet_rule_item)
-
- if datalake_store_obj.identity:
- account_dict['identity'] = dict(
- type=datalake_store_obj.identity.type,
- principal_id=datalake_store_obj.identity.principal_id,
- tenant_id=datalake_store_obj.identity.tenant_id
- )
-
- if datalake_store_obj.encryption_config.key_vault_meta_info:
- account_dict['encryption_config'] = dict(
- key_vault_meta_info=dict(
- key_vault_resource_id=datalake_store_obj.encryption_config.key_vault_meta_info.key_vault_resource_id,
- encryption_key_name=datalake_store_obj.encryption_config.key_vault_meta_info.encryption_key_name,
- encryption_key_version=datalake_store_obj.encryption_config.key_vault_meta_info.encryption_key_version
- )
- )
-
- return account_dict
-
- def account_obj_to_dict_basic(self, datalake_store_obj):
- account_dict = dict(
- account_id=datalake_store_obj.account_id,
- creation_time=datalake_store_obj.creation_time,
- endpoint=datalake_store_obj.endpoint,
- id=datalake_store_obj.id,
- last_modified_time=datalake_store_obj.last_modified_time,
- location=datalake_store_obj.location,
- name=datalake_store_obj.name,
- provisioning_state=datalake_store_obj.provisioning_state,
- state=datalake_store_obj.state,
- tags=datalake_store_obj.tags,
- type=datalake_store_obj.type
- )
-
- return account_dict
-
-
-def main():
- AzureRMDatalakeStoreInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_loadbalancer.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_loadbalancer.py
index 2c473778c..c6c56291a 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_loadbalancer.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_loadbalancer.py
@@ -328,99 +328,6 @@ options:
- Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination.
- This element is only used when I(protocol=Tcp).
type: bool
- public_ip_address_name:
- description:
- - (deprecated) Name of an existing public IP address object to associate with the security group.
- - This option has been deprecated, and will be removed in 2.9. Use I(frontend_ip_configurations) instead.
- type: str
- aliases:
- - public_ip_address
- - public_ip_name
- - public_ip
- probe_port:
- description:
- - (deprecated) The port that the health probe will use.
- - This option has been deprecated, and will be removed in 2.9. Use I(probes) instead.
- type: int
- probe_protocol:
- description:
- - (deprecated) The protocol to use for the health probe.
- - This option has been deprecated, and will be removed in 2.9. Use I(probes) instead.
- type: str
- choices:
- - Tcp
- - Http
- - Https
- probe_interval:
- description:
- - (deprecated) Time (in seconds) between endpoint health probes.
- - This option has been deprecated, and will be removed in 2.9. Use I(probes) instead.
- type: int
- default: 15
- probe_fail_count:
- description:
- - (deprecated) The amount of probe failures for the load balancer to make a health determination.
- - This option has been deprecated, and will be removed in 2.9. Use I(probes) instead.
- default: 3
- type: int
- probe_request_path:
- description:
- - (deprecated) The URL that an HTTP probe or HTTPS probe will use (only relevant if I(probe_protocol=Http) or I(probe_protocol=Https)).
- - This option has been deprecated, and will be removed in 2.9. Use I(probes) instead.
- type: str
- protocol:
- description:
- - (deprecated) The protocol (TCP or UDP) that the load balancer will use.
- - This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead.
- type: str
- choices:
- - Tcp
- - Udp
- load_distribution:
- description:
- - (deprecated) The type of load distribution that the load balancer will employ.
- - This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead.
- type: str
- choices:
- - Default
- - SourceIP
- - SourceIPProtocol
- frontend_port:
- description:
- - (deprecated) Frontend port that will be exposed for the load balancer.
- - This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead.
- type: int
- backend_port:
- description:
- - (deprecated) Backend port that will be exposed for the load balancer.
- - This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead.
- type: int
- idle_timeout:
- description:
- - (deprecated) Timeout for TCP idle connection in minutes.
- - This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead.
- type: int
- default: 4
- natpool_frontend_port_start:
- description:
- - (deprecated) Start of the port range for a NAT pool.
- - This option has been deprecated, and will be removed in 2.9. Use I(inbound_nat_pools) instead.
- type: int
- natpool_frontend_port_end:
- description:
- - (deprecated) End of the port range for a NAT pool.
- - This option has been deprecated, and will be removed in 2.9. Use I(inbound_nat_pools) instead.
- type: int
- natpool_backend_port:
- description:
- - (deprecated) Backend port used by the NAT pool.
- - This option has been deprecated, and will be removed in 2.9. Use I(inbound_nat_pools) instead.
- type: int
- natpool_protocol:
- description:
- - (deprecated) The protocol for the NAT pool.
- - This option has been deprecated, and will be removed in 2.9. Use I(inbound_nat_pools) instead.
- type: str
extends_documentation_fragment:
- azure.azcollection.azure
- azure.azcollection.azure_tags
@@ -710,58 +617,6 @@ class AzureRMLoadBalancer(AzureRMModuleBase):
type='list',
elements='dict',
options=load_balancing_rule_spec
- ),
- public_ip_address_name=dict(
- type='str',
- aliases=['public_ip_address', 'public_ip_name', 'public_ip']
- ),
- probe_port=dict(
- type='int'
- ),
- probe_protocol=dict(
- type='str',
- choices=['Tcp', 'Http', 'Https']
- ),
- probe_interval=dict(
- type='int',
- default=15
- ),
- probe_fail_count=dict(
- type='int',
- default=3
- ),
- probe_request_path=dict(
- type='str'
- ),
- protocol=dict(
- type='str',
- choices=['Tcp', 'Udp']
- ),
- load_distribution=dict(
- type='str',
- choices=['Default', 'SourceIP', 'SourceIPProtocol']
- ),
- frontend_port=dict(
- type='int'
- ),
- backend_port=dict(
- type='int'
- ),
- idle_timeout=dict(
- type='int',
- default=4
- ),
- natpool_frontend_port_start=dict(
- type='int'
- ),
- natpool_frontend_port_end=dict(
- type='int'
- ),
- natpool_backend_port=dict(
- type='int'
- ),
- natpool_protocol=dict(
- type='str'
)
)
@@ -775,22 +630,7 @@ class AzureRMLoadBalancer(AzureRMModuleBase):
self.inbound_nat_rules = None
self.inbound_nat_pools = None
self.load_balancing_rules = None
- self.public_ip_address_name = None
self.state = None
- self.probe_port = None
- self.probe_protocol = None
- self.probe_interval = None
- self.probe_fail_count = None
- self.probe_request_path = None
- self.protocol = None
- self.load_distribution = None
- self.frontend_port = None
- self.backend_port = None
- self.idle_timeout = None
- self.natpool_frontend_port_start = None
- self.natpool_frontend_port_end = None
- self.natpool_backend_port = None
- self.natpool_protocol = None
self.tags = None
self.results = dict(changed=False, state=dict())
@@ -814,54 +654,6 @@ class AzureRMLoadBalancer(AzureRMModuleBase):
load_balancer = self.get_load_balancer()
if self.state == 'present':
- # compatible parameters
- is_compatible_param = not self.frontend_ip_configurations and not self.backend_address_pools and not self.probes and not self.inbound_nat_pools
- is_compatible_param = is_compatible_param and not load_balancer # the instance should not be exist
- is_compatible_param = is_compatible_param or self.public_ip_address_name or self.probe_protocol or self.natpool_protocol or self.protocol
- if is_compatible_param:
- self.deprecate('Discrete load balancer config settings are deprecated and will be removed.'
- ' Use frontend_ip_configurations, backend_address_pools, probes, inbound_nat_pools lists instead.', version=(2, 9))
- frontend_ip_name = 'frontendip0'
- backend_address_pool_name = 'backendaddrp0'
- prob_name = 'prob0'
- inbound_nat_pool_name = 'inboundnatp0'
- lb_rule_name = 'lbr'
- self.frontend_ip_configurations = [dict(
- name=frontend_ip_name,
- public_ip_address=self.public_ip_address_name
- )]
- self.backend_address_pools = [dict(
- name=backend_address_pool_name
- )]
- self.probes = [dict(
- name=prob_name,
- port=self.probe_port,
- protocol=self.probe_protocol,
- interval=self.probe_interval,
- fail_count=self.probe_fail_count,
- request_path=self.probe_request_path
- )] if self.probe_protocol else None
- self.inbound_nat_pools = [dict(
- name=inbound_nat_pool_name,
- frontend_ip_configuration_name=frontend_ip_name,
- protocol=self.natpool_protocol,
- frontend_port_range_start=self.natpool_frontend_port_start,
- frontend_port_range_end=self.natpool_frontend_port_end,
- backend_port=self.natpool_backend_port
- )] if self.natpool_protocol else None
- self.load_balancing_rules = [dict(
- name=lb_rule_name,
- frontend_ip_configuration=frontend_ip_name,
- backend_address_pool=backend_address_pool_name,
- probe=prob_name,
- protocol=self.protocol,
- load_distribution=self.load_distribution,
- frontend_port=self.frontend_port,
- backend_port=self.backend_port,
- idle_timeout=self.idle_timeout,
- enable_floating_ip=False,
- )] if self.protocol else None
-
# create new load balancer structure early, so it can be easily compared
if not load_balancer:
frontend_ip_configurations_param = [self.network_models.FrontendIPConfiguration(
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_localnetworkgateway.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_localnetworkgateway.py
new file mode 100644
index 000000000..6f88fff94
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_localnetworkgateway.py
@@ -0,0 +1,427 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_localnetworkgateway
+
+version_added: "2.4.0"
+
+short_description: Manage Azure Local Network Gateway in a resource group
+
+description:
+ - Create, update or delete Azure Local Network Gateway in a resource group
+
+options:
+ resource_group:
+ description:
+ - The local network gateway's resource group.
+ type: str
+ required: true
+ name:
+ description:
+ - The name of the local network gateway.
+ type: str
+ required: true
+ location:
+ description:
+ - The location of the local network gateway.
+ type: str
+ local_network_address_space:
+ description:
+ - Local network site address space.
+ type: dict
+ suboptions:
+ address_prefixes:
+ description:
+ - A list of address blocks reserved for this virtual network in CIDR notation.
+ type: list
+ elements: str
+ gateway_ip_address:
+ description:
+ - IP address of local network gateway.
+ type: str
+ fqdn:
+ description:
+ - FQDN of local network gateway.
+ type: str
+ bgp_settings:
+ description:
+ - Local network gateway's BGP speaker settings.
+ type: dict
+ suboptions:
+ asn:
+ description:
+ - The BGP speaker's ASN.
+ type: int
+ bgp_peering_address:
+ description:
+ - The BGP peering address and BGP identifier of this BGP speaker.
+ type: str
+ peer_weight:
+ description:
+ - The weight added to routes learned from this BGP speaker.
+ type: int
+ state:
+ description:
+ - Use C(present) to create or update a local network gateway.
+ - Use C(absent) to delete the local network gateway.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+extends_documentation_fragment:
+ - azure.azcollection.azure
+ - azure.azcollection.azure_tags
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred Sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Create a new local network gateway
+ azure_rm_localnetworkgateway:
+ resource_group: "{{ resource_group }}"
+ name: "localgateway-name"
+ local_network_address_space:
+ address_prefixes:
+ - 10.0.0.0/24
+ - 20.0.0.0/24
+ fqdn: fredtest.com
+ tags:
+ key: value
+ bgp_settings:
+ asn: 8
+ bgp_peering_address: 10.3.0.1
+ peer_weight: 3
+
+- name: Delete local network gateway
+ azure_rm_localnetworkgateway:
+ resource_group: "{{ resource_group }}"
+ name: "localgateway-name"
+ state: absent
+'''
+
+RETURN = '''
+state:
+ description:
+ - Current state of the Azure Local Network Gateway resource.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - The resource ID.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxxx-xxxx/resourceGroups/testRG/providers/Microsoft.Network/localNetworkGateways/testgateway"
+ bgp_settings:
+ description:
+ - Local network gateway's BGP speaker settings.
+ type: complex
+ contains:
+ asn:
+ description:
+ - The BGP speaker's ASN.
+ type: int
+ returned: always
+ sample: 10
+ bgp_peering_address:
+ description:
+ - The BGP peering address and BGP identifier of this BGP speaker.
+ type: str
+ returned: always
+ sample: 10.0.0.3
+ peer_weight:
+ description:
+ - The weight added to routes learned from this BGP speaker.
+ type: int
+ returned: always
+ sample: 0
+ fqdn:
+ description:
+ - FQDN of local network gateway.
+ type: str
+ returned: always
+ sample: testfqdn.com
+ gateway_ip_address:
+ description:
+ - IP address of local network gateway.
+ type: str
+ returned: always
+ sample: 10.1.1.1
+ etag:
+ description:
+ - A unique read-only string that changes whenever the resource is updated.
+ type: str
+ returned: always
+ sample: b5a32693-2e75-49e0-9137-ded19db658d6
+ local_network_address_space:
+ description:
+ - Local network site address space.
+ type: complex
+ contains:
+ address_prefixes:
+ description:
+ - A list of address blocks reserved for this virtual network in CIDR notation.
+ type: list
+ returned: always
+ sample: ["10.0.0.0/24", "20.0.0.0/24"]
+ location:
+ description:
+ - The resource location.
+ type: str
+ returned: always
+ sample: eastus
+ name:
+ description:
+ - The resource name.
+ type: str
+ returned: always
+ sample: testgateway
+ provisioning_state:
+ description:
+ - The provisioning state of the local network gateway resource.
+ type: str
+ returned: always
+ sample: Succeeded
+ tags:
+ description:
+ - The resource tags.
+            type: dict
+ returned: always
+ sample: {'key1': 'value1', 'key2': 'value2'}
+ type:
+ description:
+ - The resource type.
+ type: str
+ returned: always
+ sample: Microsoft.Network/localNetworkGateways
+'''
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.core.exceptions import HttpResponseError
+ from azure.core.polling import LROPoller
+except Exception:
+ # handled in azure_rm_common
+ pass
+
+
+bgp_settings_spec = dict(
+ asn=dict(type='int'),
+ bgp_peering_address=dict(type='str'),
+ peer_weight=dict(type='int'),
+)
+
+
+local_network_address_space_spec = dict(
+ address_prefixes=dict(type='list', elements='str')
+)
+
+
+class AzureRMNetworkGateWay(AzureRMModuleBase):
+    """Utility class to create, update or delete an Azure Local Network Gateway"""
+
+ def __init__(self):
+
+ self.module_arg_spec = dict(
+ name=dict(type='str', required=True),
+ resource_group=dict(type='str', required=True),
+ location=dict(type='str'),
+ local_network_address_space=dict(type='dict', options=local_network_address_space_spec),
+ gateway_ip_address=dict(type='str'),
+ fqdn=dict(type='str'),
+ bgp_settings=dict(type='dict', options=bgp_settings_spec),
+ state=dict(type='str', default='present', choices=['present', 'absent'])
+ )
+
+ self.name = None
+ self.location = None
+ self.local_network_address_space = None
+ self.gateway_ip_address = None
+ self.fqdn = None
+ self.tags = None
+ self.bgp_settings = None
+
+ self.results = dict(
+ changed=False,
+ state=[],
+ )
+ mutually_exclusive = [['gateway_ip_address', 'fqdn']]
+
+ super(AzureRMNetworkGateWay, self).__init__(derived_arg_spec=self.module_arg_spec,
+ supports_check_mode=True,
+ mutually_exclusive=mutually_exclusive,
+ supports_tags=True,
+ facts_module=False)
+
+ def exec_module(self, **kwargs):
+
+ for key in list(self.module_arg_spec) + ['tags']:
+ setattr(self, key, kwargs[key])
+
+ if not self.location:
+ # Set default location
+ resource_group = self.get_resource_group(self.resource_group)
+ self.location = resource_group.location
+
+ old_response = self.get_local_network_gateway()
+ changed = False
+ update_tags = False
+
+ response = None
+ if self.state == 'present':
+ if old_response is not None:
+ if self.fqdn is not None and self.fqdn != old_response['fqdn']:
+ changed = True
+ else:
+ self.fqdn = old_response['fqdn']
+ if self.gateway_ip_address is not None and self.gateway_ip_address != old_response['gateway_ip_address']:
+ changed = True
+ else:
+ self.gateway_ip_address = old_response['gateway_ip_address']
+ if self.bgp_settings is not None and\
+ not all(self.bgp_settings.get(key) == old_response['bgp_settings'].get(key) for key in self.bgp_settings.keys()):
+ changed = True
+ if self.local_network_address_space is not None:
+ if old_response['local_network_address_space'].get('address_prefixes') is not None:
+ new_address = list(set(self.local_network_address_space['address_prefixes'] +
+ old_response['local_network_address_space']['address_prefixes']))
+ if len(new_address) > len(old_response['local_network_address_space'].get('address_prefixes')):
+ changed = True
+ self.local_network_address_space['address_prefixes'] = new_address
+ else:
+ changed = True
+ else:
+ self.local_network_address_space['address_prefixes'] = old_response['local_network_address_space'].get('address_prefixes')
+
+ update_tags, new_tags = self.update_tags(old_response.get('tags'))
+ if update_tags:
+ # response = self.update_local_network_gateway_tags(new_tags)
+ self.fail("Can't update the local network gateway tags, Exception code as AllPropertiesAreReadOnly")
+ changed = True
+ else:
+ changed = True
+
+ local_network_address_space = None
+ if self.local_network_address_space is not None:
+ local_network_address_space = self.network_models.AddressSpace(address_prefixes=self.local_network_address_space['address_prefixes'])
+ bgp_settings = None
+ if self.bgp_settings is not None:
+ bgp_settings = self.network_models.BgpSettings(asn=self.bgp_settings.get('asn'),
+ bgp_peering_address=self.bgp_settings.get('bgp_peering_address'),
+ peer_weight=self.bgp_settings.get('peer_weight'))
+
+ gateway_resource = self.network_models.LocalNetworkGateway(location=self.location,
+ tags=self.tags,
+ gateway_ip_address=self.gateway_ip_address,
+ fqdn=self.fqdn,
+ local_network_address_space=local_network_address_space,
+ bgp_settings=bgp_settings)
+ if changed:
+ if not self.check_mode:
+ response = self.create_or_update_local_network_gateway(gateway_resource)
+
+ if old_response is not None:
+ update_tags, new_tags = self.update_tags(old_response.get('tags'))
+ if update_tags:
+ if not self.check_mode:
+ response = self.update_local_network_gateway_tags(new_tags)
+ changed = True
+ else:
+ if not self.check_mode:
+ if old_response is not None:
+ self.delete_local_network_gateway()
+ changed = True
+ response = None
+ else:
+ changed = True
+
+ if response is None:
+ response = old_response
+ self.results['state'] = response
+ self.results['changed'] = changed
+ return self.results
+
+ def get_local_network_gateway(self):
+ """Gets the specified local network gateway in a resource group"""
+ response = None
+ try:
+ response = self.network_client.local_network_gateways.get(self.resource_group, self.name)
+ except HttpResponseError as ec:
+ self.log("Gets the specified local network gateway in a resource group Failed, Exception as {0}".format(ec))
+ return None
+ return self.format_response(response)
+
+ def create_or_update_local_network_gateway(self, body):
+ """Create or Update local network gateway"""
+ response = None
+ try:
+ response = self.network_client.local_network_gateways.begin_create_or_update(self.resource_group, self.name, body)
+ if isinstance(response, LROPoller):
+ response = self.get_poller_result(response)
+ except HttpResponseError as ec:
+ self.fail("Create or Updated a local network gateway in a resource group Failed, Exception as {0}".format(ec))
+
+ return self.format_response(response)
+
+ def update_local_network_gateway_tags(self, tags):
+ """Updates a local network gateway tags"""
+ response = None
+ try:
+ response = self.network_client.local_network_gateways.update_tags(self.resource_group, self.name, tags)
+ except HttpResponseError as ec:
+ self.fail("Update a local network gateway tags Failed, Exception as {0}".format(ec))
+ return self.format_response(response)
+
+ def delete_local_network_gateway(self):
+ """Deletes the specified local network gateway"""
+ try:
+ self.network_client.local_network_gateways.begin_delete(self.resource_group, self.name)
+ except HttpResponseError as ec:
+ self.fail("Deletes the specified local network gateway Failed, Exception as {0}".format(ec))
+ return None
+
+ def format_response(self, item):
+ result = dict(
+ id=item.id,
+ name=item.name,
+ location=item.location,
+ type=item.type,
+ tags=item.tags,
+ etag=item.etag,
+ local_network_address_space=dict(),
+ gateway_ip_address=item.gateway_ip_address,
+ fqdn=item.fqdn,
+ provisioning_state=item.provisioning_state,
+ bgp_settings=dict(),
+ )
+
+ if item.local_network_address_space is not None:
+ result['local_network_address_space']['address_prefixes'] = item.local_network_address_space.address_prefixes
+ if item.bgp_settings is not None:
+ result['bgp_settings']['asn'] = item.bgp_settings.asn
+ result['bgp_settings']['bgp_peering_address'] = item.bgp_settings.bgp_peering_address
+ result['bgp_settings']['peer_weight'] = item.bgp_settings.peer_weight
+ return result
+
+
+def main():
+ """Main module execution code path"""
+
+ AzureRMNetworkGateWay()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_localnetworkgateway_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_localnetworkgateway_info.py
new file mode 100644
index 000000000..445c2a237
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_localnetworkgateway_info.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_localnetworkgateway_info
+
+version_added: "2.4.0"
+
+short_description: Get or list the specified local network gateway in a resource group
+
+description:
+    - Get or list the specified local network gateway in a resource group.
+
+options:
+ resource_group:
+ description:
+ - The local network gateway's resource group.
+ type: str
+ required: true
+ name:
+ description:
+ - The name of the local network gateway.
+ type: str
+ tags:
+ description:
+ - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
+ type: list
+ elements: str
+
+extends_documentation_fragment:
+ - azure.azcollection.azure
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred Sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Gets the specified local network gateway in a resource group
+ azure_rm_localnetworkgateway_info:
+ resource_group: "{{ resource_group }}"
+ name: "{{ local_networkgateway_name }}"
+
+- name: Gets all the local network gateways in a resource group
+ azure_rm_localnetworkgateway_info:
+ resource_group: "{{ resource_group }}"
+
+- name: Gets all the local network gateways in a resource group and filter by tags
+ azure_rm_localnetworkgateway_info:
+ resource_group: "{{ resource_group }}"
+ tags:
+ - foo
+'''
+
+RETURN = '''
+state:
+ description:
+ - Current state of the Azure Local Network Gateway resource.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - The resource ID.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxxx-xxxx/resourceGroups/testRG/providers/Microsoft.Network/localNetworkGateways/testgateway"
+ bgp_settings:
+ description:
+ - Local network gateway's BGP speaker settings.
+ type: complex
+ contains:
+ asn:
+ description:
+ - The BGP speaker's ASN.
+ type: int
+ returned: always
+ sample: 10
+ bgp_peering_address:
+ description:
+ - The BGP peering address and BGP identifier of this BGP speaker.
+ type: str
+ returned: always
+ sample: 10.0.0.3
+ peer_weight:
+ description:
+ - The weight added to routes learned from this BGP speaker.
+ type: int
+ returned: always
+ sample: 0
+ fqdn:
+ description:
+ - FQDN of local network gateway.
+ type: str
+ returned: always
+ sample: testfqdn.com
+ gateway_ip_address:
+ description:
+ - IP address of local network gateway.
+ type: str
+ returned: always
+ sample: 10.1.1.1
+ etag:
+ description:
+ - A unique read-only string that changes whenever the resource is updated.
+ type: str
+ returned: always
+ sample: b5a32693-2e75-49e0-9137-ded19db658d6
+ local_network_address_space:
+ description:
+ - Local network site address space.
+ type: complex
+ contains:
+ address_prefixes:
+ description:
+ - A list of address blocks reserved for this virtual network in CIDR notation.
+ type: list
+ returned: always
+ sample: ["10.0.0.0/24", "20.0.0.0/24"]
+ location:
+ description:
+ - The resource location.
+ type: str
+ returned: always
+ sample: eastus
+ name:
+ description:
+ - The resource name.
+ type: str
+ returned: always
+ sample: testgateway
+ provisioning_state:
+ description:
+ - The provisioning state of the local network gateway resource.
+ type: str
+ returned: always
+ sample: Succeeded
+ tags:
+ description:
+ - The resource tags.
+            type: dict
+ returned: always
+ sample: {'key1': 'value1', 'key2': 'value2'}
+ type:
+ description:
+ - The resource type.
+ type: str
+ returned: always
+ sample: Microsoft.Network/localNetworkGateways
+'''
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.core.exceptions import HttpResponseError
+except Exception:
+ # handled in azure_rm_common
+ pass
+
+
+class AzureRMNetworkGateWayInfo(AzureRMModuleBase):
+    """Utility class to get Azure Local Network Gateway facts"""
+
+ def __init__(self):
+
+ self.module_args = dict(
+ name=dict(type='str'),
+ resource_group=dict(type='str', required=True),
+ tags=dict(type='list', elements='str'),
+ )
+
+ self.name = None
+ self.tags = None
+
+ self.results = dict(
+ changed=False,
+ state=[],
+ )
+
+ super(AzureRMNetworkGateWayInfo, self).__init__(derived_arg_spec=self.module_args,
+ supports_check_mode=True,
+ supports_tags=False,
+ facts_module=True)
+
+ def exec_module(self, **kwargs):
+
+ for key in self.module_args:
+ setattr(self, key, kwargs[key])
+
+ if self.name is not None:
+ self.results['state'] = self.get_local_network_gateway()
+ else:
+ self.results['state'] = self.list_local_network_gateway()
+
+ return self.results
+
+ def get_local_network_gateway(self):
+ """Gets the specified local network gateway in a resource group"""
+ response = None
+
+ try:
+ response = self.network_client.local_network_gateways.get(self.resource_group, self.name)
+ except HttpResponseError as ec:
+ self.log("Gets the specified local network gateway in a resource group Failed, Exception as {0}".format(ec))
+ if response and self.has_tags(response.tags, self.tags):
+ return [self.format_response(response)]
+ else:
+ return []
+
+ def list_local_network_gateway(self):
+ """Gets all the local network gateways in a resource group"""
+ response = None
+
+ try:
+ response = self.network_client.local_network_gateways.list(self.resource_group)
+ except HttpResponseError as ec:
+ self.log("Gets all the local network gateways in a resource group Failed, Exception as {0}".format(ec))
+
+ if response:
+ results = []
+ for item in response:
+ if self.has_tags(item.tags, self.tags):
+ results.append(self.format_response(item))
+ return results
+ else:
+ return []
+
+ def format_response(self, item):
+ result = dict(
+ id=item.id,
+ name=item.name,
+ location=item.location,
+ type=item.type,
+ tags=item.tags,
+ etag=item.etag,
+ local_network_address_space=dict(),
+ gateway_ip_address=item.gateway_ip_address,
+ fqdn=item.fqdn,
+ provisioning_state=item.provisioning_state,
+ bgp_settings=dict(),
+ )
+
+ if item.local_network_address_space is not None:
+ result['local_network_address_space']['address_prefixes'] = item.local_network_address_space.address_prefixes
+ if item.bgp_settings is not None:
+ result['bgp_settings']['asn'] = item.bgp_settings.asn
+ result['bgp_settings']['bgp_peering_address'] = item.bgp_settings.bgp_peering_address
+ result['bgp_settings']['peer_weight'] = item.bgp_settings.peer_weight
+ return result
+
+
+def main():
+ """Main module execution code path"""
+
+ AzureRMNetworkGateWayInfo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_networkinterface.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_networkinterface.py
index 3343d5ac1..beba7810b 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_networkinterface.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_networkinterface.py
@@ -79,47 +79,6 @@ options:
- Windows
- Linux
default: Linux
- private_ip_address:
- description:
- - (Deprecate) Valid IPv4 address that falls within the specified subnet.
- - This option will be deprecated in 2.9, use I(ip_configurations) instead.
- type: str
- private_ip_allocation_method:
- description:
- - (Deprecate) Whether or not the assigned IP address is permanent.
- - When creating a network interface, if you specify I(private_ip_address=Static), you must provide a value for I(private_ip_address).
- - You can update the allocation method to C(Static) after a dynamic private IP address has been assigned.
- - This option will be deprecated in 2.9, use I(ip_configurations) instead.
- default: Dynamic
- type: str
- choices:
- - Dynamic
- - Static
- public_ip:
- description:
- - (Deprecate) When creating a network interface, if no public IP address name is provided a default public IP address will be created.
- - Set to C(false) if you do not want a public IP address automatically created.
- - This option will be deprecated in 2.9, use I(ip_configurations) instead.
- type: bool
- default: 'yes'
- public_ip_address_name:
- description:
- - (Deprecate) Name of an existing public IP address object to associate with the security group.
- - This option will be deprecated in 2.9, use I(ip_configurations) instead.
- type: str
- aliases:
- - public_ip_address
- - public_ip_name
- public_ip_allocation_method:
- description:
- - (Deprecate) If a I(public_ip_address_name) is not provided, a default public IP address will be created.
- - The allocation method determines whether or not the public IP address assigned to the network interface is permanent.
- - This option will be deprecated in 2.9, use I(ip_configurations) instead.
- type: str
- choices:
- - Dynamic
- - Static
- default: Dynamic
ip_configurations:
description:
- List of IP configurations. Each configuration object should include
@@ -323,7 +282,6 @@ EXAMPLES = '''
virtual_network: vnet001
subnet_name: subnet001
create_with_security_group: false
- public_ip: false
ip_configurations:
- name: default
primary: true
@@ -614,13 +572,8 @@ class AzureRMNetworkInterface(AzureRMModuleBase):
create_with_security_group=dict(type='bool', default=True),
security_group=dict(type='raw', aliases=['security_group_name']),
state=dict(default='present', choices=['present', 'absent']),
- private_ip_address=dict(type='str'),
- private_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static'], default='Dynamic'),
- public_ip_address_name=dict(type='str', aliases=['public_ip_address', 'public_ip_name']),
- public_ip=dict(type='bool', default=True),
subnet_name=dict(type='str', aliases=['subnet']),
virtual_network=dict(type='raw', aliases=['virtual_network_name']),
- public_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static'], default='Dynamic'),
ip_configurations=dict(type='list', default=[], elements='dict', options=ip_configuration_spec),
os_type=dict(type='str', choices=['Windows', 'Linux'], default='Linux'),
open_ports=dict(type='list', elements='str'),
@@ -638,13 +591,8 @@ class AzureRMNetworkInterface(AzureRMModuleBase):
self.create_with_security_group = None
self.enable_accelerated_networking = None
self.security_group = None
- self.private_ip_address = None
- self.private_ip_allocation_method = None
- self.public_ip_address_name = None
- self.public_ip = None
self.subnet_name = None
self.virtual_network = None
- self.public_ip_allocation_method = None
self.state = None
self.tags = None
self.os_type = None
@@ -703,17 +651,13 @@ class AzureRMNetworkInterface(AzureRMModuleBase):
if len(asgs) > 0:
config['application_security_groups'] = asgs
+ # If ip_configurations is not specified then provide the default
+ # private interface
if self.state == 'present' and not self.ip_configurations:
- # construct the ip_configurations array for compatible
- self.deprecate('Setting ip_configuration flatten is deprecated and will be removed.'
- ' Using ip_configurations list to define the ip configuration', version=(2, 9))
self.ip_configurations = [
dict(
- private_ip_address=self.private_ip_address,
- private_ip_allocation_method=self.private_ip_allocation_method,
- public_ip_address_name=self.public_ip_address_name if self.public_ip else None,
- public_ip_allocation_method=self.public_ip_allocation_method,
name='default',
+ private_ip_allocation_method='Dynamic',
primary=True
)
]
@@ -875,7 +819,7 @@ class AzureRMNetworkInterface(AzureRMModuleBase):
def get_or_create_public_ip_address(self, ip_config):
name = ip_config.get('public_ip_address_name')
- if not (self.public_ip and name):
+ if not name:
return None
pip = self.get_public_ip_address(name)
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexiblefirewallrule.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexiblefirewallrule.py
index c73843c46..1fba876f9 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexiblefirewallrule.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexiblefirewallrule.py
@@ -113,8 +113,6 @@ try:
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
from azure.core.exceptions import ResourceNotFoundError
from azure.core.polling import LROPoller
- import logging
- logging.basicConfig(filename='log.log', level=logging.INFO)
except ImportError:
# This is handled in azure_rm_common
pass
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexibleserver.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexibleserver.py
index 335dc53c8..9d443deec 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexibleserver.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexibleserver.py
@@ -201,6 +201,42 @@ options:
- Whether to start the PostgreSQL server.
type: bool
default: False
+ identity:
+ description:
+ - Identity for the Server.
+ type: dict
+ version_added: '2.4.0'
+ suboptions:
+ type:
+ description:
+ - Type of the managed identity
+ required: false
+ choices:
+ - UserAssigned
+ - None
+ default: None
+ type: str
+ user_assigned_identities:
+ description:
+ - User Assigned Managed Identities and its options
+ required: false
+ type: dict
+ default: {}
+ suboptions:
+ id:
+ description:
+ - List of the user assigned identities IDs associated to the VM
+ required: false
+ type: list
+ elements: str
+ default: []
+ append:
+ description:
+ - If the list of identities has to be appended to current identities (true) or if it has to replace current identities (false)
+ required: false
+ type: bool
+ default: True
+
extends_documentation_fragment:
- azure.azcollection.azure
@@ -489,6 +525,7 @@ servers:
try:
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+ import azure.mgmt.rdbms.postgresql_flexibleservers.models as PostgreSQLFlexibleModels
from azure.core.exceptions import ResourceNotFoundError
from azure.core.polling import LROPoller
except ImportError:
@@ -534,6 +571,18 @@ storage_spec = dict(
)
+user_assigned_identities_spec = dict(
+ id=dict(type='list', default=[], elements='str'),
+ append=dict(type='bool', default=True)
+)
+
+
+managed_identity_spec = dict(
+ type=dict(type='str', choices=['UserAssigned', 'None'], default='None'),
+ user_assigned_identities=dict(type='dict', options=user_assigned_identities_spec, default={}),
+)
+
+
class AzureRMPostgreSqlFlexibleServers(AzureRMModuleBase):
"""Configuration class for an Azure RM PostgreSQL Flexible Server resource"""
@@ -613,6 +662,7 @@ class AzureRMPostgreSqlFlexibleServers(AzureRMModuleBase):
source_server_resource_id=dict(
type='str'
),
+ identity=dict(type='dict', options=managed_identity_spec),
state=dict(
type='str',
default='present',
@@ -628,6 +678,7 @@ class AzureRMPostgreSqlFlexibleServers(AzureRMModuleBase):
self.is_start = None
self.is_stop = None
self.is_restart = None
+ self.identity = None
self.results = dict(changed=False)
self.state = None
@@ -663,6 +714,10 @@ class AzureRMPostgreSqlFlexibleServers(AzureRMModuleBase):
self.log("PostgreSQL Flexible Server instance doesn't exist")
if self.state == 'present':
if not self.check_mode:
+ if self.identity:
+ update_identity, new_identity = self.update_identities({})
+ if update_identity:
+ self.parameters['identity'] = new_identity
response = self.create_postgresqlflexibleserver(self.parameters)
if self.is_stop:
self.stop_postgresqlflexibleserver()
@@ -712,6 +767,12 @@ class AzureRMPostgreSqlFlexibleServers(AzureRMModuleBase):
else:
self.update_parameters['maintenance_window'][key] = old_response['maintenance_window'].get(key)
+ if self.identity:
+ update_identity, new_identity = self.update_identities(old_response.get('identity', {}))
+ if update_identity:
+ self.update_parameters['identity'] = new_identity
+ update_flag = True
+
update_tags, new_tags = self.update_tags(old_response['tags'])
self.update_parameters['tags'] = new_tags
if update_tags:
@@ -915,9 +976,50 @@ class AzureRMPostgreSqlFlexibleServers(AzureRMModuleBase):
result['maintenance_window']['start_minute'] = item.maintenance_window.start_minute
result['maintenance_window']['start_hour'] = item.maintenance_window.start_hour
result['maintenance_window']['day_of_week'] = item.maintenance_window.day_of_week
+ if item.identity is not None:
+ result['identity'] = item.identity.as_dict()
+ else:
+ result['identity'] = PostgreSQLFlexibleModels.UserAssignedIdentity(type='None').as_dict()
return result
+ def update_identities(self, curr_identity):
+ new_identities = []
+ changed = False
+ current_managed_type = curr_identity.get('type', 'None')
+ current_managed_identities = set(curr_identity.get('user_assigned_identities', {}).keys())
+ param_identity = self.module.params.get('identity')
+ param_identities = set(param_identity.get('user_assigned_identities', {}).get('id', []))
+ new_identities = param_identities
+
+ # If type set to None, and Resource has None, nothing to do
+ if 'None' in param_identity.get('type') and current_managed_type == 'None':
+ pass
+ # If type set to None, and Resource has current identities, remove UserAssigned identities
+ elif param_identity.get('type') == 'None':
+ changed = True
+ # If type in module args contains 'UserAssigned'
+ elif 'UserAssigned' in param_identity.get('type'):
+ if param_identity.get('user_assigned_identities', {}).get('append', False) is True:
+ new_identities = param_identities.union(current_managed_identities)
+ if len(current_managed_identities) != len(new_identities):
+ # update identities
+ changed = True
+ # If new identities have to overwrite current identities
+ else:
+ # Check if module args identities are different as current ones
+ if current_managed_identities.difference(new_identities) != set():
+ changed = True
+
+ # Append identities to the model
+ user_assigned_identities_dict = {uami: dict() for uami in new_identities}
+ new_identity = PostgreSQLFlexibleModels.UserAssignedIdentity(
+ type=param_identity.get('type'),
+ user_assigned_identities=user_assigned_identities_dict
+ )
+
+ return changed, new_identity
+
def main():
"""Main execution"""
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexibleserver_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexibleserver_info.py
index 50fe9adc5..8d5f2b636 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexibleserver_info.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlflexibleserver_info.py
@@ -282,6 +282,30 @@ servers:
returned: always
sample: null
type: str
+ identity:
+ description:
+ - Identity for the Server.
+ type: complex
+ returned: when available
+ contains:
+ type:
+ description:
+ - Type of the managed identity
+ returned: always
+ sample: UserAssigned
+ type: str
+ user_assigned_identities:
+ description:
+ - User Assigned Managed Identities and its options
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Dict of the user assigned identities IDs associated to the Resource
+ returned: always
+ type: dict
+ elements: dict
tags:
description:
- Tags assigned to the resource. Dictionary of string:string pairs.
@@ -293,6 +317,7 @@ servers:
try:
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+ import azure.mgmt.rdbms.postgresql_flexibleservers.models as PostgreSQLFlexibleModels
from azure.core.exceptions import ResourceNotFoundError
except ImportError:
# This is handled in azure_rm_common
@@ -431,6 +456,10 @@ class AzureRMPostgreSqlFlexibleServersInfo(AzureRMModuleBase):
result['maintenance_window']['start_minute'] = item.maintenance_window.start_minute
result['maintenance_window']['start_hour'] = item.maintenance_window.start_hour
result['maintenance_window']['day_of_week'] = item.maintenance_window.day_of_week
+ if item.identity is not None:
+ result['identity'] = item.identity.as_dict()
+ else:
+ result['identity'] = PostgreSQLFlexibleModels.UserAssignedIdentity(type='None').as_dict()
return result
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_rediscache_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_rediscache_info.py
index 782fb0417..d1019dd65 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_rediscache_info.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_rediscache_info.py
@@ -198,13 +198,13 @@ rediscaches:
contains:
primary:
description:
- - The current primary key that clients can use to authenticate the Redis cahce.
+ - The current primary key that clients can use to authenticate the Redis cache.
returned: always
type: str
sample: X2xXXxx7xxxxxx5xxxx0xxxxx75xxxxxxxxXXXxxxxx=
secondary:
description:
- - The current secondary key that clients can use to authenticate the Redis cahce.
+ - The current secondary key that clients can use to authenticate the Redis cache.
returned: always
type: str
sample: X2xXXxx7xxxxxx5xxxx0xxxxx75xxxxxxxxXXXxxxxx=
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidatabase.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidatabase.py
new file mode 100644
index 000000000..83d40fc5a
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidatabase.py
@@ -0,0 +1,392 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_sqlmidatabase
+version_added: "2.4.0"
+short_description: Manage SQL Managed Instance databases
+description:
+ - Manage SQL Managed Instance databases.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource.
+ type: str
+ required: true
+ managed_instance_name:
+ description:
+ - The name of the SQL managed instance.
+ type: str
+ required: true
+ database_name:
+ description:
+ - The name of the SQL managed instance database.
+ type: str
+ required: true
+ collation:
+ description:
+ - The collation of the Azure SQL Managed Database collation to use.
+ - For example C(SQL_Latin1_General_CP1_CI_AS) or C(Latin1_General_100_CS_AS_SC).
+ type: str
+ location:
+ description:
+ - The resource location.
+ type: str
+ state:
+ description:
+ - State of the SQL Managed Database.
+ - Use C(present) to create or update a SQL Managed Instance database and use C(absent) to delete.
+ type: str
+ default: present
+ choices:
+ - present
+ - absent
+extends_documentation_fragment:
+ - azure.azcollection.azure
+ - azure.azcollection.azure_tags
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred-sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Create a SQL managed instance database
+ azure_rm_sqlmidatabase:
+ resource_group: testrg
+ managed_instance_name: testinstancename
+ database_name: newdatabase
+ collation: SQL_Latin1_General_CP1_CI_AS
+ location: eastus
+ tags:
+ key2: value2
+
+- name: Delete the SQL managed instance database
+ azure_rm_sqlmidatabase:
+ resource_group: testrg
+ managed_instance_name: testinstancename
+ database_name: newdatabase
+ state: absent
+'''
+
+RETURN = '''
+database:
+ description:
+ - A dictionary containing facts for SQL Managed Instance database info.
+ returned: always
+ type: complex
+ contains:
+ auto_complete_restore:
+ description:
+ - Whether to auto complete restore of this managed database.
+ type: bool
+ returned: always
+ sample: null
+ catalog_collation:
+ description:
+ - Collation of the metadata catalog.
+ type: str
+ returned: always
+ sample: null
+ create_mode:
+ description:
+ - Managed database create mode.
+ type: str
+ returned: always
+ sample: null
+ create_date:
+ description:
+ - Creation date of the database.
+ type: str
+ returned: always
+ sample: "2024-05-06T23:59:49.770Z"
+ database_name:
+ description:
+ - The SQL managed instance database name.
+ type: str
+ returned: always
+ sample: fredtest
+ default_secondary_location:
+ description:
+ - Geo paired region.
+ type: str
+ returned: always
+ sample: westus
+ id:
+ description:
+ - The resource ID.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxx-xxxx/resourceGroups/testRG/providers/Microsoft.Sql/managedInstances/fredsqlmin/databases/fredtest"
+ last_backup_name:
+ description:
+ - Last backup file name for restore of this managed database.
+ type: str
+ returned: always
+ sample: null
+ location:
+ description:
+ - The resource's location.
+ type: str
+ returned: always
+ sample: eastus
+ long_term_retention_backup_resource_id:
+ description:
+ - The name of the Long Term Retention backup to be used for restore of this managed database.
+ type: str
+ returned: always
+ sample: null
+ managed_instance_name:
+ description:
+ - The name of the SQL managed instance.
+ type: str
+ returned: always
+ sample: fredsqlmin
+ recoverable_database_id:
+ description:
+ - The resource identifier of the recoverable database associated with the database.
+ type: str
+ returned: always
+ sample: null
+ resource_group:
+ description:
+ - The resource's resource group.
+ type: str
+ returned: always
+ sample: testRG
+ restorable_dropped_database_id:
+ description:
+ - The restorable dropped database resource id.
+ type: str
+ returned: always
+ sample: null
+ restore_point_in_time:
+ description:
+ - Specifies the point in time (ISO8601 format) of the source database.
+ type: str
+ returned: always
+ sample: null
+ source_database_id:
+ description:
+ - The resource identifier of the source database associated with create operation of this database.
+ type: str
+ returned: always
+ sample: null
+ status:
+ description:
+ - Status of the database.
+ type: str
+ returned: always
+ sample: online
+ storage_container_sas_token:
+ description:
+ - Specifies the storage container sas token.
+ type: str
+ returned: always
+ sample: null
+ storage_container_uri:
+ description:
+ - Specifies the uri of the storage container where backups for this restore are stored.
+ type: str
+ returned: always
+ sample: null
+ tags:
+ description:
+ - The resource's tags
+ type: str
+ returned: always
+ sample: {key1: value1}
+ type:
+ description:
+ - The resource type.
+ type: str
+ returned: always
+ sample: "Microsoft.Sql/managedInstances/databases"
+
+'''
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.core.exceptions import HttpResponseError
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMSqlMIDatabase(AzureRMModuleBase):
+ def __init__(self):
+ # define user inputs into argument
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True,
+ ),
+ managed_instance_name=dict(
+ type='str',
+ required=True,
+ ),
+ database_name=dict(
+ type='str',
+ required=True,
+ ),
+ collation=dict(
+ type='str'
+ ),
+ location=dict(
+ type='str'
+ ),
+ state=dict(
+ type='str',
+ choices=['present', 'absent'],
+ default='present'
+ ),
+ )
+ # store the results of the module operation
+ self.results = dict(
+ changed=False
+ )
+ self.resource_group = None
+ self.managed_instance_name = None
+ self.database_name = None
+ self.state = None
+ self.parameters = dict()
+
+ super(AzureRMSqlMIDatabase, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=True, facts_module=False)
+
+ def exec_module(self, **kwargs):
+ for key in list(self.module_arg_spec.keys()) + ['tags']:
+ if hasattr(self, key):
+ setattr(self, key, kwargs[key])
+ elif kwargs.get(key) is not None:
+ self.parameters[key] = kwargs.get(key)
+
+ changed = False
+ resource_group = self.get_resource_group(self.resource_group)
+ if self.parameters.get('location') is None:
+ # Set default location
+ self.parameters['location'] = resource_group.location
+
+ old_response = self.get()
+ if old_response is None:
+ if self.state == 'present':
+ changed = True
+ if not self.check_mode:
+ self.results['database'] = self.create_database()
+ else:
+ update_tags, tags = self.update_tags(old_response.get('tags'))
+ if update_tags:
+ changed = True
+ self.parameters['tags'] = tags
+ for key in self.parameters.keys():
+ if key != 'tags' and self.parameters[key] != old_response.get(key):
+ self.fail("The collation and location cannot be updated")
+ if self.state == 'present':
+ if changed and not self.check_mode:
+ self.results['database'] = self.update_database()
+ else:
+ self.results['database'] = old_response
+ else:
+ changed = True
+ if not self.check_mode:
+ self.results['database'] = self.delete_database()
+
+ self.results['changed'] = changed
+ return self.results
+
+ def create_database(self):
+ response = None
+ try:
+ response = self.sql_client.managed_databases.begin_create_or_update(resource_group_name=self.resource_group,
+ managed_instance_name=self.managed_instance_name,
+ database_name=self.database_name,
+ parameters=self.parameters)
+ self.log("Response : {0}".format(response))
+ except HttpResponseError as ec:
+ self.fail('Create the SQL managed instance database failed, exception as {0}'.format(ec))
+
+ return self.format_item(self.get_poller_result(response))
+
+ def update_database(self):
+ response = None
+ try:
+ response = self.sql_client.managed_databases.begin_update(resource_group_name=self.resource_group,
+ managed_instance_name=self.managed_instance_name,
+ database_name=self.database_name,
+ parameters=self.parameters)
+ self.log("Response : {0}".format(response))
+ except HttpResponseError as ec:
+ self.fail('Update the SQL managed instance database failed, exception as {0}'.format(ec))
+
+ return self.format_item(self.get_poller_result(response))
+
+ def get(self):
+ response = None
+ try:
+ response = self.sql_client.managed_databases.get(resource_group_name=self.resource_group,
+ managed_instance_name=self.managed_instance_name,
+ database_name=self.database_name)
+ self.log("Response : {0}".format(response))
+ except HttpResponseError as ec:
+ self.log('Could not get facts for SQL managed instance database. Exception as {0}'.format(ec))
+
+ return self.format_item(response)
+
+ def delete_database(self):
+ response = None
+ try:
+ response = self.sql_client.managed_databases.begin_delete(resource_group_name=self.resource_group,
+ managed_instance_name=self.managed_instance_name,
+ database_name=self.database_name)
+ self.log("Response : {0}".format(response))
+ except HttpResponseError as ec:
+ self.fail('Delete the SQL managed instance database failed. Exception as {0}'.format(ec))
+
+ return self.format_item(self.get_poller_result(response))
+
+ def format_item(self, item):
+ if item is None:
+ return
+ d = item.as_dict()
+ d = {
+ 'resource_group': self.resource_group,
+ 'managed_instance_name': self.managed_instance_name,
+ 'database_name': d.get('name'),
+ 'id': d.get('id', None),
+ 'type': d.get('type', None),
+ 'location': d.get('location'),
+ 'tags': d.get('tags'),
+ 'collation': d.get('collation'),
+ 'status': d.get('status'),
+ 'creation_date': d.get('creation_date'),
+ 'restore_point_in_time': d.get('restore_point_in_time'),
+ 'default_secondary_location': d.get('default_secondary_location'),
+ 'catalog_collation': d.get('catalog_collation'),
+ 'create_mode': d.get('create_mode'),
+ 'storage_container_uri': d.get('storage_container_uri'),
+ 'source_database_id': d.get('source_database_id'),
+ 'restorable_dropped_database_id': d.get('restorable_dropped_database_id'),
+ 'storage_container_sas_token': d.get('storage_container_sas_token'),
+ 'recoverable_database_id': d.get('recoverable_database_id'),
+ 'long_term_retention_backup_resource_id': d.get('long_term_retention_backup_resource_id'),
+ 'auto_complete_restore': d.get('auto_complete_restore'),
+ 'last_backup_name': d.get('last_backup_name')
+ }
+ return d
+
+
+def main():
+ AzureRMSqlMIDatabase()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidatabase_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidatabase_info.py
new file mode 100644
index 000000000..d6fe211a0
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidatabase_info.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_sqlmidatabase_info
+version_added: "2.4.0"
+short_description: Get Azure SQL managed instance database facts
+description:
+ - Get facts of Azure SQL managed instance database facts.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource.
+ type: str
+ required: true
+ managed_instance_name:
+ description:
+ - The name of the SQL managed instance.
+ type: str
+ required: true
+ database_name:
+ description:
+ - The name of the SQL managed instance database.
+ type: str
+ tags:
+ description:
+ - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
+ type: list
+ elements: str
+extends_documentation_fragment:
+ - azure.azcollection.azure
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred-sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Get SQL managed instance database by name
+ azure_rm_sqlmidatabase_info:
+ resource_group: testrg
+ managed_instance_name: testinstancename
+ database_name: newdatabase
+'''
+
+RETURN = '''
+database:
+ description:
+ - A dictionary containing facts for SQL Managed Instance database info.
+ returned: always
+ type: complex
+ contains:
+ auto_complete_restore:
+ description:
+ - Whether to auto complete restore of this managed database.
+ type: bool
+ returned: always
+ sample: null
+ catalog_collation:
+ description:
+ - Collation of the metadata catalog.
+ type: str
+ returned: always
+ sample: null
+ create_mode:
+ description:
+ - Managed database create mode.
+ type: str
+ returned: always
+ sample: null
+ create_date:
+ description:
+ - Creation date of the database.
+ type: str
+ returned: always
+ sample: "2024-05-06T23:59:49.770Z"
+ database_name:
+ description:
+ - The SQL managed instance database name.
+ type: str
+ returned: always
+ sample: fredtest
+ default_secondary_location:
+ description:
+ - Geo paired region.
+ type: str
+ returned: always
+ sample: westus
+ id:
+ description:
+ - The resource ID.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxx-xxxx/resourceGroups/testRG/providers/Microsoft.Sql/managedInstances/fredsqlmin/databases/fredtest"
+ last_backup_name:
+ description:
+ - Last backup file name for restore of this managed database.
+ type: str
+ returned: always
+ sample: null
+ location:
+ description:
+ - The resource's location.
+ type: str
+ returned: always
+ sample: eastus
+ long_term_retention_backup_resource_id:
+ description:
+ - The name of the Long Term Retention backup to be used for restore of this managed database.
+ type: str
+ returned: always
+ sample: null
+ managed_instance_name:
+ description:
+ - The name of the SQL managed instance.
+ type: str
+ returned: always
+ sample: fredsqlmin
+ recoverable_database_id:
+ description:
+ - The resource identifier of the recoverable database associated with the database.
+ type: str
+ returned: always
+ sample: null
+ resource_group:
+ description:
+ - The resource's resource group.
+ type: str
+ returned: always
+ sample: testRG
+ restorable_dropped_database_id:
+ description:
+ - The restorable dropped database resource id.
+ type: str
+ returned: always
+ sample: null
+ restore_point_in_time:
+ description:
+ - Specifies the point in time (ISO8601 format) of the source database.
+ type: str
+ returned: always
+ sample: null
+ source_database_id:
+ description:
+ - The resource identifier of the source database associated with create operation of this database.
+ type: str
+ returned: always
+ sample: null
+ status:
+ description:
+ - Status of the database.
+ type: str
+ returned: always
+ sample: online
+ storage_container_sas_token:
+ description:
+ - Specifies the storage container sas token.
+ type: str
+ returned: always
+ sample: null
+ storage_container_uri:
+ description:
+ - Specifies the uri of the storage container where backups for this restore are stored.
+ type: str
+ returned: always
+ sample: null
+ tags:
+ description:
+ - The resource's tags
+ type: str
+ returned: always
+ sample: {key1: value1}
+ type:
+ description:
+ - The resource type.
+ type: str
+ returned: always
+ sample: "Microsoft.Sql/managedInstances/databases"
+'''
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.core.exceptions import HttpResponseError
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMSqlMIDatabaseInfo(AzureRMModuleBase):
+ def __init__(self):
+ # define user inputs into argument
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True,
+ ),
+ managed_instance_name=dict(
+ type='str',
+ required=True,
+ ),
+ database_name=dict(
+ type='str',
+ ),
+ tags=dict(
+ type='list',
+ elements='str'
+ ),
+ )
+ # store the results of the module operation
+ self.results = dict(
+ changed=False
+ )
+ self.resource_group = None
+ self.managed_instance_name = None
+ self.database_name = None
+ self.tags = None
+
+ super(AzureRMSqlMIDatabaseInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True)
+
+ def exec_module(self, **kwargs):
+ for key in self.module_arg_spec:
+ setattr(self, key, kwargs[key])
+
+ if self.database_name is not None:
+ self.results['database'] = self.get()
+ else:
+ self.results['database'] = self.list_by_instance()
+ return self.results
+
+ def list_by_instance(self):
+ response = None
+ results = []
+ try:
+ response = self.sql_client.managed_databases.list_by_instance(resource_group_name=self.resource_group,
+ managed_instance_name=self.managed_instance_name)
+ self.log("Response : {0}".format(response))
+ except HttpResponseError:
+ self.log('Could not get facts for SQL managed instance database.')
+
+ if response is not None:
+ for item in response:
+ if self.has_tags(item.tags, self.tags):
+ results.append(self.format_item(item))
+ return results
+
+ def get(self):
+ response = None
+ try:
+ response = self.sql_client.managed_databases.get(resource_group_name=self.resource_group,
+ managed_instance_name=self.managed_instance_name,
+ database_name=self.database_name)
+ self.log("Response : {0}".format(response))
+ except HttpResponseError as ec:
+ self.log('Could not get facts for SQL managed instance database. Exception as {0}'.format(ec))
+
+ if response is not None and self.has_tags(response.tags, self.tags):
+ return [self.format_item(response)]
+
+ def format_item(self, item):
+ d = item.as_dict()
+ d = {
+ 'resource_group': self.resource_group,
+ 'managed_instance_name': self.managed_instance_name,
+ 'database_name': d.get('name'),
+ 'id': d.get('id', None),
+ 'type': d.get('type', None),
+ 'location': d.get('location'),
+ 'tags': d.get('tags'),
+ 'collation': d.get('collation'),
+ 'status': d.get('status'),
+ 'creation_date': d.get('creation_date'),
+ 'earliest_restore_point': d.get('earliest_restore_point'),
+ 'restore_point_in_time': d.get('restore_point_in_time'),
+ 'default_secondary_location': d.get('default_secondary_location'),
+ 'catalog_collation': d.get('catalog_collation'),
+ 'create_mode': d.get('create_mode'),
+ 'storage_container_uri': d.get('storage_container_uri'),
+ 'source_database_id': d.get('source_database_id'),
+ 'restorable_dropped_database_id': d.get('restorable_dropped_database_id'),
+ 'storage_container_sas_token': d.get('storage_container_sas_token'),
+ 'failover_group_id': d.get('failover_group_id'),
+ 'recoverable_database_id': d.get('recoverable_database_id'),
+ 'long_term_retention_backup_resource_id': d.get('long_term_retention_backup_resource_id'),
+ 'auto_complete_restore': d.get('auto_complete_restore'),
+ 'last_backup_name': d.get('last_backup_name')
+ }
+ return d
+
+
+def main():
+ AzureRMSqlMIDatabaseInfo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidblongtermretentionpolicy.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidblongtermretentionpolicy.py
new file mode 100644
index 000000000..04b05b1c2
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidblongtermretentionpolicy.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_sqlmidblongtermretentionpolicy
+version_added: "2.4.0"
+short_description: Manage Azure SQL Managed Instance long-term backup retention
+description:
+ - Manage Azure SQL Managed Instance long-term backup retention.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource.
+ type: str
+ required: true
+ managed_instance_name:
+ description:
+ - The name of the SQL managed instance.
+ type: str
+ required: true
+ database_name:
+ description:
+ - The name of the SQL managed instance database.
+ type: str
+ required: true
+ policy_name:
+ description:
+ - The name of the SQL managed instance long term retention policy.
+ type: str
+ required: true
+ choices:
+ - default
+ monthly_retention:
+ description:
+ - The monthly retention policy for an LTR backup in an ISO 8601 format.
+ type: str
+ yearly_retention:
+ description:
+ - The yearly retention policy for an LTR backup in an ISO 8601 format.
+ type: str
+ weekly_retention:
+ description:
+ - The weekly retention policy for an LTR backup in an ISO 8601 format.
+ type: str
+ week_of_year:
+ description:
+ - The week of year to take the yearly backup in an ISO 8601 format.
+ type: int
+
+extends_documentation_fragment:
+ - azure.azcollection.azure
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred-sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Update SQL managed instance long term retention policy's retention_days
+ azure_rm_sqlmidblongtermretentionpolicy:
+ resource_group: testrg
+ managed_instance_name: testinstancename
+ database_name: newdatabase
+ policy_name: default
+ monthly_retention: P3M
+ week_of_year: 17
+ weekly_retention: P13W
+ yearly_retention: P6Y
+'''
+
+RETURN = '''
+long_term_retention_policy:
+ description:
+    - A dictionary containing facts for SQL Managed Instance Long Term Retention Policies.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Sql/
+                   managedInstances/fredsqlmi/databases/newdatabase/backupLongTermRetentionPolicies/default"
+ database_name:
+ description:
+ - SQL managed instance database name.
+ returned: always
+ type: str
+ sample: newdatabase
+ policy_name:
+ description:
+            - SQL managed instance long term retention policy name.
+ returned: always
+ type: str
+ sample: default
+ managed_instance_name:
+ description:
+ - SQL managed instance name.
+ returned: always
+ type: str
+ sample: testmanagedinstance
+ type:
+ description:
+ - The SQL managed instance type.
+ type: str
+ returned: always
+ sample: "Microsoft.Sql/managedInstances"
+ resource_group:
+ description:
+ - The resource relate resource group.
+ type: str
+ returned: always
+ sample: testRG
+ week_of_year:
+ description:
+ - The week of year to take the yearly backup in an ISO 8601 format.
+ type: int
+ sample: 7
+ returned: always
+ weekly_retention:
+ description:
+ - The weekly retention policy for an LTR backup in an ISO 8601 format.
+ type: str
+ sample: P13W
+ returned: always
+ monthly_retention:
+ description:
+ - The monthly retention policy for an LTR backup in an ISO 8601 format.
+ type: str
+ sample: P3M
+ returned: always
+ yearly_retention:
+ description:
+ - The yearly retention policy for an LTR backup in an ISO 8601 format.
+ type: str
+ sample: P6Y
+ returned: always
+'''
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.core.exceptions import HttpResponseError
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMSqMILongTermRetentionPolicy(AzureRMModuleBase):
+    """Create or update the long term retention (LTR) policy of a SQL
+    Managed Instance database.  The policy is a singleton whose only valid
+    name is 'default'; there is no delete operation for it."""
+
+    def __init__(self):
+        # define user inputs into argument
+        self.module_arg_spec = dict(
+            resource_group=dict(
+                type='str',
+                required=True,
+            ),
+            managed_instance_name=dict(
+                type='str',
+                required=True,
+            ),
+            database_name=dict(
+                type='str',
+                required=True,
+            ),
+            policy_name=dict(
+                type='str',
+                required=True,
+                choices=['default']
+            ),
+            weekly_retention=dict(
+                type='str',
+            ),
+            monthly_retention=dict(
+                type='str'
+            ),
+            yearly_retention=dict(
+                type='str'
+            ),
+            week_of_year=dict(
+                type='int'
+            )
+        )
+        # store the results of the module operation
+        # self.parameters collects the retention settings that form the
+        # request body; identification args are stored as attributes below.
+        self.parameters = dict()
+        self.results = dict(
+            changed=False,
+            diff=[]
+        )
+        self.resource_group = None
+        self.managed_instance_name = None
+        self.database_name = None
+        self.policy_name = None
+
+        super(AzureRMSqMILongTermRetentionPolicy, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True)
+
+    def exec_module(self, **kwargs):
+        """Compare the requested retention values with the current policy and
+        create/update it when they differ (or when no policy is readable)."""
+        # Split the arguments: keys pre-declared as attributes in __init__
+        # (resource_group, managed_instance_name, database_name, policy_name)
+        # are set on self; the rest become the Azure request body.
+        for key in self.module_arg_spec:
+            if hasattr(self, key):
+                setattr(self, key, kwargs[key])
+            else:
+                self.parameters[key] = kwargs.get(key)
+
+        old_response = self.get()
+
+        if old_response is not None:
+            # Record each retention key that differs in results['diff'];
+            # keys the user left unset (None) never trigger a change.
+            for key in self.parameters.keys():
+                if self.parameters[key] is not None and old_response[key] != self.parameters[key]:
+                    self.results['changed'] = True
+                    self.results['diff'].append(key)
+            if self.results['changed'] and not self.check_mode:
+                self.results['long_term_retention_policy'] = self.create_or_update_policy()
+        else:
+            self.results['changed'] = True
+            if not self.check_mode:
+                self.results['long_term_retention_policy'] = self.create_or_update_policy()
+        return self.results
+
+    def get(self):
+        """Return the current policy as a flat dict, or None when it cannot
+        be fetched (treated by exec_module as "needs creating")."""
+        response = None
+        try:
+            response = self.sql_client.managed_instance_long_term_retention_policies.get(resource_group_name=self.resource_group,
+                                                                                         managed_instance_name=self.managed_instance_name,
+                                                                                         database_name=self.database_name,
+                                                                                         policy_name=self.policy_name)
+            self.log("Response : {0}".format(response))
+        except HttpResponseError:
+            self.log('Could not get facts for SQL managed instance long term retention policyes.')
+
+        return self.format_item(response) if response is not None else None
+
+    def create_or_update_policy(self):
+        """Issue the create-or-update LRO and wait for its result."""
+        response = None
+        try:
+            response = self.sql_client.managed_instance_long_term_retention_policies.begin_create_or_update(resource_group_name=self.resource_group,
+                                                                                                            managed_instance_name=self.managed_instance_name,
+                                                                                                            database_name=self.database_name,
+                                                                                                            policy_name=self.policy_name,
+                                                                                                            parameters=self.parameters)
+            self.log("Response : {0}".format(response))
+        except HttpResponseError as ec:
+            self.fail('Could not create SQL managed instance long term retention policyes. Exception info as {0}'.format(ec))
+
+        # begin_* returns a poller; block until the operation finishes.
+        return self.format_item(self.get_poller_result(response))
+
+    def format_item(self, item):
+        # Flatten the SDK model into the dict shape documented in RETURN.
+        d = item.as_dict()
+        d = {
+            'resource_group': self.resource_group,
+            'managed_instance_name': self.managed_instance_name,
+            'database_name': self.database_name,
+            'id': d.get('id', None),
+            'name': d.get('name', None),
+            'type': d.get('type', None),
+            "monthly_retention": d.get("monthly_retention"),
+            "week_of_year": d.get("week_of_year"),
+            "weekly_retention": d.get("weekly_retention"),
+            "yearly_retention": d.get("yearly_retention")
+        }
+        return d
+
+
+def main():
+    # Instantiation runs argument parsing and exec_module() via AzureRMModuleBase.
+    AzureRMSqMILongTermRetentionPolicy()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidblongtermretentionpolicy_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidblongtermretentionpolicy_info.py
new file mode 100644
index 000000000..11f7bce16
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidblongtermretentionpolicy_info.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_sqlmidblongtermretentionpolicy_info
+version_added: "2.4.0"
+short_description: Get Azure SQL managed instance long term retention policy facts
+description:
+    - Get facts of Azure SQL managed instance long term retention policies.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource.
+ type: str
+ required: true
+ managed_instance_name:
+ description:
+ - The name of the SQL managed instance.
+ type: str
+ required: true
+ database_name:
+ description:
+ - The name of the SQL managed instance database.
+ type: str
+ required: true
+ policy_name:
+ description:
+            - The name of the SQL managed instance long term retention policy.
+ type: str
+ choices:
+ - default
+
+extends_documentation_fragment:
+ - azure.azcollection.azure
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred-sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Get SQL managed instance long term retention policy by name
+ azure_rm_sqlmidblongtermretentionpolicy_info:
+ resource_group: testrg
+ managed_instance_name: testinstancename
+ database_name: newdatabase
+ policy_name: default
+'''
+
+RETURN = '''
+long_term_retention_policy:
+    description:
+        - A dictionary containing facts for SQL Managed Instance Long Term Retention Policies.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Sql/
+                     managedInstances/fredsqlmi/databases/newdatabase/backupLongTermRetentionPolicies/default"
+ database_name:
+ description:
+ - SQL managed instance database name.
+ returned: always
+ type: str
+ sample: newdatabase
+ policy_name:
+ description:
+                - SQL managed instance long term retention policy name.
+ returned: always
+ type: str
+ sample: default
+ managed_instance_name:
+ description:
+ - SQL managed instance name.
+ returned: always
+ type: str
+ sample: testmanagedinstance
+        type:
+            description:
+                - The SQL managed instance long term retention policy type.
+            type: str
+            returned: always
+            sample: "Microsoft.Sql/managedInstances/databases/backupLongTermRetentionPolicies"
+ resource_group:
+ description:
+ - The resource relate resource group.
+ type: str
+ returned: always
+ sample: testRG
+ week_of_year:
+ description:
+ - The week of year to take the yearly backup in an ISO 8601 format.
+ type: int
+ sample: 7
+ returned: always
+ weekly_retention:
+ description:
+ - The weekly retention policy for an LTR backup in an ISO 8601 format.
+ type: str
+ sample: P13W
+ returned: always
+ monthly_retention:
+ description:
+ - The monthly retention policy for an LTR backup in an ISO 8601 format.
+ type: str
+ sample: P3M
+ returned: always
+ yearly_retention:
+ description:
+ - The yearly retention policy for an LTR backup in an ISO 8601 format.
+ type: str
+ sample: P6Y
+ returned: always
+'''
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.core.exceptions import HttpResponseError
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMSqMILongTermRetentionPolicyInfo(AzureRMModuleBase):
+ def __init__(self):
+ # define user inputs into argument
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True,
+ ),
+ managed_instance_name=dict(
+ type='str',
+ required=True,
+ ),
+ database_name=dict(
+ type='str',
+ required=True,
+ ),
+ policy_name=dict(
+ type='str',
+ choices=['default']
+ ),
+ )
+ # store the results of the module operation
+ self.results = dict(
+ changed=False
+ )
+ self.resource_group = None
+ self.managed_instance_name = None
+ self.database_name = None
+ self.policy_name = None
+
+ super(AzureRMSqMILongTermRetentionPolicyInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True)
+
+ def exec_module(self, **kwargs):
+ for key in self.module_arg_spec:
+ setattr(self, key, kwargs[key])
+
+ if self.policy_name is not None:
+ self.results['long_term_retention_policy'] = self.get()
+ else:
+ self.results['long_term_retention_policy'] = self.list_by_database()
+ return self.results
+
+ def list_by_database(self):
+ response = None
+ try:
+ response = self.sql_client.managed_instance_long_term_retention_policies.list_by_database(resource_group_name=self.resource_group,
+ managed_instance_name=self.managed_instance_name,
+ database_name=self.database_name)
+ self.log("Response : {0}".format(response))
+ except HttpResponseError:
+ self.log('Could not get facts for SQL managed instance short term retention policyes.')
+
+ return [self.format_item(item) for item in response] if response is not None else []
+
+ def get(self):
+ response = None
+ try:
+ response = self.sql_client.managed_instance_long_term_retention_policies.get(resource_group_name=self.resource_group,
+ managed_instance_name=self.managed_instance_name,
+ database_name=self.database_name,
+ policy_name=self.policy_name)
+ self.log("Response : {0}".format(response))
+ except HttpResponseError as ec:
+ self.fail(ec)
+ self.log('Could not get facts for SQL managed instance short term retention policyes.')
+
+ return [self.format_item(response)] if response is not None else None
+
+ def format_item(self, item):
+ d = item.as_dict()
+ d = {
+ 'resource_group': self.resource_group,
+ 'managed_instance_name': self.managed_instance_name,
+ 'database_name': self.database_name,
+ 'id': d.get('id', None),
+ 'name': d.get('name', None),
+ 'type': d.get('type', None),
+ "monthly_retention": d.get("monthly_retention"),
+ "week_of_year": d.get("week_of_year"),
+ "weekly_retention": d.get("weekly_retention"),
+ "yearly_retention": d.get("yearly_retention")
+ }
+ return d
+
+
+def main():
+    # Instantiation runs argument parsing and exec_module() via AzureRMModuleBase.
+    AzureRMSqMILongTermRetentionPolicyInfo()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidbshorttermretentionpolicy.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidbshorttermretentionpolicy.py
new file mode 100644
index 000000000..477c393bb
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidbshorttermretentionpolicy.py
@@ -0,0 +1,244 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_sqlmidbshorttermretentionpolicy
+version_added: "2.4.0"
+short_description: Manage SQL Managed Instance database backup short term retention policy
+description:
+ - Manage SQL Managed Instance database backup short term retention policy.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource.
+ type: str
+ required: true
+ managed_instance_name:
+ description:
+ - The name of the SQL managed instance.
+ type: str
+ required: true
+ database_name:
+ description:
+ - The name of the SQL managed instance database.
+ type: str
+ required: true
+ policy_name:
+ description:
+ - The name of the SQL managed instance short term retention policy.
+ type: str
+ required: true
+ choices:
+ - default
+ retention_days:
+ description:
+ - The backup retention period in days. This is how many days Point-in-Time.
+ type: int
+ default: 7
+
+extends_documentation_fragment:
+ - azure.azcollection.azure
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred-sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Update SQL managed instance short term retention policy's retention_days
+ azure_rm_sqlmidbshorttermretentionpolicy:
+ resource_group: testrg
+ managed_instance_name: testinstancename
+ database_name: newdatabase
+ policy_name: default
+ retention_days: 3
+'''
+
+RETURN = '''
+short_term_retention_policy:
+ description:
+ - A dictionary containing facts for SQL Managed Instance Short Term Retention Policies.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Sql/
+ managedInstances/fredsqlmi/databases/newdatabase/backupShortTermRetentionPolicies/default"
+ database_name:
+ description:
+ - SQL managed instance database name.
+ returned: always
+ type: str
+ sample: newdatabase
+ policy_name:
+ description:
+            - SQL managed instance short term retention policy name.
+ returned: always
+ type: str
+ sample: default
+ managed_instance_name:
+ description:
+ - SQL managed instance name.
+ returned: always
+ type: str
+ sample: testmanagedinstance
+ type:
+ description:
+ - The SQL managed instance type.
+ type: str
+ returned: always
+ sample: "Microsoft.Sql/managedInstances"
+ resource_group:
+ description:
+ - The resource relate resource group.
+ type: str
+ returned: always
+ sample: testRG
+ retention_days:
+ description:
+ - The backup retention period in days. This is how many days Point-in-Time.
+ type: int
+ sample: 7
+ returned: always
+'''
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.core.exceptions import HttpResponseError
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMSqMIShortTermRetentionPolicy(AzureRMModuleBase):
+    """Create or update the short term (point-in-time) backup retention
+    policy of a SQL Managed Instance database.  The policy is a singleton
+    whose only valid name is 'default'."""
+
+    def __init__(self):
+        # define user inputs into argument
+        self.module_arg_spec = dict(
+            resource_group=dict(
+                type='str',
+                required=True,
+            ),
+            managed_instance_name=dict(
+                type='str',
+                required=True,
+            ),
+            database_name=dict(
+                type='str',
+                required=True,
+            ),
+            policy_name=dict(
+                type='str',
+                required=True,
+                choices=['default']
+            ),
+            retention_days=dict(
+                type='int',
+                default=7
+            ),
+        )
+        # store the results of the module operation
+        self.results = dict(
+            changed=False,
+            diff=[]
+        )
+        self.resource_group = None
+        self.managed_instance_name = None
+        self.database_name = None
+        self.policy_name = None
+        self.retention_days = None
+
+        super(AzureRMSqMIShortTermRetentionPolicy, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True)
+
+    def exec_module(self, **kwargs):
+        """Update the policy when retention_days differs from the current
+        value; create it when no policy can be fetched."""
+        for key in self.module_arg_spec:
+            setattr(self, key, kwargs[key])
+
+        old_response = self.get()
+
+        if old_response is not None:
+            # NOTE: retention_days has a default of 7 in the arg spec, so the
+            # None check here is defensive; the comparison drives 'changed'.
+            if self.retention_days is not None and old_response['retention_days'] != self.retention_days:
+                self.results['changed'] = True
+                self.results['diff'].append('retention_days')
+                if not self.check_mode:
+                    self.results['short_term_retention_policy'] = self.update_policy()
+        else:
+            self.results['changed'] = True
+            if not self.check_mode:
+                self.results['short_term_retention_policy'] = self.create_policy()
+        return self.results
+
+    def get(self):
+        """Return the current policy as a flat dict, or None when it cannot
+        be fetched (treated by exec_module as "needs creating")."""
+        response = None
+        try:
+            response = self.sql_client.managed_backup_short_term_retention_policies.get(resource_group_name=self.resource_group,
+                                                                                        managed_instance_name=self.managed_instance_name,
+                                                                                        database_name=self.database_name,
+                                                                                        policy_name=self.policy_name)
+            self.log("Response : {0}".format(response))
+        except HttpResponseError:
+            self.log('Could not get facts for SQL managed instance short term retention policyes.')
+
+        return self.format_item(response) if response is not None else None
+
+    def update_policy(self):
+        """Issue the update LRO for an existing policy and wait for it."""
+        response = None
+        try:
+            response = self.sql_client.managed_backup_short_term_retention_policies.begin_update(resource_group_name=self.resource_group,
+                                                                                                 managed_instance_name=self.managed_instance_name,
+                                                                                                 database_name=self.database_name,
+                                                                                                 policy_name=self.policy_name,
+                                                                                                 parameters=dict(retention_days=self.retention_days))
+            self.log("Response : {0}".format(response))
+        except HttpResponseError as ec:
+            self.fail('Could not update the SQL managed instance short term retention policyes. Exception as {0}'.format(ec))
+
+        # begin_* returns a poller; block until the operation finishes.
+        return self.format_item(self.get_poller_result(response))
+
+    def create_policy(self):
+        """Issue the create-or-update LRO for a new policy and wait for it."""
+        response = None
+        try:
+            response = self.sql_client.managed_backup_short_term_retention_policies.begin_create_or_update(resource_group_name=self.resource_group,
+                                                                                                           managed_instance_name=self.managed_instance_name,
+                                                                                                           database_name=self.database_name,
+                                                                                                           policy_name=self.policy_name,
+                                                                                                           parameters=dict(retention_days=self.retention_days))
+            self.log("Response : {0}".format(response))
+        except HttpResponseError as ec:
+            self.fail('Could not Create the SQL managed instance short term retention policyes. Exception as {0}'.format(ec))
+
+        return self.format_item(self.get_poller_result(response))
+
+    def format_item(self, item):
+        # Flatten the SDK model into the dict shape documented in RETURN.
+        d = item.as_dict()
+        d = {
+            'resource_group': self.resource_group,
+            'managed_instance_name': self.managed_instance_name,
+            'database_name': self.database_name,
+            'id': d.get('id', None),
+            'name': d.get('name', None),
+            'type': d.get('type', None),
+            'retention_days': d.get('retention_days', None),
+        }
+        return d
+
+
+def main():
+    # Instantiation runs argument parsing and exec_module() via AzureRMModuleBase.
+    AzureRMSqMIShortTermRetentionPolicy()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidbshorttermretentionpolicy_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidbshorttermretentionpolicy_info.py
new file mode 100644
index 000000000..b3665b66d
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_sqlmidbshorttermretentionpolicy_info.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_sqlmidbshorttermretentionpolicy_info
+version_added: "2.4.0"
+short_description: Get Azure SQL managed instance short term retention policy
+description:
+ - Get Azure SQL managed instance short term retention policy.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource.
+ type: str
+ required: true
+ managed_instance_name:
+ description:
+ - The name of the SQL managed instance.
+ type: str
+ required: true
+ database_name:
+ description:
+ - The name of the SQL managed instance database.
+ type: str
+ required: true
+ policy_name:
+ description:
+ - The name of the SQL managed instance short term retention policy.
+ type: str
+ choices:
+ - default
+
+extends_documentation_fragment:
+ - azure.azcollection.azure
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred-sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Get SQL managed instance short term retention policy by name
+ azure_rm_sqlmidbshorttermretentionpolicy_info:
+ resource_group: testrg
+ managed_instance_name: testinstancename
+ database_name: newdatabase
+ policy_name: default
+'''
+
+RETURN = '''
+short_term_retention_policy:
+ description:
+ - A dictionary containing facts for SQL Managed Instance Short Term Retention Policies.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Sql/
+ managedInstances/fredsqlmi/databases/newdatabase/backupShortTermRetentionPolicies/default"
+ database_name:
+ description:
+ - SQL managed instance database name.
+ returned: always
+ type: str
+ sample: newdatabase
+ policy_name:
+ description:
+            - SQL managed instance short term retention policy name.
+ returned: always
+ type: str
+ sample: default
+ managed_instance_name:
+ description:
+ - SQL managed instance name.
+ returned: always
+ type: str
+ sample: testmanagedinstance
+ type:
+ description:
+ - The SQL managed instance short term retention policy type.
+ type: str
+ returned: always
+ sample: "Microsoft.Sql/managedInstances/databases/backupShortTermRetentionPolicies"
+ resource_group:
+ description:
+ - The resource relate resource group.
+ type: str
+ returned: always
+ sample: testRG
+ retention_days:
+ description:
+                - The backup retention period in days. This is how many days Point-in-Time restore is supported.
+ type: int
+ sample: 7
+ returned: always
+'''
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.core.exceptions import HttpResponseError
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMSqMIShortTermRetentionPolicyInfo(AzureRMModuleBase):
+    """Info module: fetch short term backup retention policies of a SQL
+    Managed Instance database, either one by name or all for the database."""
+
+    def __init__(self):
+        # define user inputs into argument
+        self.module_arg_spec = dict(
+            resource_group=dict(
+                type='str',
+                required=True,
+            ),
+            managed_instance_name=dict(
+                type='str',
+                required=True,
+            ),
+            database_name=dict(
+                type='str',
+                required=True,
+            ),
+            policy_name=dict(
+                type='str',
+                choices=['default']
+            ),
+        )
+        # store the results of the module operation
+        self.results = dict(
+            changed=False
+        )
+        self.resource_group = None
+        self.managed_instance_name = None
+        self.database_name = None
+        self.policy_name = None
+
+        super(AzureRMSqMIShortTermRetentionPolicyInfo, self).__init__(self.module_arg_spec, supports_check_mode=True, supports_tags=False, facts_module=True)
+
+    def exec_module(self, **kwargs):
+        """Dispatch to get() when a policy name is given, else list all."""
+        for key in self.module_arg_spec:
+            setattr(self, key, kwargs[key])
+
+        if self.policy_name is not None:
+            self.results['short_term_retention_policy'] = self.get()
+        else:
+            self.results['short_term_retention_policy'] = self.list_by_database()
+        return self.results
+
+    def list_by_database(self):
+        """Return all short term retention policies of the database as a list."""
+        response = None
+        try:
+            response = self.sql_client.managed_backup_short_term_retention_policies.list_by_database(resource_group_name=self.resource_group,
+                                                                                                     managed_instance_name=self.managed_instance_name,
+                                                                                                     database_name=self.database_name)
+            self.log("Response : {0}".format(response))
+        except HttpResponseError:
+            # Info modules report "nothing found" as an empty result, not a failure.
+            self.log('Could not get facts for SQL managed instance short term retention policyes.')
+
+        return [self.format_item(item) for item in response] if response is not None else []
+
+    def get(self):
+        """Return the named policy as a one-element list, or None when absent."""
+        response = None
+        try:
+            response = self.sql_client.managed_backup_short_term_retention_policies.get(resource_group_name=self.resource_group,
+                                                                                        managed_instance_name=self.managed_instance_name,
+                                                                                        database_name=self.database_name,
+                                                                                        policy_name=self.policy_name)
+            self.log("Response : {0}".format(response))
+        except HttpResponseError as ec:
+            self.log('Could not get facts for SQL managed instance short term retention policyes.')
+
+        return [self.format_item(response)] if response is not None else None
+
+    def format_item(self, item):
+        # Flatten the SDK model into the dict shape documented in RETURN.
+        d = item.as_dict()
+        d = {
+            'resource_group': self.resource_group,
+            'managed_instance_name': self.managed_instance_name,
+            'database_name': self.database_name,
+            'id': d.get('id', None),
+            'name': d.get('name', None),
+            'type': d.get('type', None),
+            'retention_days': d.get('retention_days', None),
+        }
+        return d
+
+
+def main():
+    # Instantiation runs argument parsing and exec_module() via AzureRMModuleBase.
+    AzureRMSqMIShortTermRetentionPolicyInfo()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageaccountmanagementpolicy.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageaccountmanagementpolicy.py
new file mode 100644
index 000000000..c0efaaf0b
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageaccountmanagementpolicy.py
@@ -0,0 +1,672 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_storageaccountmanagementpolicy
+version_added: "2.4.0"
+short_description: Manage storage account management policies
+description:
+ - Create, update or delete storage account management policies.
+options:
+ resource_group:
+ description:
+ - Name of the resource group to use.
+ required: true
+ type: str
+ aliases:
+ - resource_group_name
+ storage_account_name:
+ description:
+ - Name of the storage account.
+ type: str
+ required: true
+ rules:
+ description:
+ - The Storage Account ManagementPolicies Rules.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the policy rule.
+ - A rule name can contain any combination of alpha numeric characters.
+ type: str
+ required: true
+ type:
+ description:
+ - The type of the policy rule.
+ type: str
+ required: true
+ choices:
+ - Lifecycle
+ enabled:
+ description:
+ - Whether to enable the rule.
+ type: bool
+ definition:
+ description:
+ - An object that defines the Lifecycle rule.
+ required: true
+ type: dict
+ suboptions:
+ actions:
+ description:
+ - An object that defines the action set.
+ type: dict
+ required: true
+ suboptions:
+ base_blob:
+ description:
+ - The management policy action for base blob.
+ type: dict
+ suboptions:
+ tier_to_cool:
+ description:
+ - The function to tier blobs to cool storage.
+ - Support blobs currently at Hot tier.
+ type: dict
+ suboptions:
+ days_after_modification_greater_than:
+ description:
+ - Value indicating the age in days after last modification.
+ type: float
+ days_after_last_access_time_greater_than:
+ description:
+ - This property can only be used in conjunction with last access time tracking policy.
+ type: float
+ tier_to_archive:
+ description:
+ - The function to tier blobs to archive storage.
+ - Support blobs currently at Hot or Cool tier.
+ type: dict
+ suboptions:
+ days_after_modification_greater_than:
+ description:
+ - Value indicating the age in days after last modification.
+ type: float
+ days_after_last_access_time_greater_than:
+ description:
+ - This property can only be used in conjunction with last access time tracking policy.
+ type: float
+ delete:
+ description:
+ - The function to delete the blob.
+ type: dict
+ suboptions:
+ days_after_modification_greater_than:
+ description:
+ - Value indicating the age in days after last modification.
+ type: float
+ days_after_last_access_time_greater_than:
+ description:
+ - This property can only be used in conjunction with last access time tracking policy.
+ type: float
+ enable_auto_tier_to_hot_from_cool:
+ description:
+ - This property enables auto tiering of a blob from cool to hot on a blob access.
+ type: bool
+ snapshot:
+ description:
+ - The management policy action for snapshot.
+ type: dict
+ suboptions:
+ tier_to_cool:
+ description:
+ - The function to tier blob snapshot to cool storage.
+ - Support blob snapshot at Hot tier.
+ type: dict
+ suboptions:
+ days_after_creation_greater_than:
+ description:
+ - Value indicating the age in days after creation.
+ type: float
+ required: true
+ tier_to_archive:
+ description:
+ - The function to tier blob snapshot to archive storage.
+ - Support blob snapshot currently at Hot or Cool tier.
+ type: dict
+ suboptions:
+ days_after_creation_greater_than:
+ description:
+ - Value indicating the age in days after creation.
+ type: float
+ required: true
+ delete:
+ description:
+ - The function to delete the blob snapshot.
+ type: dict
+ suboptions:
+ days_after_creation_greater_than:
+ description:
+ - Value indicating the age in days after creation.
+ type: float
+ required: true
+ version:
+ description:
+ - The management policy action for version.
+ type: dict
+ suboptions:
+ tier_to_cool:
+ description:
+ - The function to tier blob version to cool storage.
+ - Support blob version currently at Hot tier.
+ type: dict
+ suboptions:
+ days_after_creation_greater_than:
+ description:
+ - Value indicating the age in days after creation.
+ type: float
+ required: true
+ tier_to_archive:
+ description:
+ - The function to tier blob version to archive storage.
+ - Support blob version currently at Hot or Cool tier.
+ type: dict
+ suboptions:
+ days_after_creation_greater_than:
+ description:
+ - Value indicating the age in days after creation.
+ type: float
+ required: true
+ delete:
+ description:
+ - The function to delete the blob version.
+ type: dict
+ suboptions:
+ days_after_creation_greater_than:
+ description:
+ - Value indicating the age in days after creation.
+ type: float
+ required: true
+ filters:
+ description:
+ - An object that defines the filter set.
+ type: dict
+ suboptions:
+ prefix_match:
+ description:
+ - An array of strings for prefixes to be match.
+ type: list
+ elements: str
+ blob_types:
+ description:
+ - An array of predefined enum values.
+ - Currently blockBlob supports all tiering and delete actions. Only delete actions are supported for C(appendBlob).
+ type: list
+ required: true
+ elements: str
+ choices:
+ - blockBlob
+ - appendBlob
+ blob_index_match:
+ description:
+ - An array of blob index tag based filters, there can be at most 10 tag filters.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - This is the filter tag name, it can have 1 - 128 characters.
+ type: str
+ required: true
+ op:
+ description:
+ - This is the comparison operator which is used for object comparison and filtering.
+ - Only C(==) (equality operator) is currently supported.
+ type: str
+ required: true
+ value:
+ description:
+ - This is the filter tag value field used for tag based filtering.
+ - It can have 0-256 characters.
+ type: str
+ required: true
+ state:
+ description:
+ - State of the storage account managed policy. Use C(present) to add or update the policy rule.
+ - Use C(absent) to delete all policy rules.
+ default: present
+ type: str
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure.azcollection.azure
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred-sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Create storage account management policy with multi parameters
+ azure_rm_storageaccountmanagementpolicy:
+ resource_group: testRG
+ storage_account_name: testaccount
+ rules:
+ - name: olcmtest5
+ type: Lifecycle
+ enabled: false
+ definition:
+ actions:
+ base_blob:
+ enable_auto_tier_to_hot_from_cool: true
+ delete:
+ days_after_modification_greater_than: 33
+ days_after_last_access_time_greater_than: 33
+ tier_to_cool:
+ days_after_modification_greater_than: 33
+ days_after_last_access_time_greater_than: 33
+ tier_to_archive:
+ days_after_modification_greater_than: 33
+ days_after_last_access_time_greater_than: 33
+ snapshot:
+ tier_to_cool:
+ days_after_creation_greater_than: 33
+ tier_to_archive:
+ days_after_creation_greater_than: 33
+ delete:
+ days_after_creation_greater_than: 33
+ version:
+ tier_to_archive:
+ days_after_creation_greater_than: 33
+ tier_to_cool:
+ days_after_creation_greater_than: 33
+ delete:
+ days_after_creation_greater_than: 33
+ filters:
+ prefix_match:
+ - olcmtestcontainer2
+ blob_types:
+ - blockBlob
+ - appendBlob
+ blob_index_match:
+ - name: tags3
+ op: '=='
+ value: value3
+
+- name: Delete management policy rules
+ azure_rm_storageaccountmanagementpolicy:
+ resource_group: "{{ resource_group }}"
+ storage_account_name: "st{{ rpfx }}"
+ state: absent
+'''
+
+
+RETURN = '''
+state:
+ description:
+ - The data policy rules associated with the specified storage account.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - The data policy's ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Storage/storageAccounts/sttest/managementPolicies/default"
+ resource_group:
+ description:
+ - The resource group name.
+ returned: always
+ type: str
+ sample: testRG
+ storage_account_name:
+ description:
+ - The storage account name.
+ returned: always
+ type: str
+ sample: teststname
+ type:
+ description:
+ - The type of the resource.
+ returned: always
+ type: str
+ sample: "Microsoft.Storage/storageAccounts/managementPolicies"
+ last_modified_time:
+ description:
+ - Returns the date and time the ManagementPolicies was last modified.
+ returned: always
+ type: str
+ sample: "2024-04-12T11:40:10.376465+00:00"
+ name:
+ description:
+ - The name of the resource.
+ returned: always
+ type: str
+ sample: DefaultManagementPolicy
+ policy:
+ description:
+ - The Storage Account ManagementPolicy.
+ returned: always
+ type: complex
+ contains:
+ rules:
+ description:
+ - The Storage Account ManagementPolicies Rules.
+ type: list
+ returned: always
+ sample: [
+ {
+ "definition": {
+ "actions": {
+ "base_blob": {
+ "delete": {
+ "days_after_last_access_time_greater_than": 33.0,
+ "days_after_modification_greater_than": 33.0
+ },
+ "enable_auto_tier_to_hot_from_cool": true,
+ "tier_to_archive": {
+ "days_after_last_access_time_greater_than": 33.0,
+ "days_after_modification_greater_than": 33.0
+ },
+ "tier_to_cool": {
+ "days_after_last_access_time_greater_than": 33.0,
+ "days_after_modification_greater_than": 33.0
+ }
+ },
+ "snapshot": {
+ "delete": {
+ "days_after_creation_greater_than": 33.0
+ },
+ "tier_to_archive": {
+ "days_after_creation_greater_than": 33.0
+ },
+ "tier_to_cool": {
+ "days_after_creation_greater_than": 33.0
+ }
+ },
+ "version": {
+ "delete": {
+ "days_after_creation_greater_than": 33.0
+ },
+ "tier_to_archive": {
+ "days_after_creation_greater_than": 33.0
+ },
+ "tier_to_cool": {
+ "days_after_creation_greater_than": 33.0
+ }
+ }
+ },
+ "filters": {
+ "blob_index_match": [
+ {
+ "name": "tags3",
+ "op": "==",
+ "value": "value3"
+ }
+ ],
+ "blob_types": [
+ "blockBlob",
+ "appendBlob"
+ ],
+ "prefix_match": [
+ "olcmtestcontainer2"
+ ]
+ }
+ },
+ "enabled": false,
+ "name": "olcmtest5",
+ "type": "Lifecycle"
+ }
+ ]
+'''
+
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
+try:
+ from azure.core.exceptions import ResourceNotFoundError
+except Exception:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMStorageAccountManagementPolicy(AzureRMModuleBaseExt):
+
+ def __init__(self):
+
+ self.module_arg_spec = dict(
+ resource_group=dict(required=True, type='str', aliases=['resource_group_name']),
+ storage_account_name=dict(type='str', required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ rules=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ enabled=dict(type='bool'),
+ name=dict(type='str', required=True),
+ type=dict(type='str', required=True, choices=['Lifecycle']),
+ definition=dict(
+ type='dict',
+ required=True,
+ options=dict(
+ actions=dict(
+ type='dict',
+ required=True,
+ options=dict(
+ base_blob=dict(
+ type='dict',
+ options=dict(
+ tier_to_cool=dict(
+ type='dict',
+ options=dict(
+ days_after_modification_greater_than=dict(type='float'),
+ days_after_last_access_time_greater_than=dict(type='float')
+ )
+ ),
+ tier_to_archive=dict(
+ type='dict',
+ options=dict(
+ days_after_modification_greater_than=dict(type='float'),
+ days_after_last_access_time_greater_than=dict(type='float')
+ )
+ ),
+ delete=dict(
+ type='dict',
+ options=dict(
+ days_after_modification_greater_than=dict(type='float'),
+ days_after_last_access_time_greater_than=dict(type='float')
+ )
+ ),
+ enable_auto_tier_to_hot_from_cool=dict(type='bool')
+ )
+ ),
+ snapshot=dict(
+ type='dict',
+ options=dict(
+ tier_to_cool=dict(
+ type='dict',
+ options=dict(
+ days_after_creation_greater_than=dict(type='float', required=True)
+ )
+ ),
+ tier_to_archive=dict(
+ type='dict',
+ options=dict(
+ days_after_creation_greater_than=dict(type='float', required=True)
+ )
+ ),
+ delete=dict(
+ type='dict',
+ options=dict(
+ days_after_creation_greater_than=dict(type='float', required=True)
+ )
+ )
+ )
+ ),
+ version=dict(
+ type='dict',
+ options=dict(
+ tier_to_cool=dict(
+ type='dict',
+ options=dict(
+ days_after_creation_greater_than=dict(
+ type='float',
+ required=True
+ )
+ )
+ ),
+ tier_to_archive=dict(
+ type='dict',
+ options=dict(
+ days_after_creation_greater_than=dict(
+ type='float',
+ required=True
+ )
+ )
+ ),
+ delete=dict(
+ type='dict',
+ options=dict(
+ days_after_creation_greater_than=dict(
+ type='float',
+ required=True
+ )
+ )
+ )
+ )
+ )
+ )
+ ),
+ filters=dict(
+ type='dict',
+ options=dict(
+ prefix_match=dict(type='list', elements='str'),
+ blob_types=dict(type='list', elements='str', choices=['blockBlob', 'appendBlob'], required=True),
+ blob_index_match=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ name=dict(type='str', required=True),
+ op=dict(type='str', required=True),
+ value=dict(type='str', required=True)
+ )
+ )
+ )
+ )
+ )
+ )
+ )
+ )
+ )
+
+ self.results = dict(
+ changed=False,
+ state=dict()
+ )
+
+ self.resource_group = None
+ self.storage_account_name = None
+ self.state = None
+ self.rules = []
+
+ super(AzureRMStorageAccountManagementPolicy, self).__init__(self.module_arg_spec,
+ supports_tags=False,
+ supports_check_mode=True)
+
+ def exec_module(self, **kwargs):
+
+ for key in list(self.module_arg_spec.keys()):
+ setattr(self, key, kwargs[key])
+
+ managed_policy = self.get_management_policy()
+ changed = False
+
+ if self.state == 'present':
+ if managed_policy is not None:
+ rules = []
+ for item in managed_policy['policy']['rules']:
+ rules.append(item)
+ rules_name = [item['name'] for item in rules]
+ for item in self.rules:
+ if item['name'] in rules_name:
+ for tt in managed_policy['policy']['rules']:
+ if item['name'] == tt['name']:
+ old_item = tt
+ if not self.default_compare({}, item, old_item, '', dict(compare=[])):
+ rules.remove(old_item)
+ rules.append(item)
+ changed = True
+ else:
+ rules.append(item)
+ changed = True
+ if changed and not self.check_mode:
+ self.create_or_update_management_policy(rules)
+ else:
+ changed = True
+ if not self.check_mode:
+ self.create_or_update_management_policy(self.rules)
+ else:
+ if managed_policy is not None:
+ changed = True
+ if not self.check_mode:
+ self.delete_management_policy()
+
+ self.results['state'] = self.get_management_policy()
+ self.results['changed'] = changed
+
+ return self.results
+
+ def get_management_policy(self):
+ self.log('Get info for storage account management policy')
+
+ response = None
+ try:
+ response = self.storage_client.management_policies.get(self.resource_group, self.storage_account_name, 'default')
+ except ResourceNotFoundError as ec:
+ self.log("Failed to obtain the storage account management policy, detail as {0}".format(ec))
+ return None
+ return self.format_to_dict(response)
+
+ def create_or_update_management_policy(self, rules):
+ self.log("Creating or updating storage account management policy")
+
+ try:
+ self.storage_client.management_policies.create_or_update(resource_group_name=self.resource_group,
+ account_name=self.storage_account_name,
+ management_policy_name='default',
+ properties=dict(policy=dict(rules=rules)))
+ except Exception as e:
+ self.log('Error creating or updating storage account management policy.')
+ self.fail("Failed to create or update storage account management policy: {0}".format(str(e)))
+ return self.get_management_policy()
+
+ def delete_management_policy(self):
+ try:
+ self.storage_client.management_policies.delete(self.resource_group, self.storage_account_name, 'default')
+ except Exception as e:
+ self.fail("Failed to delete the storage account management policy: {0}".format(str(e)))
+
+ def format_to_dict(self, obj):
+ result = dict()
+ result['id'] = obj.id
+ result['resource_group'] = self.resource_group
+ result['storage_account_name'] = self.storage_account_name
+ result['name'] = obj.name
+ result['type'] = obj.type
+ result['last_modified_time'] = obj.last_modified_time
+ result['policy'] = dict(rules=[])
+ if obj.policy is not None:
+ result['policy'] = obj.policy.as_dict()
+
+ return result
+
+
+def main():
+ AzureRMStorageAccountManagementPolicy()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageaccountmanagementpolicy_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageaccountmanagementpolicy_info.py
new file mode 100644
index 000000000..1ffa1d21f
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_storageaccountmanagementpolicy_info.py
@@ -0,0 +1,245 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_storageaccountmanagementpolicy_info
+
+version_added: "2.4.0"
+
+short_description: Get the data policy rules associated with the specified storage account
+
+description:
+ - Get the data policy rules associated with the specified storage account.
+
+options:
+ resource_group:
+ description:
+ - Name of the resource group to use.
+ required: true
+ type: str
+ aliases:
+ - resource_group_name
+ storage_account_name:
+ description:
+ - Name of the storage account.
+ type: str
+ required: true
+
+extends_documentation_fragment:
+ - azure.azcollection.azure
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred-sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Get the data policy rules associated with the specified storage account
+ azure_rm_storageaccountmanagementpolicy_info:
+ resource_group: myResourceGroup
+ storage_account_name: testaccount
+'''
+
+
+RETURN = '''
+state:
+ description:
+ - The data policy rules associated with the specified storage account.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - The data policy's ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Storage/storageAccounts/sttest/managementPolicies/default"
+ resource_group:
+ description:
+ - The resource group name.
+ returned: always
+ type: str
+ sample: testRG
+ storage_account_name:
+ description:
+ - The storage account name.
+ returned: always
+ type: str
+ sample: teststname
+ type:
+ description:
+ - The type of the resource.
+ returned: always
+ type: str
+ sample: "Microsoft.Storage/storageAccounts/managementPolicies"
+ last_modified_time:
+ description:
+ - Returns the date and time the ManagementPolicies was last modified.
+ returned: always
+ type: str
+ sample: "2024-04-12T11:40:10.376465+00:00"
+ name:
+ description:
+ - The name of the resource.
+ returned: always
+ type: str
+ sample: DefaultManagementPolicy
+ policy:
+ description:
+ - The Storage Account ManagementPolicy.
+ returned: always
+ type: complex
+ contains:
+ rules:
+ description:
+ - The Storage Account ManagementPolicies Rules.
+ type: list
+ returned: always
+ sample: [
+ {
+ "definition": {
+ "actions": {
+ "base_blob": {
+ "delete": {
+ "days_after_last_access_time_greater_than": 33.0,
+ "days_after_modification_greater_than": 33.0
+ },
+ "enable_auto_tier_to_hot_from_cool": true,
+ "tier_to_archive": {
+ "days_after_last_access_time_greater_than": 33.0,
+ "days_after_modification_greater_than": 33.0
+ },
+ "tier_to_cool": {
+ "days_after_last_access_time_greater_than": 33.0,
+ "days_after_modification_greater_than": 33.0
+ }
+ },
+ "snapshot": {
+ "delete": {
+ "days_after_creation_greater_than": 33.0
+ },
+ "tier_to_archive": {
+ "days_after_creation_greater_than": 33.0
+ },
+ "tier_to_cool": {
+ "days_after_creation_greater_than": 33.0
+ }
+ },
+ "version": {
+ "delete": {
+ "days_after_creation_greater_than": 33.0
+ },
+ "tier_to_archive": {
+ "days_after_creation_greater_than": 33.0
+ },
+ "tier_to_cool": {
+ "days_after_creation_greater_than": 33.0
+ }
+ }
+ },
+ "filters": {
+ "blob_index_match": [
+ {
+ "name": "tags3",
+ "op": "==",
+ "value": "value3"
+ }
+ ],
+ "blob_types": [
+ "blockBlob",
+ "appendBlob"
+ ],
+ "prefix_match": [
+ "olcmtestcontainer2"
+ ]
+ }
+ },
+ "enabled": false,
+ "name": "olcmtest5",
+ "type": "Lifecycle"
+ }
+ ]
+'''
+
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+try:
+ from azure.core.exceptions import ResourceNotFoundError
+except Exception:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMStorageAccountManagementPolicyInfo(AzureRMModuleBase):
+
+ def __init__(self):
+
+ self.module_arg_spec = dict(
+ resource_group=dict(required=True, type='str', aliases=['resource_group_name']),
+ storage_account_name=dict(type='str', required=True),
+ )
+
+ self.results = dict(
+ changed=False,
+ state=dict()
+ )
+
+ self.resource_group = None
+ self.storage_account_name = None
+ self.state = None
+ self.rules = None
+
+ super(AzureRMStorageAccountManagementPolicyInfo, self).__init__(self.module_arg_spec,
+ supports_tags=False,
+ supports_check_mode=True)
+
+ def exec_module(self, **kwargs):
+
+ for key in list(self.module_arg_spec.keys()):
+ setattr(self, key, kwargs[key])
+
+ self.results['state'] = self.get_management_policy()
+
+ return self.results
+
+ def get_management_policy(self):
+ self.log('Get info for storage account management policy')
+
+ response = None
+ try:
+ response = self.storage_client.management_policies.get(self.resource_group, self.storage_account_name, 'default')
+ except ResourceNotFoundError as ec:
+ self.log("Failed to obtain the storage account management policy, detail as {0}".format(ec))
+ return
+
+ return self.format_to_dict(response)
+
+ def format_to_dict(self, obj):
+ result = dict()
+ result['id'] = obj.id
+ result['resource_group'] = self.resource_group
+ result['storage_account_name'] = self.storage_account_name
+ result['name'] = obj.name
+ result['type'] = obj.type
+ result['last_modified_time'] = obj.last_modified_time
+ result['policy'] = dict(rules=[])
+ if obj.policy is not None:
+ result['policy'] = obj.policy.as_dict()
+
+ return result
+
+
+def main():
+ AzureRMStorageAccountManagementPolicyInfo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachine.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachine.py
index e845e2fa1..d11dbd185 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachine.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachine.py
@@ -221,6 +221,7 @@ options:
- Type of OS disk caching.
type: str
choices:
+ - None
- ReadOnly
- ReadWrite
aliases:
@@ -270,6 +271,11 @@ options:
description:
- ID of disk encryption set for data disk.
type: str
+ managed_disk_id:
+ description:
+ - The ID of the existing data disk.
+ - If specified, attach mode will be chosen.
+ type: str
managed_disk_type:
description:
- Managed data disk type.
@@ -313,6 +319,7 @@ options:
- Type of data disk caching.
type: str
choices:
+ - None
- ReadOnly
- ReadWrite
public_ip_allocation_method:
@@ -1145,7 +1152,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
storage_account_name=dict(type='str', aliases=['storage_account']),
storage_container_name=dict(type='str', aliases=['storage_container'], default='vhds'),
storage_blob_name=dict(type='str', aliases=['storage_blob']),
- os_disk_caching=dict(type='str', aliases=['disk_caching'], choices=['ReadOnly', 'ReadWrite']),
+ os_disk_caching=dict(type='str', aliases=['disk_caching'], choices=['None', 'ReadOnly', 'ReadWrite']),
os_disk_size_gb=dict(type='int'),
os_disk_encryption_set=dict(type='str'),
managed_disk_type=dict(type='str', choices=['Standard_LRS', 'StandardSSD_LRS', 'StandardSSD_ZRS', 'Premium_LRS', 'Premium_ZRS', 'UltraSSD_LRS']),
@@ -1181,12 +1188,13 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
lun=dict(type='int', required=True),
disk_size_gb=dict(type='int'),
disk_encryption_set=dict(type='str'),
+ managed_disk_id=dict(type='str'),
managed_disk_type=dict(type='str', choices=['Standard_LRS', 'StandardSSD_LRS',
'StandardSSD_ZRS', 'Premium_LRS', 'Premium_ZRS', 'UltraSSD_LRS']),
storage_account_name=dict(type='str'),
storage_container_name=dict(type='str', default='vhds'),
storage_blob_name=dict(type='str'),
- caching=dict(type='str', choices=['ReadOnly', 'ReadWrite'])
+ caching=dict(type='str', choices=['None', 'ReadOnly', 'ReadWrite'])
)
),
plan=dict(type='dict'),
@@ -1965,41 +1973,49 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
count = 0
for data_disk in self.data_disks:
- if not data_disk.get('managed_disk_type'):
- if not data_disk.get('storage_blob_name'):
- data_disk['storage_blob_name'] = self.name + '-data-' + str(count) + '.vhd'
- count += 1
+ data_disk_vhd = None
+ disk_name = None
- if data_disk.get('storage_account_name'):
- data_disk_storage_account = self.get_storage_account(self.resource_group, data_disk['storage_account_name'])
- else:
- data_disk_storage_account = self.create_default_storage_account()
- self.log("data disk storage account:")
- self.log(self.serialize_obj(data_disk_storage_account, 'StorageAccount'), pretty_print=True)
-
- if not data_disk.get('storage_container_name'):
- data_disk['storage_container_name'] = 'vhds'
-
- data_disk_requested_vhd_uri = 'https://{0}.blob.{1}/{2}/{3}'.format(
- data_disk_storage_account.name,
- self._cloud_environment.suffixes.storage_endpoint,
- data_disk['storage_container_name'],
- data_disk['storage_blob_name']
- )
-
- if not data_disk.get('managed_disk_type'):
- data_disk_managed_disk = None
- disk_name = data_disk['storage_blob_name']
- data_disk_vhd = self.compute_models.VirtualHardDisk(uri=data_disk_requested_vhd_uri)
+ if data_disk.get('managed_disk_id'):
+ create_option = self.compute_models.DiskCreateOptionTypes.attach
+ data_disk_managed_disk = self.compute_models.ManagedDiskParameters(id=data_disk.get('managed_disk_id'))
else:
- data_disk_vhd = None
- data_disk_managed_disk = self.compute_models.ManagedDiskParameters(storage_account_type=data_disk['managed_disk_type'])
- if data_disk.get('disk_encryption_set'):
- data_disk_managed_disk.disk_encryption_set = self.compute_models.DiskEncryptionSetParameters(
- id=data_disk['disk_encryption_set']
+ create_option = self.compute_models.DiskCreateOptionTypes.empty
+
+ if not data_disk.get('managed_disk_type'):
+ if not data_disk.get('storage_blob_name'):
+ data_disk['storage_blob_name'] = self.name + '-data-' + str(count) + '.vhd'
+ count += 1
+
+ if data_disk.get('storage_account_name'):
+ data_disk_storage_account = self.get_storage_account(self.resource_group, data_disk['storage_account_name'])
+ else:
+ data_disk_storage_account = self.create_default_storage_account()
+ self.log("data disk storage account:")
+ self.log(self.serialize_obj(data_disk_storage_account, 'StorageAccount'), pretty_print=True)
+
+ if not data_disk.get('storage_container_name'):
+ data_disk['storage_container_name'] = 'vhds'
+
+ data_disk_requested_vhd_uri = 'https://{0}.blob.{1}/{2}/{3}'.format(
+ data_disk_storage_account.name,
+ self._cloud_environment.suffixes.storage_endpoint,
+ data_disk['storage_container_name'],
+ data_disk['storage_blob_name']
)
- disk_name = self.name + "-datadisk-" + str(count)
- count += 1
+
+ if not data_disk.get('managed_disk_type'):
+ data_disk_managed_disk = None
+ disk_name = data_disk['storage_blob_name']
+ data_disk_vhd = self.compute_models.VirtualHardDisk(uri=data_disk_requested_vhd_uri)
+ else:
+ data_disk_managed_disk = self.compute_models.ManagedDiskParameters(storage_account_type=data_disk['managed_disk_type'])
+ if data_disk.get('disk_encryption_set'):
+ data_disk_managed_disk.disk_encryption_set = self.compute_models.DiskEncryptionSetParameters(
+ id=data_disk['disk_encryption_set']
+ )
+ disk_name = self.name + "-datadisk-" + str(count)
+ count += 1
data_disk['caching'] = data_disk.get(
'caching', 'ReadOnly'
@@ -2010,7 +2026,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
name=disk_name,
vhd=data_disk_vhd,
caching=data_disk['caching'],
- create_option=self.compute_models.DiskCreateOptionTypes.empty,
+ create_option=create_option,
disk_size_gb=data_disk['disk_size_gb'],
managed_disk=data_disk_managed_disk,
))
@@ -2941,8 +2957,6 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
pip = self.network_models.PublicIPAddress(id=pip_facts.id, location=pip_facts.location, resource_guid=pip_facts.resource_guid, sku=sku)
self.tags['_own_pip_'] = self.name + '01'
- self.tags['_own_nsg_'] = self.name + '01'
-
parameters = self.network_models.NetworkInterface(
location=self.location,
ip_configurations=[
@@ -2961,6 +2975,7 @@ class AzureRMVirtualMachine(AzureRMModuleBase):
parameters.network_security_group = self.network_models.NetworkSecurityGroup(id=group.id,
location=group.location,
resource_guid=group.resource_guid)
+ self.tags['_own_nsg_'] = self.name + '01'
parameters.ip_configurations[0].public_ip_address = pip
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetinstance.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetinstance.py
index 402af0072..97878dcf4 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetinstance.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetinstance.py
@@ -295,16 +295,35 @@ class AzureRMVirtualMachineScaleSetInstance(AzureRMModuleBase):
def format_response(self, item):
d = item.as_dict()
- iv = self.mgmt_client.virtual_machine_scale_set_vms.get_instance_view(resource_group_name=self.resource_group,
- vm_scale_set_name=self.vmss_name,
- instance_id=d.get('instance_id', None)).as_dict()
- power_state = ""
- for index in range(len(iv['statuses'])):
- code = iv['statuses'][index]['code'].split('/')
- if code[0] == 'PowerState':
- power_state = code[1]
- break
- d = {
+ instance = None
+ power_state = ''
+ if d.get('provisioning_state', None) is not None:
+ iv = self.mgmt_client.virtual_machine_scale_set_vms.get_instance_view(resource_group_name=self.resource_group,
+ vm_scale_set_name=self.vmss_name,
+ instance_id=d.get('instance_id', None)).as_dict()
+ for index in range(len(iv['statuses'])):
+ code = iv['statuses'][index]['code'].split('/')
+ if code[0] == 'PowerState':
+ power_state = code[1]
+ break
+ else:
+ try:
+ instance = self.compute_client.virtual_machines.instance_view(self.resource_group, d.get('instance_id', None)).as_dict()
+ vm_instance = self.compute_client.virtual_machines.get(self.resource_group, d.get('instance_id', None)).as_dict()
+ except Exception as exc:
+ self.fail("Getting Flexible VMSS instance failed, name {0} instance view - {1}".format(d.get('instance_id'), str(exc)))
+
+ for index in range(len(instance['statuses'])):
+ code = instance['statuses'][index]['code'].split('/')
+ if code[0] == 'PowerState':
+ power_state = code[1]
+ elif code[0] == 'OSState' and code[1] == 'generalized':
+ power_state = 'generalized'
+ break
+ elif code[0] == 'ProvisioningState' and code[1] == 'failed':
+ power_state = ''
+ break
+ dd = {
'id': d.get('id'),
'tags': d.get('tags'),
'instance_id': d.get('instance_id'),
@@ -312,7 +331,9 @@ class AzureRMVirtualMachineScaleSetInstance(AzureRMModuleBase):
'power_state': power_state,
'protection_policy': d.get('protection_policy')
}
- return d
+ if d.get('provisioning_state') is None:
+ dd['tags'] = vm_instance.get('tags', None)
+ return dd
def main():
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetinstance_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetinstance_info.py
index 47a3d3318..ba94461cb 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetinstance_info.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualmachinescalesetinstance_info.py
@@ -211,16 +211,35 @@ class AzureRMVirtualMachineScaleSetVMInfo(AzureRMModuleBase):
def format_response(self, item):
d = item.as_dict()
- iv = self.mgmt_client.virtual_machine_scale_set_vms.get_instance_view(resource_group_name=self.resource_group,
- vm_scale_set_name=self.vmss_name,
- instance_id=d.get('instance_id', None)).as_dict()
- power_state = ""
- for index in range(len(iv['statuses'])):
- code = iv['statuses'][index]['code'].split('/')
- if code[0] == 'PowerState':
- power_state = code[1]
- break
- d = {
+ instance = None
+ power_state = ''
+ if d.get('provisioning_state') is not None:
+ iv = self.mgmt_client.virtual_machine_scale_set_vms.get_instance_view(resource_group_name=self.resource_group,
+ vm_scale_set_name=self.vmss_name,
+ instance_id=d.get('instance_id', None)).as_dict()
+ for index in range(len(iv['statuses'])):
+ code = iv['statuses'][index]['code'].split('/')
+ if code[0] == 'PowerState':
+ power_state = code[1]
+ break
+ else:
+ try:
+ instance = self.compute_client.virtual_machines.instance_view(self.resource_group, d.get('instance_id', None)).as_dict()
+ vm_instance = self.compute_client.virtual_machines.get(self.resource_group, d.get('instance_id', None)).as_dict()
+ except Exception as exc:
+ self.fail("Getting Flexible VMSS instance instance failed, name {0} instance view - {1}".format(d.get('instance_id'), str(exc)))
+
+ for index in range(len(instance['statuses'])):
+ code = instance['statuses'][index]['code'].split('/')
+ if code[0] == 'PowerState':
+ power_state = code[1]
+ elif code[0] == 'OSState' and code[1] == 'generalized':
+ power_state = 'generalized'
+ break
+ elif code[0] == 'ProvisioningState' and code[1] == 'failed':
+ power_state = ''
+ break
+ dd = {
'resource_group': self.resource_group,
'id': d.get('id', None),
'tags': d.get('tags', None),
@@ -230,10 +249,17 @@ class AzureRMVirtualMachineScaleSetVMInfo(AzureRMModuleBase):
'provisioning_state': d.get('provisioning_state', None),
'power_state': power_state,
'vm_id': d.get('vm_id', None),
- 'image_reference': d.get('storage_profile').get('image_reference', None),
- 'computer_name': d.get('os_profile').get('computer_name', None)
}
- return d
+ if d.get('provisioning_state') is not None:
+ dd['image_reference'] = d.get('storage_profile').get('image_reference', None)
+ dd['computer_name'] = d.get('os_profile').get('computer_name', None)
+ else:
+ dd['image_reference'] = vm_instance.get('storage_profile').get('image_reference', None)
+ dd['computer_name'] = vm_instance.get('os_profile').get('computer_name', None)
+ dd['provisioning_state'] = vm_instance.get('provisioning_state', None)
+ dd['tags'] = vm_instance.get('tags', None)
+ dd['vm_id'] = vm_instance.get('vm_id')
+ return dd
def main():
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetworkgatewaynatrule.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetworkgatewaynatrule.py
new file mode 100644
index 000000000..51b7da15e
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetworkgatewaynatrule.py
@@ -0,0 +1,354 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_virtualnetworkgatewaynatrule
+
+version_added: "2.4.0"
+
+short_description: Create, update or delete a nat rule of the virtual network gateway
+
+description:
+    - Create, update or delete a nat rule of the virtual network gateway.
+
+options:
+    resource_group:
+        description:
+            - The resource group of the virtual network gateway.
+        type: str
+        required: true
+    virtual_network_gateway_name:
+        description:
+            - The name of the virtual network gateway.
+        type: str
+        required: true
+ name:
+ description:
+            - The name of the resource that is unique within a resource group.
+ type: str
+ required: true
+ type_properties_type:
+ description:
+ - The type of NAT rule for VPN NAT.
+ type: str
+ choices:
+ - Dynamic
+ - Static
+ mode:
+ description:
+ - The Source NAT direction of a VPN NAT.
+ type: str
+ choices:
+ - EgressSnat
+ - IngressSnat
+ ip_configuration_id:
+ description:
+ - The IP Configuration ID this NAT rule applies to.
+ type: str
+ external_mappings:
+ description:
+ - The private IP address external mapping for NAT.
+ type: list
+ elements: str
+ internal_mappings:
+ description:
+ - The private IP address internal mapping for NAT.
+ type: list
+ elements: str
+ state:
+ description:
+ - Use C(present) to create or update the virtual network gateway nat rule.
+ - Use C(absent) to delete the nat rule.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+extends_documentation_fragment:
+ - azure.azcollection.azure
+ - azure.azcollection.azure_tags
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred Sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Create a virtual network nat rule
+ azure_rm_virtualnetworkgatewaynatrule:
+ resource_group: "{{ resource_group }}"
+ virtual_network_gateway_name: "{{ vngname }}"
+ name: "{{ natrulename }}"
+ type_properties_type: Dynamic
+ ip_configuration_id: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Network/virtualNetworkGateways/testRG/ipConfigurations/ipconfig"
+ mode: EgressSnat
+ internal_mappings:
+ - 10.1.0.0/24
+ external_mappings:
+ - 192.168.1.0/24
+
+- name: Delete the virtual network nat rule
+ azure_rm_virtualnetworkgatewaynatrule:
+ resource_group: "{{ resource_group }}"
+ virtual_network_gateway_name: "{{ vngname }}"
+ name: "{{ natrulename }}"
+ state: absent
+'''
+
+RETURN = '''
+state:
+ description:
+ - Gets the nat rules for a particular virtual network gateway
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - The resource ID.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Network/virtualNetworkGateways/vng01/natRules/natrule"
+ internal_mappings:
+ description:
+ - The private IP address internal mapping for NAT.
+ type: list
+ returned: always
+ sample: ["10.1.0.0/24"]
+ external_mappings:
+ description:
+ - The private IP address external mapping for NAT.
+ type: list
+ returned: always
+ sample: ["192.168.1.0/24"]
+ ip_configuration_id:
+ description:
+            - The IP Configuration ID this NAT rule applies to.
+ type: str
+ returned: always
+ sample: "/subscriptions/subid/resourceGroups/rg1/providers/Microsoft.Network/virtualNetworkGateways/gateway1/ipConfigurations/default"
+ type_properties_type:
+ description:
+ - The type of NAT rule for VPN NAT.
+ type: str
+ returned: always
+ sample: Static
+ mode:
+ description:
+ - The Source NAT direction of a VPN NAT.
+ type: str
+ returned: always
+ sample: EgressSnat
+ name:
+ description:
+ - The resource name.
+ type: str
+ returned: always
+ sample: natrule_name
+ resource_group:
+ description:
+ - The resource group name.
+ type: str
+ returned: always
+ sample: testRG
+ etag:
+ description:
+ - A unique read-only string that changes whenever the resource is updated.
+ type: str
+ returned: always
+ sample: b5a32693-2e75-49e0-9137-ded19db658d6
+ provisioning_state:
+ description:
+ - The provisioning state of the nat rule resource.
+ type: str
+ returned: always
+ sample: Succeeded
+ type:
+ description:
+ - The resource type.
+ type: str
+ returned: always
+ sample: Microsoft.Network/virtualNetworkGateways/natRules
+'''
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.core.exceptions import HttpResponseError
+ from azure.core.polling import LROPoller
+except Exception:
+ # handled in azure_rm_common
+ pass
+
+
+class AzureRMVirtualNetworkNatGateway(AzureRMModuleBase):
+ """Utility class to get Azure Kubernetes Service Credentials facts"""
+
+ def __init__(self):
+
+ self.module_arg_spec = dict(
+ name=dict(type='str', required=True),
+ resource_group=dict(type='str', required=True),
+ virtual_network_gateway_name=dict(type='str', required=True),
+ type_properties_type=dict(type='str', choices=['Dynamic', 'Static']),
+ mode=dict(type='str', choices=['EgressSnat', 'IngressSnat']),
+ ip_configuration_id=dict(type='str'),
+ external_mappings=dict(type='list', elements='str'),
+ internal_mappings=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent'])
+ )
+
+ self.type_properties_type = None
+ self.mode = None
+ self.ip_configuration_id = None
+ self.external_mappings = None
+ self.internal_mappings = None
+
+ self.results = dict(
+ changed=False,
+ state=[],
+ )
+ required_if = [('type_properties_type', 'Dynamic', ['ip_configuration_id'])]
+
+ super(AzureRMVirtualNetworkNatGateway, self).__init__(derived_arg_spec=self.module_arg_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ supports_tags=True,
+ facts_module=False)
+
+ def exec_module(self, **kwargs):
+
+ for key in list(self.module_arg_spec):
+ setattr(self, key, kwargs[key])
+
+ old_response = self.get_nat_rule()
+ changed = False
+ response = None
+
+ if self.state == 'present':
+ if old_response is not None:
+ if self.type_properties_type is not None and self.type_properties_type != old_response['type_properties_type']:
+ self.fail("NAT type_properties_type cannot be changed.")
+ else:
+ self.type_properties_type = old_response['type_properties_type']
+ if self.mode is not None and self.mode != old_response['mode']:
+ self.fail("NAT mode cannot be changed.")
+ else:
+ self.mode = old_response['mode']
+ if self.ip_configuration_id is not None and self.ip_configuration_id != old_response['ip_configuration_id']:
+ changed = True
+ else:
+ self.ip_configuration_id = old_response['ip_configuration_id']
+ if self.internal_mappings is not None and old_response['internal_mappings'] != self.internal_mappings:
+ changed = True
+ else:
+ self.internal_mappings = old_response['internal_mappings']
+
+ if self.external_mappings is not None and self.external_mappings != old_response['external_mappings']:
+ changed = True
+ else:
+ self.external_mappings = old_response['external_mappings']
+ else:
+ changed = True
+
+ internal_mappings = None
+ external_mappings = None
+ if self.internal_mappings is not None:
+ internal_mappings = [self.network_models.VpnNatRuleMapping(address_space=item) for item in self.internal_mappings]
+ if self.external_mappings is not None:
+ external_mappings = [self.network_models.VpnNatRuleMapping(address_space=item) for item in self.external_mappings]
+
+ natrule_resource = self.network_models.VirtualNetworkGatewayNatRule(name=self.name,
+ type_properties_type=self.type_properties_type,
+ mode=self.mode,
+ ip_configuration_id=self.ip_configuration_id,
+ internal_mappings=internal_mappings,
+ external_mappings=external_mappings)
+ if changed:
+ if not self.check_mode:
+ response = self.create_or_update_local_network_gateway(natrule_resource)
+ else:
+ if not self.check_mode:
+ if old_response is not None:
+ self.delete_local_network_gateway()
+ changed = True
+ response = None
+ else:
+ changed = True
+
+ if response is None:
+ response = old_response
+ self.results['state'] = response
+ self.results['changed'] = changed
+ return self.results
+
+ def get_nat_rule(self):
+ """Gets the specified nat rule"""
+ response = None
+ try:
+ response = self.network_client.virtual_network_gateway_nat_rules.get(self.resource_group, self.virtual_network_gateway_name, self.name)
+ except HttpResponseError as ec:
+ self.log("Gets the specified local network gateway in a resource group Failed, Exception as {0}".format(ec))
+ return None
+ return self.format_response(response)
+
+ def create_or_update_local_network_gateway(self, body):
+ """Create or Update local network gateway"""
+ response = None
+ try:
+ response = self.network_client.virtual_network_gateway_nat_rules.begin_create_or_update(self.resource_group,
+ self.virtual_network_gateway_name, self.name, body)
+ if isinstance(response, LROPoller):
+ response = self.get_poller_result(response)
+ except HttpResponseError as ec:
+ self.fail("Create or Updated a local network gateway in a resource group Failed, Exception as {0}".format(ec))
+
+ return self.format_response(response)
+
+ def delete_local_network_gateway(self):
+ """Deletes the specified local network gateway"""
+ try:
+ self.network_client.virtual_network_gateway_nat_rules.begin_delete(self.resource_group, self.virtual_network_gateway_name, self.name)
+ except HttpResponseError as ec:
+ self.fail("Deletes the specified nat rule, Exception as {0}".format(ec))
+ return None
+
+ def format_response(self, item):
+ result = dict(
+ resource_group=self.resource_group,
+ id=item.id,
+ name=item.name,
+ type=item.type,
+ etag=item.etag,
+ provisioning_state=item.provisioning_state,
+ type_properties_type=item.type_properties_type,
+ mode=item.mode,
+ internal_mappings=list(),
+ external_mappings=list(),
+ ip_configuration_id=item.ip_configuration_id
+ )
+
+ if item.internal_mappings is not None:
+ for value in item.internal_mappings:
+ result['internal_mappings'].append(value.address_space)
+ if item.external_mappings is not None:
+ for value in item.external_mappings:
+ result['external_mappings'].append(value.address_space)
+ return result
+
+
+def main():
+ """Main module execution code path"""
+
+ AzureRMVirtualNetworkNatGateway()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetworkgatewaynatrule_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetworkgatewaynatrule_info.py
new file mode 100644
index 000000000..8aeb6649d
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_virtualnetworkgatewaynatrule_info.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024 xuzhang3 (@xuzhang3), Fred-sun (@Fred-sun)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_virtualnetworkgatewaynatrule_info
+
+version_added: "2.4.0"
+
+short_description: Gets or lists nat rules for a particular virtual network gateway
+
+description:
+ - Gets or list nat rules for a particular virtual network gateway.
+
+options:
+    resource_group:
+        description:
+            - The resource group of the virtual network gateway.
+        type: str
+        required: true
+    virtual_network_gateway_name:
+        description:
+            - The name of the virtual network gateway.
+        type: str
+        required: true
+ name:
+ description:
+ - The name of the nat rule.
+ type: str
+
+extends_documentation_fragment:
+ - azure.azcollection.azure
+
+author:
+ - xuzhang3 (@xuzhang3)
+ - Fred Sun (@Fred-sun)
+'''
+
+EXAMPLES = '''
+- name: Gets the nat rule by the name
+ azure_rm_virtualnetworkgatewaynatrule_info:
+ resource_group: "{{ resource_group }}"
+ virtual_network_gateway_name: "{{ local_networkgateway_name }}"
+ name: "{{ name }}"
+
+- name: List all nat rules for a particular virtual network gateway
+ azure_rm_virtualnetworkgatewaynatrule_info:
+ resource_group: "{{ resource_group }}"
+ virtual_network_gateway_name: "{{ local_networkgateway_name }}"
+'''
+
+RETURN = '''
+state:
+ description:
+ - Gets the nat rules for a particular virtual network gateway
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - The resource ID.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Network/virtualNetworkGateways/vng01/natRules/natrule"
+ internal_mappings:
+ description:
+ - The private IP address internal mapping for NAT.
+ type: list
+ returned: always
+ sample: ["10.1.0.0/24"]
+ external_mappings:
+ description:
+ - The private IP address external mapping for NAT.
+ type: list
+ returned: always
+ sample: ["192.168.1.0/24"]
+ ip_configuration_id:
+ description:
+            - The IP Configuration ID this NAT rule applies to.
+ type: str
+ returned: always
+ sample: "/subscriptions/subid/resourceGroups/rg1/providers/Microsoft.Network/virtualNetworkGateways/gateway1/ipConfigurations/default"
+ type_properties_type:
+ description:
+ - The type of NAT rule for VPN NAT.
+ type: str
+ returned: always
+ sample: Static
+ mode:
+ description:
+ - The Source NAT direction of a VPN NAT.
+ type: str
+ returned: always
+ sample: EgressSnat
+ name:
+ description:
+ - The resource name.
+ type: str
+ returned: always
+ sample: natrule_name
+ resource_group:
+ description:
+ - The resource group name.
+ type: str
+ returned: always
+ sample: testRG
+ etag:
+ description:
+ - A unique read-only string that changes whenever the resource is updated.
+ type: str
+ returned: always
+ sample: b5a32693-2e75-49e0-9137-ded19db658d6
+ provisioning_state:
+ description:
+ - The provisioning state of the nat rule resource.
+ type: str
+ returned: always
+ sample: Succeeded
+ type:
+ description:
+ - The resource type.
+ type: str
+ returned: always
+ sample: Microsoft.Network/virtualNetworkGateways/natRules
+'''
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.core.exceptions import HttpResponseError
+except Exception:
+ # handled in azure_rm_common
+ pass
+
+
+class AzureRMVirtualNetworkGatewayNatRuleInfo(AzureRMModuleBase):
+ """Utility class to get Azure Kubernetes Service Credentials facts"""
+
+ def __init__(self):
+
+ self.module_args = dict(
+ name=dict(type='str'),
+ resource_group=dict(type='str', required=True),
+ virtual_network_gateway_name=dict(type='str', required=True),
+ )
+
+ self.name = None
+
+ self.results = dict(
+ changed=False,
+ state=[],
+ )
+
+ super(AzureRMVirtualNetworkGatewayNatRuleInfo, self).__init__(derived_arg_spec=self.module_args,
+ supports_check_mode=True,
+ supports_tags=False,
+ facts_module=True)
+
+ def exec_module(self, **kwargs):
+
+ for key in self.module_args:
+ setattr(self, key, kwargs[key])
+
+ if self.name is not None:
+ self.results['state'] = self.get_by_name()
+ else:
+ self.results['state'] = self.list_by_virtual_network_gateway()
+
+ return self.results
+
+ def get_by_name(self):
+ """Gets the nat rule by name"""
+ response = None
+
+ try:
+ response = self.network_client.virtual_network_gateway_nat_rules.get(self.resource_group, self.virtual_network_gateway_name, self.name)
+ except HttpResponseError as ec:
+ self.log("Gets the nat rule by name got a Exception, Exception as {0}".format(ec))
+ if response:
+ return [self.format_response(response)]
+ else:
+ return []
+
+ def list_by_virtual_network_gateway(self):
+ """Gets all the nat rule in the local network gateway"""
+ response = None
+ try:
+ response = self.network_client.virtual_network_gateway_nat_rules.list_by_virtual_network_gateway(self.resource_group,
+ self.virtual_network_gateway_name)
+ except HttpResponseError as ec:
+ self.log("Gets all nat rule by the local network gateway got Exception, Exception as {0}".format(ec))
+
+ if response:
+ return [self.format_response(item) for item in response]
+ else:
+ return []
+
+ def format_response(self, item):
+ result = dict(
+ resource_group=self.resource_group,
+ id=item.id,
+ name=item.name,
+ type=item.type,
+ etag=item.etag,
+ provisioning_state=item.provisioning_state,
+ type_properties_type=item.type_properties_type,
+ mode=item.mode,
+ internal_mappings=list(),
+ external_mappings=list(),
+ ip_configuration_id=item.ip_configuration_id
+ )
+
+ if item.internal_mappings is not None:
+ for value in item.internal_mappings:
+ result['internal_mappings'].append(value.address_space)
+ if item.external_mappings is not None:
+ for value in item.external_mappings:
+ result['external_mappings'].append(value.address_space)
+ return result
+
+
+def main():
+ """Main module execution code path"""
+
+ AzureRMVirtualNetworkGatewayNatRuleInfo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_vmsku_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_vmsku_info.py
new file mode 100644
index 000000000..a367bd692
--- /dev/null
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_vmsku_info.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2024
+# Nir Argaman <nargaman@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: azure_rm_vmsku_info
+
+version_added: "2.4.0"
+
+short_description: Get compute-related SKUs list
+
+description:
+ - Get details for compute-related resource SKUs.
+
+options:
+ location:
+ description:
+ - A region supported by current subscription.
+ type: str
+ resource_type:
+ description:
+ - Resource types e.g. "availabilitySets", "snapshots", "disks", etc.
+ type: str
+ size:
+ description:
+ - Size name, partial name is accepted.
+ type: str
+ zone:
+ description:
+ - Show skus supporting availability zones.
+ type: bool
+ default: False
+
+extends_documentation_fragment:
+ - azure.azcollection.azure
+
+author:
+ - Nir Argaman (@nirarg)
+
+'''
+
+EXAMPLES = '''
+- name: Gather Resource Group info
+ azure.azcollection.azure_rm_resourcegroup_info:
+ name: "{{ resource_group }}"
+ register: rg_info
+
+- name: List available VM SKUs
+ azure.azcollection.azure_rm_vmsku_info:
+ location: "{{ rg_info.resourcegroups.0.location }}"
+ resource_type: "virtualMachines"
+ size: "standard_B1"
+ zone: true
+ register: available_skus_result
+'''
+
+RETURN = '''
+available_skus:
+ description:
+ - List of compute-related resource SKUs.
+ returned: always
+ type: complex
+ contains:
+ resource_type:
+ description:
+ - The type of resource the SKU applies to.
+ returned: always
+ type: str
+ sample: "virtual_machine"
+ name:
+ description:
+ - The name of SKU.
+ returned: always
+ type: str
+ sample: "Standard_B1s"
+ tier:
+ description:
+ - Specifies the tier of virtual machines in a scale set.
+ returned: always
+ type: str
+ sample: "Standard"
+ size:
+ description:
+ - The Size of the SKU.
+ returned: always
+ type: str
+ sample: "B1s"
+ family:
+ description:
+ - The Family of this particular SKU.
+ returned: always
+ type: str
+ sample: "standardBSFamily"
+ locations:
+ description:
+ - The set of locations that the SKU is available.
+ returned: always
+ type: list
+ sample: ["eastus"]
+ location_info:
+ description:
+ - A list of locations and availability zones in those locations where the SKU is available.
+ returned: always
+ type: complex
+ contains:
+ location:
+ description:
+ - Location of the SKU.
+ type: str
+ returned: always
+ sample: "eastus"
+ zones:
+ description:
+ - List of availability zones where the SKU is supported.
+ type: list
+ returned: always
+ sample: ["1", "2", "3"]
+ zone_details:
+ description:
+ - Details of capabilities available to a SKU in specific zones.
+ returned: always
+ type: complex
+ contains:
+ capabilities:
+ description:
+ - A list of capabilities that are available for the SKU in the specified list of zones.
+ type: complex
+ returned: always
+ contains:
+ name:
+ description:
+ - An invariant to describe the feature.
+ type: str
+ returned: always
+ sample: "ultrassdavailable"
+ value:
+ description:
+ - An invariant if the feature is measured by quantity.
+ type: str
+ returned: always
+ sample: "True"
+ capabilities:
+ description:
+ - A name value pair to describe the capability.
+ returned: always
+ type: complex
+ contains:
+ name:
+ description:
+ - An invariant to describe the feature.
+ type: str
+ returned: always
+ sample: "ultrassdavailable"
+ value:
+ description:
+ - An invariant if the feature is measured by quantity.
+ type: str
+ returned: always
+ sample: "True"
+ restrictions:
+ description:
+ - The restrictions because of which SKU cannot be used. This is empty if there are no restrictions.
+ returned: always
+ type: complex
+ contains:
+ type:
+ description:
+ - The type of restrictions.
+ type: str
+ returned: always
+ sample: "location"
+ values:
+ description:
+ - The value of restrictions. If the restriction type is set to location. This would be different locations where the SKU is restricted.
+ type: str
+ returned: always
+ sample: ["eastus"]
+ restriction_info:
+ description:
+ - The information about the restriction where the SKU cannot be used.
+ returned: always
+ type: complex
+ contains:
+ locations:
+ description:
+ - Locations where the SKU is restricted.
+ type: list
+ sample: ["location"]
+ zones:
+ description:
+ - List of availability zones where the SKU is restricted.
+ type: list
+ sample: ["1", "2"]
+ reason_code:
+ description:
+ - The reason for restriction.
+ type: str
+ sample: "QuotaId"
+'''
+
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.mgmt.compute import ComputeManagementClient
+ from azure.core.exceptions import HttpResponseError
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMVmskuInfo(AzureRMModuleBase):
+ def __init__(self):
+
+ self.module_arg_spec = dict(
+ location=dict(type='str'),
+ resource_type=dict(type='str'),
+ size=dict(type='str'),
+ zone=dict(type='bool', default=False)
+ )
+
+ self.results = dict(
+ available_skus=[],
+ count=0
+ )
+ self.location = None
+ self.resource_type = None
+ self.size = None
+ self.zone = False
+
+ super(AzureRMVmskuInfo, self).__init__(derived_arg_spec=self.module_arg_spec,
+ supports_check_mode=True,
+ supports_tags=False)
+
+ def list_skus(self):
+ try:
+ compute_client = self.get_mgmt_svc_client(ComputeManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2021-07-01')
+ skus_result = compute_client.resource_skus.list()
+ available_skus = []
+ for sku_info in skus_result:
+ if self.location and not _match_location(self.location, sku_info.locations):
+ continue
+ if not _is_sku_available(sku_info, self.zone):
+ continue
+ if self.resource_type and not sku_info.resource_type.lower() == self.resource_type.lower():
+ continue
+ if self.size and not (sku_info.resource_type == 'virtualMachines' and self.size.lower() in sku_info.name.lower()):
+ continue
+ if self.zone and not (sku_info.location_info and sku_info.location_info[0].zones):
+ continue
+ available_skus.append(sku_info.as_dict())
+ return available_skus
+ except HttpResponseError as e:
+ # Handle exceptions
+ raise e
+
+ def exec_module(self, **kwargs):
+ for key in self.module_arg_spec:
+ setattr(self, key, kwargs[key])
+
+ available_skus = self.list_skus()
+ self.results['available_skus'] = available_skus
+ self.results['count'] = len(available_skus)
+ return self.results
+
+
+def _match_location(loc, locations):
+ return next((x for x in locations if x.lower() == loc.lower()), None)
+
+
+def _is_sku_available(sku_info, zone):
+ """
+ The SKU is unavailable in the following cases:
+ 1. regional restriction and the region is restricted
+ 2. parameter "zone" is input which indicates only showing skus with availability zones.
+ Meanwhile, zonal restriction and all zones are restricted
+ """
+ is_available = True
+ is_restrict_zone = False
+ is_restrict_location = False
+ if not sku_info.restrictions:
+ return is_available
+ for restriction in sku_info.restrictions:
+ if restriction.reason_code == 'NotAvailableForSubscription':
+ if restriction.type == 'Zone' and not (
+ set(sku_info.location_info[0].zones or []) - set(restriction.restriction_info.zones or [])):
+ is_restrict_zone = True
+ if restriction.type == 'Location' and (
+ sku_info.location_info[0].location in (restriction.restriction_info.locations or [])):
+ is_restrict_location = True
+ if is_restrict_location or (is_restrict_zone and zone):
+ is_available = False
+ break
+ return is_available
+
+
+def main():
+ AzureRMVmskuInfo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp.py
index e58cbcd43..622c5dd5a 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp.py
@@ -213,7 +213,191 @@ options:
- stopped
- restarted
default: started
-
+ site_auth_settings:
+ description:
+ - Configuration settings for the Azure App Service Authentication / Authorization feature.
+ type: dict
+ suboptions:
+ kind:
+ description:
+ - Kind of resource.
+ type: str
+ enabled:
+ description:
+ - Whether enable or disable the Authentication / Authorization feature for the current app.
+ type: bool
+ runtime_version:
+ description:
+ - The RuntimeVersion of the Authentication / Authorization feature in use for the current app.
+ type: str
+ unauthenticated_client_action:
+ description:
+ - The action to take when an unauthenticated client attempts to access the app.
+ type: str
+ choices:
+ - RedirectToLoginPage
+ - AllowAnonymous
+ token_store_enabled:
+ description:
+ - Whether to use App Service Token Store.
+ type: bool
+ allowed_external_redirect_urls:
+ description:
+ - External URLs that can be redirected to as part of logging in or logging out of the app.
+ - Note that the query string part of the URL is ignored.
+ type: list
+ elements: str
+ default_provider:
+ description:
+ - The default authentication provider to use when multiple providers are configured.
+ type: str
+ choices:
+ - AzureActiveDirectory
+ - Facebook
+ - Google
+ - MicrosoftAccount
+ - Twitter
+ - Github
+ token_refresh_extension_hours:
+ description:
+ - The number of hours after session token expiration that a session token can be used to call the token refresh API.
+ type: float
+ client_id:
+ description:
+ - The Client ID of this relying party application, known as the client_id.
+ type: str
+ client_secret:
+ description:
+ - The Client Secret of this relying party application (in Azure Active Directory, this is also referred to as the Key).
+ type: str
+ client_secret_setting_name:
+ description:
+ - The app setting name that contains the client secret of the relying party application.
+ type: str
+ client_secret_certificate_thumbprint:
+ description:
+ - An alternative to the client secret, that is the thumbprint of a certificate used for signing purposes.
+ - This property acts as a replacement for the Client Secret. It is also optional.
+ type: str
+ issuer:
+ description:
+ - The OpenID Connect Issuer URI that represents the entity which issues access tokens for this application.
+ type: str
+ allowed_audiences:
+ description:
+ - Allowed audience values to consider when validating JWTs issued by Azure Active Directory.
+ type: list
+ elements: str
+ aad_claims_authorization:
+ description:
+ - Gets a JSON string containing the Azure AD Acl settings.
+ type: str
+ additional_login_params:
+ description:
+ - Login parameters to send to the OpenID Connect authorization endpoint when a user logs in.
+ - Each parameter must be in the form "key=value".
+ type: str
+ google_client_id:
+ description:
+ - The OpenID Connect Client ID for the Google web application.
+ type: str
+ google_client_secret:
+ description:
+ - The client secret associated with the Google web application.
+ type: str
+ google_client_secret_setting_name:
+ description:
+ - The app setting name that contains the client secret associated with the Google web application.
+ type: str
+ google_o_auth_scopes:
+ description:
+ - The OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication.
+ - This setting is optional. If not specified, "openid", "profile", and "email" are used as default scopes.
+ type: list
+ elements: str
+ facebook_app_id:
+ description:
+ - The App ID of the Facebook app used for login.
+ type: str
+ facebook_app_secret:
+ description:
+ - The App Secret of the Facebook app used for Facebook Login.
+ type: str
+ facebook_app_secret_setting_name:
+ description:
+ - The app setting name that contains the app secret used for Facebook Login.
+ type: str
+ facebook_o_auth_scopes:
+ description:
+ - The OAuth 2.0 scopes that will be requested as part of Facebook for Facebook Login.
+ type: list
+ elements: str
+ git_hub_client_id:
+ description:
+ - The Client Id of the GitHub app used for login.
+ type: str
+ git_hub_client_secret:
+ description:
+ - The Client Secret of the GitHub app used for Github Login.
+ type: str
+ git_hub_client_secret_setting_name:
+ description:
+ - The app setting name that contains the client secret of the Github app used for GitHub Login.
+ type: str
+ git_hub_o_auth_scopes:
+ description:
+ - The OAuth 2.0 scopes that will be requested as part of GitHub Login authentication.
+ - This setting is optional.
+ type: list
+ elements: str
+ twitter_consumer_key:
+ description:
+ - The OAuth 1.0a consumer key of the Twitter application used for sign-in.
+ type: str
+ twitter_consumer_secret:
+ description:
+ - The OAuth 1.0a consumer secret of the Twitter application used for sign-in.
+ - This setting is required for enabling Twitter Sign-In.
+ type: str
+ twitter_consumer_secret_setting_name:
+ description:
+ - The app setting name that contains the OAuth 1.0a consumer secret of the Twitter application used for sign-in.
+ type: str
+ microsoft_account_client_id:
+ description:
+ - The OAuth 2.0 client ID that was created for the app used for authentication.
+ - This setting is required for enabling Microsoft Account authentication.
+ type: str
+ microsoft_account_client_secret:
+ description:
+ - The OAuth 2.0 client secret that was created for the app used for authentication.
+ type: str
+ microsoft_account_client_secret_setting_name:
+ description:
+ - The app setting name containing the OAuth 2.0 client secret that was created for the app used for authentication.
+ type: str
+ microsoft_account_o_auth_scopes:
+ description:
+ - The OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication.
+ type: list
+ elements: str
+ is_auth_from_file:
+ description:
+ - If I(is_auth_from_file=true), the auth config settings should be read from a file.
+ type: str
+ choices:
+ - 'true'
+ - 'false'
+ auth_file_path:
+ description:
+ - The path of the config file containing auth settings.
+ - If the path is relative, base will the site's root directory.
+ type: str
+ config_version:
+ description:
+ - The ConfigVersion of the Authentication / Authorization feature in use for the current app.
+ - The setting in this value can control the behavior of the control plane for Authentication / Authorization.
+ type: str
state:
description:
- State of the Web App.
@@ -337,6 +521,26 @@ EXAMPLES = '''
java_container: "Tomcat"
java_container_version: "8.5"
+- name: Create a windows web app with site_auth_settings
+ azure_rm_webapp:
+ resource_group: myResourceGroup
+ name: myWindowWebapp
+ site_auth_settings:
+ client_id: 'xxxxxxxx-xxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
+ default_provider: 'MicrosoftAccount'
+ runtime_version: '-2'
+ token_refresh_extension_hours: 120
+ unauthenticated_client_action: 'RedirectToLoginPage'
+ client_secret: 'xxxxxxxx-xxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
+ token_store_enabled: false
+ enabled: true
+ is_auth_from_file: false
+ plan:
+ resource_group: myResourceGroup
+ name: myLinuxwebapp
+ is_linux: false
+ sku: S1
+
- name: Create a linux web app with python framework
azure_rm_webapp:
resource_group: myResourceGroup
@@ -360,11 +564,12 @@ id:
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myWebApp"
'''
-from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
try:
from azure.core.exceptions import ResourceNotFoundError
from azure.core.polling import LROPoller
+ from azure.core.exceptions import HttpResponseError
from azure.mgmt.web.models import Site, AppServicePlan, SkuDescription, NameValuePair, SiteSourceControl, StringDictionary
except ImportError:
# This is handled in azure_rm_common
@@ -399,6 +604,48 @@ framework_spec = dict(
)
+site_auth_settings_spec = dict(
+ kind=dict(type='str'),
+ enabled=dict(type='bool'),
+ runtime_version=dict(type='str'),
+ unauthenticated_client_action=dict(type='str', choices=["RedirectToLoginPage", "AllowAnonymous"]),
+ token_store_enabled=dict(type='bool'),
+ allowed_external_redirect_urls=dict(type='list', elements='str'),
+ default_provider=dict(type='str', choices=["Facebook", "Google", "MicrosoftAccount", "Twitter", "Github", "AzureActiveDirectory"]),
+ token_refresh_extension_hours=dict(type='float', no_log=True),
+ client_id=dict(type='str'),
+ client_secret=dict(type='str', no_log=True),
+ client_secret_setting_name=dict(type='str'),
+ client_secret_certificate_thumbprint=dict(type='str', no_log=True),
+ issuer=dict(type='str'),
+ allowed_audiences=dict(type='list', elements='str'),
+ additional_login_params=dict(type='str'),
+ aad_claims_authorization=dict(type='str'),
+ google_client_id=dict(type='str'),
+ google_client_secret=dict(type='str', no_log=True),
+ google_client_secret_setting_name=dict(type='str'),
+ google_o_auth_scopes=dict(type='list', elements='str'),
+ facebook_app_id=dict(type='str'),
+ facebook_app_secret=dict(type='str', no_log=True),
+ facebook_app_secret_setting_name=dict(type='str'),
+ facebook_o_auth_scopes=dict(type='list', elements='str'),
+ git_hub_client_id=dict(type='str'),
+ git_hub_client_secret=dict(type='str', no_log=True),
+ git_hub_client_secret_setting_name=dict(type='str'),
+ git_hub_o_auth_scopes=dict(type='list', elements='str'),
+ twitter_consumer_key=dict(type='str', no_log=True),
+ twitter_consumer_secret=dict(type='str', no_log=True),
+ twitter_consumer_secret_setting_name=dict(type='str'),
+ microsoft_account_client_id=dict(type='str'),
+ microsoft_account_client_secret=dict(type='str', no_log=True),
+ microsoft_account_client_secret_setting_name=dict(type='str'),
+ microsoft_account_o_auth_scopes=dict(type='list', elements='str'),
+ is_auth_from_file=dict(type='str', choices=['true', 'false']),
+ auth_file_path=dict(type='str'),
+ config_version=dict(type='str')
+)
+
+
def _normalize_sku(sku):
if sku is None:
return sku
@@ -462,10 +709,10 @@ def webapp_to_dict(webapp):
class Actions:
- CreateOrUpdate, UpdateAppSettings, Delete = range(3)
+ CreateOrUpdate, UpdateAppSettings, UpdateAuthSettings, Delete = range(4)
-class AzureRMWebApps(AzureRMModuleBase):
+class AzureRMWebApps(AzureRMModuleBaseExt):
"""Configuration class for an Azure RM Web App resource"""
def __init__(self):
@@ -536,6 +783,10 @@ class AzureRMWebApps(AzureRMModuleBase):
choices=['started', 'stopped', 'restarted'],
default='started'
),
+ site_auth_settings=dict(
+ type='dict',
+ options=site_auth_settings_spec
+ ),
state=dict(
type='str',
default='present',
@@ -559,6 +810,7 @@ class AzureRMWebApps(AzureRMModuleBase):
self.site_config = dict()
self.app_settings = dict()
self.app_settings_strDic = None
+ self.site_auth_settings = None
# app service plan
self.plan = None
@@ -631,6 +883,7 @@ class AzureRMWebApps(AzureRMModuleBase):
if old_response:
self.results['id'] = old_response['id']
+ self.results['site_auth_settings'] = self.get_auth_settings()
if self.state == 'present':
if not self.plan and not old_response:
@@ -723,6 +976,8 @@ class AzureRMWebApps(AzureRMModuleBase):
to_be_updated = True
self.to_do.append(Actions.CreateOrUpdate)
+ if self.site_auth_settings is not None:
+ self.to_do.append(Actions.UpdateAuthSettings)
self.site.tags = self.tags
# service plan is required for creation
@@ -764,6 +1019,7 @@ class AzureRMWebApps(AzureRMModuleBase):
if update_tags:
to_be_updated = True
+ self.to_do.append(Actions.CreateOrUpdate)
# check if root level property changed
if self.is_updatable_property_changed(old_response):
@@ -799,6 +1055,12 @@ class AzureRMWebApps(AzureRMModuleBase):
for key in self.app_settings.keys():
self.app_settings_strDic[key] = self.app_settings[key]
+ if self.site_auth_settings is not None:
+ result = dict(compare=[])
+ if not self.default_compare({}, self.site_auth_settings, self.results['site_auth_settings'], '', dict(compare=[])):
+ to_be_updated = True
+ self.to_do.append(Actions.UpdateAuthSettings)
+
elif self.state == 'absent':
if old_response:
self.log("Delete Web App instance")
@@ -830,6 +1092,12 @@ class AzureRMWebApps(AzureRMModuleBase):
update_response = self.update_app_settings()
self.results['id'] = update_response.id
+ if Actions.UpdateAuthSettings in self.to_do:
+ auth_settings = self.update_auth_settings(self.site_auth_settings)
+ self.results['site_auth_settings'] = auth_settings
+ else:
+ self.results['site_auth_settings'] = self.get_auth_settings()
+
webapp = None
if old_response:
webapp = old_response
@@ -1026,6 +1294,36 @@ class AzureRMWebApps(AzureRMModuleBase):
self.fail("Failed to list application settings for web app {0} in resource group {1}: {2}".format(
self.name, self.resource_group, str(ex)))
+ def update_auth_settings(self, body):
+ '''
+ Update web app auth settings
+ :return: deserialized updating response
+ '''
+ self.log("Update web app auth settings")
+
+ try:
+ response = self.web_client.web_apps.update_auth_settings(self.resource_group, self.name, body)
+ self.log("Response : {0}".format(response))
+ return response.as_dict()
+ except HttpResponseError as ex:
+ self.fail("Failed to update web app auth settings for web app {0} in resource group {1}: {2}".format(
+ self.name, self.resource_group, str(ex)))
+
+ def get_auth_settings(self):
+ '''
+ Get the web app auth settings
+ :return: deserialized updating response
+ '''
+ self.log("Get the web app auth settings")
+
+ try:
+ response = self.web_client.web_apps.get_auth_settings(self.resource_group, self.name)
+ self.log("Response : {0}".format(response))
+ return response.as_dict()
+ except HttpResponseError as ex:
+ self.log("Failed to Get web app auth settings for web app {0} in resource group {1}: {2}".format(
+ self.name, self.resource_group, str(ex)))
+
def update_app_settings(self):
'''
Update application settings
@@ -1040,9 +1338,8 @@ class AzureRMWebApps(AzureRMModuleBase):
response = self.web_client.web_apps.update_application_settings(
resource_group_name=self.resource_group, name=self.name, app_settings=settings)
self.log("Response : {0}".format(response))
-
return response
- except Exception as ex:
+ except HttpResponseError as ex:
self.fail("Failed to update application settings for web app {0} in resource group {1}: {2}".format(
self.name, self.resource_group, str(ex)))
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp_info.py
index c0ec6b42d..22a5fea73 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp_info.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webapp_info.py
@@ -244,11 +244,18 @@ webapps:
returned: always
type: dict
sample: { tag1: abc }
+ site_auth_settings:
+ description:
+ - The Authentication / Authorization settings associated with web app.
+ type: dict
+ returned: always
+ sample: {}
'''
try:
from azure.core.exceptions import ResourceNotFoundError
from azure.core.polling import LROPoller
from azure.mgmt.web.models import CsmPublishingProfileOptions
+ from azure.core.exceptions import HttpResponseError
except Exception:
# This is handled in azure_rm_common
pass
@@ -390,6 +397,14 @@ class AzureRMWebAppInfo(AzureRMModuleBase):
self.fail('Error getting web app {0} publishing credentials - {1}'.format(request_id, str(ex)))
return response
+ def get_auth_settings(self, resource_group, name):
+ self.log('Get web app {0} auth settings'.format(name))
+ try:
+ response = self.web_client.web_apps.get_auth_settings(resource_group_name=resource_group, name=name)
+ return response.as_dict()
+ except HttpResponseError as ex:
+ self.log('Error getting web app {0} auth setting, exception as {1}'.format(name, str(ex)))
+
def get_webapp_ftp_publish_url(self, resource_group, name):
self.log('Get web app {0} app publish profile'.format(name))
@@ -430,6 +445,7 @@ class AzureRMWebAppInfo(AzureRMModuleBase):
app_settings = self.list_webapp_appsettings(resource_group, name)
publish_cred = self.get_publish_credentials(resource_group, name)
ftp_publish_url = self.get_webapp_ftp_publish_url(resource_group, name)
+ site_auth_settings = self.get_auth_settings(resource_group, name)
except Exception:
pass
return self.construct_curated_webapp(webapp=pip,
@@ -437,7 +453,8 @@ class AzureRMWebAppInfo(AzureRMModuleBase):
app_settings=app_settings,
deployment_slot=None,
ftp_publish_url=ftp_publish_url,
- publish_credentials=publish_cred)
+ publish_credentials=publish_cred,
+ site_auth_settings=site_auth_settings)
def construct_curated_webapp(self,
webapp,
@@ -445,7 +462,8 @@ class AzureRMWebAppInfo(AzureRMModuleBase):
app_settings=None,
deployment_slot=None,
ftp_publish_url=None,
- publish_credentials=None):
+ publish_credentials=None,
+ site_auth_settings=None):
curated_output = dict()
curated_output['id'] = webapp['id']
curated_output['name'] = webapp['name']
@@ -514,6 +532,9 @@ class AzureRMWebAppInfo(AzureRMModuleBase):
if publish_credentials and self.return_publish_profile:
curated_output['publishing_username'] = publish_credentials.publishing_user_name
curated_output['publishing_password'] = publish_credentials.publishing_password
+
+ # curated auth settings
+ curated_output['site_auth_settings'] = site_auth_settings if site_auth_settings is not None else {}
return curated_output
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction.py
index bfe2b7591..4f44067b1 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction.py
@@ -39,7 +39,6 @@ options:
- The web app's HTTP access restrictions.
type: list
elements: dict
- default: []
suboptions:
name:
description:
@@ -66,7 +65,28 @@ options:
description:
- IPv4 address (with subnet mask) of the access restriction.
type: str
- required: true
+ subnet_traffic_tag:
+ description:
+ - (internal) Subnet traffic tags.
+ type: int
+ vnet_traffic_tag:
+ description:
+ - (internal) Vnet traffic tag.
+ type: int
+ tag:
+ description:
+ - IP restriction rule description.
+ type: str
+ choices:
+ - Default
+ - XffProxy
+ - ServiceTag
+ vnet_subnet_resource_id:
+ description:
+ - The Virtual network related subnet resource id.
+ - Only one of I(ip_address) or I(vnet_subnet_resource_id) may be specified.
+ - Parameter I(vnet_subnet_resource_id) cannot be used with I(subnet_traffic_tag) or I(vnet_traffic_tag) at the same time.
+ type: str
scm_ip_security_restrictions:
description:
- >-
@@ -74,7 +94,6 @@ options:
the SCM restrictions will be configured but not used.
type: list
elements: dict
- default: []
suboptions:
name:
description:
@@ -101,7 +120,28 @@ options:
description:
- IPv4 address (with subnet mask) of the access restriction.
type: str
- required: true
+ subnet_traffic_tag:
+ description:
+ - (internal) Subnet traffic tags.
+ type: int
+ vnet_traffic_tag:
+ description:
+ - (internal) Vnet traffic tag.
+ type: int
+ tag:
+ description:
+ - IP restriction rule description.
+ type: str
+ choices:
+ - Default
+ - XffProxy
+ - ServiceTag
+ vnet_subnet_resource_id:
+ description:
+ - The Virtual network related subnet resource id.
+ - Only one of I(ip_address) or I(vnet_subnet_resource_id) may be specified.
+ - Parameter I(vnet_subnet_resource_id) cannot be used with I(subnet_traffic_tag) or I(vnet_traffic_tag) at the same time.
+ type: str
scm_ip_security_restrictions_use_main:
description:
- >-
@@ -131,6 +171,12 @@ EXAMPLES = '''
action: "Allow"
ip_address: "2.2.2.2/24"
priority: 2
+ - name: "Datacenter 3"
+ action: Allow
+ priority: 3
+ description: "for test 02"
+ tag: XffProxy
+ vnet_subnet_resource_id: "{{ subnet_output.state.id }}"
scm_ip_security_restrictions_use_main: true
- name: Delete web app network access restrictions.
@@ -178,6 +224,30 @@ ip_security_restrictions:
returned: always
type: str
sample: 1.1.1.1/32
+ subnet_traffic_tag:
+ description:
+ - (internal) Subnet traffic tags.
+ type: int
+ returned: always
+ sample: 3
+ vnet_traffic_tag:
+ description:
+ - (internal) Vnet traffic tag.
+ type: int
+ returned: always
+ sample: 3
+ tag:
+ description:
+ - IP restriction rule description.
+ type: str
+ returned: always
+ sample: default
+ vnet_subnet_resource_id:
+ description:
+ - The Virtual network related subnet resource id.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Network/virtualNetworks/vnet01/subnets/subnet01"
scm_ip_security_restrictions:
description:
- The web app's SCM access restrictions.
@@ -215,6 +285,30 @@ scm_ip_security_restrictions:
returned: always
type: str
sample: 1.1.1.1/32
+ subnet_traffic_tag:
+ description:
+ - (internal) Subnet traffic tags.
+ type: int
+ returned: always
+ sample: 3
+ vnet_traffic_tag:
+ description:
+ - (internal) Vnet traffic tag.
+ type: int
+ returned: always
+ sample: 3
+ tag:
+ description:
+ - IP restriction rule description.
+ type: str
+ returned: always
+ sample: default
+ vnet_subnet_resource_id:
+ description:
+ - The Virtual network related subnet resource id.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Network/virtualNetworks/vnet01/subnets/subnet01"
scm_ip_security_restrictions_use_main:
description:
- Whether the HTTP access restrictions are used for SCM access.
@@ -223,7 +317,7 @@ scm_ip_security_restrictions_use_main:
sample: false
'''
-from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
+from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
try:
from azure.mgmt.web.models import IpSecurityRestriction
@@ -236,11 +330,15 @@ ip_restriction_spec = dict(
description=dict(type='str'),
action=dict(type='str', default='Allow', choices=['Allow', 'Deny']),
priority=dict(type='int', required=True),
- ip_address=dict(type='str', required=True),
+ ip_address=dict(type='str'),
+ vnet_subnet_resource_id=dict(type='str'),
+ vnet_traffic_tag=dict(type='int'),
+ subnet_traffic_tag=dict(type='int'),
+ tag=dict(type='str', choices=["Default", "XffProxy", "ServiceTag"]),
)
-class AzureRMWebAppAccessRestriction(AzureRMModuleBase):
+class AzureRMWebAppAccessRestriction(AzureRMModuleBaseExt):
def __init__(self):
@@ -248,8 +346,8 @@ class AzureRMWebAppAccessRestriction(AzureRMModuleBase):
name=dict(type='str', required=True),
resource_group=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent']),
- ip_security_restrictions=dict(type='list', default=[], elements='dict', options=ip_restriction_spec),
- scm_ip_security_restrictions=dict(type='list', default=[], elements='dict', options=ip_restriction_spec),
+ ip_security_restrictions=dict(type='list', elements='dict', options=ip_restriction_spec),
+ scm_ip_security_restrictions=dict(type='list', elements='dict', options=ip_restriction_spec),
scm_ip_security_restrictions_use_main=dict(type='bool', default=False),
)
@@ -263,8 +361,8 @@ class AzureRMWebAppAccessRestriction(AzureRMModuleBase):
self.state = None
self.name = None
self.resource_group = None
- self.ip_security_restrictions = []
- self.scm_ip_security_restrictions = []
+ self.ip_security_restrictions = None
+ self.scm_ip_security_restrictions = None
self.scm_ip_security_restrictions_use_main = False
super(AzureRMWebAppAccessRestriction, self).__init__(self.module_arg_spec,
@@ -318,9 +416,16 @@ class AzureRMWebAppAccessRestriction(AzureRMModuleBase):
return site_config
def has_updates(self, site_config):
- return (site_config.scm_ip_security_restrictions_use_main != self.scm_ip_security_restrictions_use_main or self.ip_security_restrictions and
- self.ip_security_restrictions != self.to_restriction_dict_list(site_config.ip_security_restrictions) or self.scm_ip_security_restrictions and
- self.scm_ip_security_restrictions != self.to_restriction_dict_list(site_config.scm_ip_security_restrictions))
+ changed = False
+ if site_config.scm_ip_security_restrictions_use_main != self.scm_ip_security_restrictions_use_main:
+ changed = True
+ elif not self.default_compare({}, self.ip_security_restrictions,
+ self.to_restriction_dict_list(site_config.ip_security_restrictions), '', dict(compare=[])):
+ changed = True
+ elif not self.default_compare({}, self.scm_ip_security_restrictions,
+ self.to_restriction_dict_list(site_config.scm_ip_security_restrictions), '', dict(compare=[])):
+ changed = True
+ return changed
def has_access_restrictions(self, site_config):
return site_config.ip_security_restrictions or site_config.scm_ip_security_restrictions
@@ -356,6 +461,10 @@ class AzureRMWebAppAccessRestriction(AzureRMModuleBase):
action=restriction_dict['action'],
priority=restriction_dict['priority'],
ip_address=restriction_dict['ip_address'],
+ vnet_subnet_resource_id=restriction_dict['vnet_subnet_resource_id'],
+ vnet_traffic_tag=restriction_dict['vnet_traffic_tag'],
+ subnet_traffic_tag=restriction_dict['subnet_traffic_tag'],
+ tag=restriction_dict['tag'],
)
def to_restriction_dict_list(self, restriction_obj_list):
@@ -379,6 +488,10 @@ class AzureRMWebAppAccessRestriction(AzureRMModuleBase):
action=restriction_obj.action,
priority=restriction_obj.priority,
ip_address=restriction_obj.ip_address,
+ vnet_subnet_resource_id=restriction_obj.vnet_subnet_resource_id,
+ vnet_traffic_tag=restriction_obj.vnet_traffic_tag,
+ subnet_traffic_tag=restriction_obj.subnet_traffic_tag,
+ tag=restriction_obj.tag,
)
diff --git a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction_info.py b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction_info.py
index 2d07bc2de..901d6b806 100644
--- a/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction_info.py
+++ b/ansible_collections/azure/azcollection/plugins/modules/azure_rm_webappaccessrestriction_info.py
@@ -79,6 +79,30 @@ ip_security_restrictions:
returned: always
type: str
sample: 1.1.1.1/32
+ subnet_traffic_tag:
+ description:
+ - (internal) Subnet traffic tags.
+ type: int
+ returned: always
+ sample: 3
+ vnet_traffic_tag:
+ description:
+ - (internal) Vnet traffic tag.
+ type: int
+ returned: always
+ sample: 3
+ tag:
+ description:
+ - IP restriction rule description.
+ type: str
+ returned: always
+ sample: default
+ vnet_subnet_resource_id:
+ description:
+ - The Virtual network related subnet resource id.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Network/virtualNetworks/vnet01/subnets/subnet01"
scm_ip_security_restrictions:
description:
- The web app's SCM access restrictions.
@@ -116,6 +140,30 @@ scm_ip_security_restrictions:
returned: always
type: str
sample: 1.1.1.1/32
+ subnet_traffic_tag:
+ description:
+ - (internal) Subnet traffic tags.
+ type: int
+ returned: always
+ sample: 3
+ vnet_traffic_tag:
+ description:
+ - (internal) Vnet traffic tag.
+ type: int
+ returned: always
+ sample: 3
+ tag:
+ description:
+ - IP restriction rule description.
+ type: str
+ returned: always
+ sample: default
+ vnet_subnet_resource_id:
+ description:
+ - The Virtual network related subnet resource id.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxx-xxx/resourceGroups/testRG/providers/Microsoft.Network/virtualNetworks/vnet01/subnets/subnet01"
scm_ip_security_restrictions_use_main:
description:
- Whether the HTTP access restrictions are used for SCM access.
@@ -196,6 +244,10 @@ class AzureRMWebAppAccessRestrictionInfo(AzureRMModuleBase):
action=restriction_obj.action,
priority=restriction_obj.priority,
ip_address=restriction_obj.ip_address,
+ vnet_subnet_resource_id=restriction_obj.vnet_subnet_resource_id,
+ vnet_traffic_tag=restriction_obj.vnet_traffic_tag,
+ subnet_traffic_tag=restriction_obj.subnet_traffic_tag,
+ tag=restriction_obj.tag,
)