summaryrefslogtreecommitdiffstats
path: root/ansible_collections/community/vmware/plugins
diff options
context:
space:
mode:
Diffstat (limited to 'ansible_collections/community/vmware/plugins')
-rw-r--r--ansible_collections/community/vmware/plugins/connection/__init__.py0
-rw-r--r--ansible_collections/community/vmware/plugins/connection/vmware_tools.py594
-rw-r--r--ansible_collections/community/vmware/plugins/doc_fragments/__init__.py0
-rw-r--r--ansible_collections/community/vmware/plugins/doc_fragments/vmware.py123
-rw-r--r--ansible_collections/community/vmware/plugins/doc_fragments/vmware_rest_client.py65
-rw-r--r--ansible_collections/community/vmware/plugins/httpapi/__init__.py0
-rw-r--r--ansible_collections/community/vmware/plugins/httpapi/vmware.py86
-rw-r--r--ansible_collections/community/vmware/plugins/inventory/__init__.py0
-rw-r--r--ansible_collections/community/vmware/plugins/inventory/vmware_host_inventory.py513
-rw-r--r--ansible_collections/community/vmware/plugins/inventory/vmware_vm_inventory.py901
-rw-r--r--ansible_collections/community/vmware/plugins/module_utils/__init__.py0
-rw-r--r--ansible_collections/community/vmware/plugins/module_utils/version.py28
-rw-r--r--ansible_collections/community/vmware/plugins/module_utils/vm_device_helper.py516
-rw-r--r--ansible_collections/community/vmware/plugins/module_utils/vmware.py1999
-rw-r--r--ansible_collections/community/vmware/plugins/module_utils/vmware_rest_client.py574
-rw-r--r--ansible_collections/community/vmware/plugins/module_utils/vmware_spbm.py60
-rw-r--r--ansible_collections/community/vmware/plugins/modules/__init__.py0
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vcenter_domain_user_group_info.py190
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vcenter_extension.py223
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vcenter_extension_info.py107
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vcenter_folder.py365
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vcenter_license.py272
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vcenter_standard_key_provider.py690
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_about_info.py117
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_category.py378
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_category_info.py120
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_cfg_backup.py233
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_cluster.py218
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_cluster_dpm.py186
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_cluster_drs.py242
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_cluster_drs_recommendations.py119
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_cluster_ha.py535
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_cluster_info.py344
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_cluster_vcls.py204
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_cluster_vsan.py282
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_content_deploy_ovf_template.py441
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_content_deploy_template.py460
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_content_library_info.py168
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_content_library_manager.py438
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_custom_attribute.py175
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_custom_attribute_manager.py242
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_datacenter.py163
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_datacenter_info.py189
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_datastore.py244
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_datastore_cluster.py294
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_datastore_cluster_manager.py232
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_datastore_info.py349
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_datastore_maintenancemode.py213
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_deploy_ovf.py752
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_drs_group.py569
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_drs_group_info.py271
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_drs_group_manager.py518
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_drs_rule_info.py254
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_dvs_host.py411
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_dvs_portgroup.py1030
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_dvs_portgroup_find.py205
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_dvs_portgroup_info.py310
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_dvswitch.py1064
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_dvswitch_info.py302
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_dvswitch_lacp.py431
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_dvswitch_nioc.py422
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_dvswitch_pvlans.py526
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_dvswitch_uplink_pg.py505
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_evc_mode.py228
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_export_ovf.py361
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_first_class_disk.py267
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_folder_info.py224
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest.py3601
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_boot_info.py203
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_boot_manager.py421
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_controller.py555
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_cross_vc_clone.py418
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_custom_attribute_defs.py141
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_custom_attributes.py358
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_customization_info.py193
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_disk.py1237
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_disk_info.py198
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_file_operation.py520
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_find.py145
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_info.py320
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_instant_clone.py591
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_move.py246
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_network.py829
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_powerstate.py349
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_register_operation.py283
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_screenshot.py277
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_sendkey.py405
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_serial_port.py580
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_snapshot.py462
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_snapshot_info.py183
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_storage_policy.py458
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_tools_info.py193
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_tools_upgrade.py220
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_tools_wait.py208
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_tpm.py227
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_vgpu.py390
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_vgpu_info.py175
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_guest_video.py368
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host.py830
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_acceptance.py186
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_active_directory.py360
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_auto_start.py379
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_capability_info.py222
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_config_info.py112
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_config_manager.py190
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_custom_attributes.py194
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_datastore.py357
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_disk_info.py166
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_dns.py459
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_dns_info.py122
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_facts.py422
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_feature_info.py135
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_firewall_info.py156
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_firewall_manager.py433
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_hyperthreading.py249
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_ipv6.py227
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_iscsi.py889
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_iscsi_info.py218
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_kernel_manager.py209
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_lockdown.py215
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_lockdown_exceptions.py189
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_logbundle.py269
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_logbundle_info.py142
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_ntp.py387
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_ntp_info.py127
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_package_info.py120
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_passthrough.py359
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_powermgmt_policy.py224
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_powerstate.py218
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_scanhba.py171
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_scsidisk_info.py173
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_service_info.py147
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_service_manager.py219
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_snmp.py535
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_sriov.py367
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_ssl_info.py140
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_tcpip_stacks.py623
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_user_manager.py262
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_vmhba_info.py221
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_host_vmnic_info.py349
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_local_role_info.py131
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_local_role_manager.py403
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_local_user_info.py155
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_local_user_manager.py183
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_maintenancemode.py201
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_migrate_vmk.py230
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_object_custom_attributes_info.py209
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_object_rename.py332
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_object_role_permission.py349
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_object_role_permission_info.py278
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_portgroup.py1046
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_portgroup_info.py217
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_recommended_datastore.py111
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_resource_pool.py490
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_resource_pool_info.py133
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_tag.py266
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_tag_info.py165
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_tag_manager.py424
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_target_canonical_info.py177
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_vc_infraprofile_info.py285
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_vcenter_settings.py980
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_vcenter_settings_info.py209
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_vcenter_statistics.py517
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_vm_config_option.py316
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_vm_host_drs_rule.py427
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_vm_info.py479
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_vm_shell.py365
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_vm_storage_policy.py361
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_vm_storage_policy_info.py161
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_vm_vm_drs_rule.py409
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_vm_vss_dvs_migrate.py151
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_vmkernel.py1121
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_vmkernel_info.py195
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_vmotion.py557
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_vsan_cluster.py120
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_vsan_hcl_db.py111
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_vsan_health_info.py197
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_vsan_release_catalog.py100
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_vspan_session.py669
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_vswitch.py803
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vmware_vswitch_info.py207
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vsan_health_silent_checks.py151
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vsphere_copy.py213
-rw-r--r--ansible_collections/community/vmware/plugins/modules/vsphere_file.py350
-rw-r--r--ansible_collections/community/vmware/plugins/plugin_utils/inventory.py440
185 files changed, 64662 insertions, 0 deletions
diff --git a/ansible_collections/community/vmware/plugins/connection/__init__.py b/ansible_collections/community/vmware/plugins/connection/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/connection/__init__.py
diff --git a/ansible_collections/community/vmware/plugins/connection/vmware_tools.py b/ansible_collections/community/vmware/plugins/connection/vmware_tools.py
new file mode 100644
index 000000000..533ededb1
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/connection/vmware_tools.py
@@ -0,0 +1,594 @@
+# Copyright: (c) 2018, Deric Crago <deric.crago@gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
# Plugin documentation block (YAML) consumed by ansible-doc and the config
# loader; option values resolve from vars/env entries in the order listed.
DOCUMENTATION = r'''
  author:
    - Deric Crago (@dericcrago) <deric.crago@gmail.com>
  name: vmware_tools
  short_description: Execute tasks inside a VM via VMware Tools
  description:
    - Use VMware tools to run tasks in, or put/fetch files to guest operating systems running in VMware infrastructure.
    - In case of Windows VMs, set C(ansible_shell_type) to C(powershell).
    - Does not work with 'become'.
  requirements:
    - requests (Python library)
  options:
    vmware_host:
      description:
        - FQDN or IP Address for the connection (vCenter or ESXi Host).
      env:
        - name: VI_SERVER
        - name: VMWARE_HOST
      vars:
        - name: ansible_host
        - name: ansible_vmware_host
      required: true
    vmware_user:
      description:
        - Username for the connection.
        - "Requires the following permissions on the VM:
             - VirtualMachine.GuestOperations.Execute
             - VirtualMachine.GuestOperations.Modify
             - VirtualMachine.GuestOperations.Query"
      env:
        - name: VI_USERNAME
        - name: VMWARE_USER
      vars:
        - name: ansible_vmware_user
      required: true
    vmware_password:
      description:
        - Password for the connection.
      env:
        - name: VI_PASSWORD
        - name: VMWARE_PASSWORD
      vars:
        - name: ansible_vmware_password
      required: true
    vmware_port:
      description:
        - Port for the connection.
      env:
        - name: VI_PORTNUMBER
        - name: VMWARE_PORT
      vars:
        - name: ansible_port
        - name: ansible_vmware_port
      required: false
      default: 443
    validate_certs:
      description:
        - Verify SSL for the connection.
        - "Note: This will validate certs for both C(vmware_host) and the ESXi host running the VM."
      env:
        - name: VMWARE_VALIDATE_CERTS
      vars:
        - name: ansible_vmware_validate_certs
      default: true
      type: bool
    vm_path:
      description:
        - Mutually exclusive with vm_uuid
        - VM path absolute to the connection.
        - "vCenter Example: C(Datacenter/vm/Discovered virtual machine/testVM)."
        - "ESXi Host Example: C(ha-datacenter/vm/testVM)."
        - Must include VM name, appended to 'folder' as would be passed to M(community.vmware.vmware_guest).
        - Needs to include I(vm) between the Datacenter and the rest of the VM path.
        - Datacenter default value for ESXi server is C(ha-datacenter).
        - Folder I(vm) is not visible in the vSphere Web Client but necessary for VMware API to work.
      vars:
        - name: ansible_vmware_guest_path
      required: false
    vm_uuid:
      description:
        - Mutually exclusive with vm_path
        - VM UUID to the connection.
        - UUID of the virtual machine from property config.uuid of vmware_vm_inventory plugin
      vars:
        - name: ansible_vmware_guest_uuid
      required: false
    vm_user:
      description:
        - VM username.
        - C(ansible_vmware_tools_user) is used for connecting to the VM.
        - C(ansible_user) is used by Ansible on the VM.
      vars:
        - name: ansible_user
        - name: ansible_vmware_tools_user
      required: true
    vm_password:
      description:
        - Password for the user in guest operating system.
      vars:
        - name: ansible_password
        - name: ansible_vmware_tools_password
      required: true
    exec_command_sleep_interval:
      description:
        - Time in seconds to sleep between execution of command.
      vars:
        - name: ansible_vmware_tools_exec_command_sleep_interval
      default: 0.5
      type: float
    file_chunk_size:
      description:
        - File chunk size.
        - "(Applicable when writing a file to disk, example: using the C(fetch) module.)"
      vars:
        - name: ansible_vmware_tools_file_chunk_size
      default: 128
      type: integer
    executable:
      description:
        - shell to use for execution inside container
      default: /bin/sh
      ini:
        - section: defaults
          key: executable
      env:
        - name: ANSIBLE_EXECUTABLE
      vars:
        - name: ansible_executable
        - name: ansible_vmware_tools_executable
'''
+
# Must be named EXAMPLES (upper case): ansible-doc and the collection sanity
# tests look for the module-level names DOCUMENTATION / EXAMPLES / RETURN;
# a lowercase 'example' is silently ignored by the doc tooling.
EXAMPLES = r'''
# example vars.yml
---
ansible_connection: vmware_tools
ansible_user: "{{ ansible_vmware_tools_user }}"

ansible_vmware_host: vcenter.example.com
ansible_vmware_user: administrator@vsphere.local
ansible_vmware_password: Secr3tP4ssw0rd!12
ansible_vmware_validate_certs: false # default is true

# vCenter Connection VM Path Example
ansible_vmware_guest_path: DATACENTER/vm/FOLDER/{{ inventory_hostname }}
# ESXi Connection VM Path Example
ansible_vmware_guest_path: ha-datacenter/vm/{{ inventory_hostname }}

ansible_vmware_tools_user: root
ansible_vmware_tools_password: MyR00tPassw0rD

# if the target VM guest is Windows set the 'ansible_shell_type' to 'powershell'
ansible_shell_type: powershell


# example playbook_linux.yml
---
- name: Test VMware Tools Connection Plugin for Linux
  hosts: linux
  tasks:
    - command: whoami

    - ping:

    - copy:
        src: foo
        dest: /home/user/foo

    - fetch:
        src: /home/user/foo
        dest: linux-foo
        flat: true

    - file:
        path: /home/user/foo
        state: absent


# example playbook_windows.yml
---
- name: Test VMware Tools Connection Plugin for Windows
  hosts: windows
  tasks:
    - win_command: whoami

    - win_ping:

    - win_copy:
        src: foo
        dest: C:\Users\user\foo

    - fetch:
        src: C:\Users\user\foo
        dest: windows-foo
        flat: true

    - win_file:
        path: C:\Users\user\foo
        state: absent
'''
+
+import re
+from os.path import exists, getsize
+from socket import gaierror
+from ssl import SSLError
+from time import sleep
+import traceback
+
+REQUESTS_IMP_ERR = None
+PYVMOMI_IMP_ERR = None
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ HAS_REQUESTS = False
+
+try:
+ from requests.packages import urllib3
+ HAS_URLLIB3 = True
+except ImportError:
+ try:
+ import urllib3
+ HAS_URLLIB3 = True
+ except ImportError:
+ HAS_URLLIB3 = False
+
+from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleConnectionFailure
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.plugins.connection import ConnectionBase
+from ansible.module_utils.basic import missing_required_lib
+
+try:
+ from pyVim.connect import Disconnect, SmartConnect
+ from pyVmomi import vim, vmodl
+
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+ PYVMOMI_IMP_ERR = traceback.format_exc()
+
+
+class Connection(ConnectionBase):
+ """VMware Tools Connection."""
+
+ transport = 'community.vmware.vmware_tools'
+
+ @property
+ def vmware_host(self):
+ """Read-only property holding the connection address."""
+ return self.get_option("vmware_host")
+
+ @property
+ def validate_certs(self):
+ """Read-only property holding whether the connection should validate certs."""
+ return self.get_option("validate_certs")
+
+ @property
+ def authManager(self):
+ """Guest Authentication Manager."""
+ return self._si.content.guestOperationsManager.authManager
+
+ @property
+ def fileManager(self):
+ """Guest File Manager."""
+ return self._si.content.guestOperationsManager.fileManager
+
+ @property
+ def processManager(self):
+ """Guest Process Manager."""
+ return self._si.content.guestOperationsManager.processManager
+
+ @property
+ def windowsGuest(self):
+ """Return if VM guest family is windows."""
+ return self.vm.guest.guestFamily == "windowsGuest"
+
+ def __init__(self, *args, **kwargs):
+ """init."""
+ super(Connection, self).__init__(*args, **kwargs)
+ if hasattr(self, "_shell") and self._shell.SHELL_FAMILY == "powershell":
+ self.module_implementation_preferences = (".ps1", ".exe", "")
+ self.become_methods = ["runas"]
+ self.allow_executable = False
+ self.has_pipelining = False
+ self.allow_extras = True
+ self._si = None
+
+ def _establish_connection(self):
+ connection_kwargs = {
+ "host": self.vmware_host,
+ "user": self.get_option("vmware_user"),
+ "pwd": self.get_option("vmware_password"),
+ "port": self.get_option("vmware_port"),
+ }
+
+ if not self.validate_certs:
+ if HAS_URLLIB3:
+ urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+ connection_kwargs['disableSslCertValidation'] = True
+
+ try:
+ self._si = SmartConnect(**connection_kwargs)
+ except SSLError:
+ raise AnsibleError("SSL Error: Certificate verification failed.")
+ except (gaierror):
+ raise AnsibleError("Connection Error: Unable to connect to '%s'." % to_native(connection_kwargs["host"]))
+ except vim.fault.InvalidLogin as e:
+ raise AnsibleError("Connection Login Error: %s" % to_native(e.msg))
+
+ def _establish_vm(self, check_vm_credentials=True):
+ searchIndex = self._si.content.searchIndex
+ self.vm = None
+ if self.get_option("vm_path") is not None:
+ self.vm = searchIndex.FindByInventoryPath(self.get_option("vm_path"))
+ if self.vm is None:
+ raise AnsibleError("Unable to find VM by path '%s'" % to_native(self.get_option("vm_path")))
+ elif self.get_option("vm_uuid") is not None:
+ self.vm = searchIndex.FindByUuid(None, self.get_option("vm_uuid"), True)
+ if self.vm is None:
+ raise AnsibleError("Unable to find VM by uuid '%s'" % to_native(self.get_option("vm_uuid")))
+
+ self.vm_auth = vim.NamePasswordAuthentication(
+ username=self.get_option("vm_user"), password=self.get_option("vm_password"), interactiveSession=False
+ )
+
+ try:
+ if check_vm_credentials:
+ self.authManager.ValidateCredentialsInGuest(vm=self.vm, auth=self.vm_auth)
+ except vim.fault.InvalidPowerState as e:
+ raise AnsibleError("VM Power State Error: %s" % to_native(e.msg))
+ except vim.fault.RestrictedVersion as e:
+ raise AnsibleError("Restricted Version Error: %s" % to_native(e.msg))
+ except vim.fault.GuestOperationsUnavailable as e:
+ raise AnsibleError("VM Guest Operations (VMware Tools) Error: %s" % to_native(e.msg))
+ except vim.fault.InvalidGuestLogin as e:
+ raise AnsibleError("VM Login Error: %s" % to_native(e.msg))
+ except vim.fault.NoPermission as e:
+ raise AnsibleConnectionFailure("No Permission Error: %s %s" % (to_native(e.msg), to_native(e.privilegeId)))
+ except vmodl.fault.SystemError as e:
+ if e.reason == 'vix error codes = (3016, 0).\n':
+ raise AnsibleConnectionFailure(
+ "Connection failed, is the vm currently rebooting? Reason: %s" % (
+ to_native(e.reason)
+ )
+ )
+ else:
+ raise AnsibleConnectionFailure("Connection failed. Reason %s" % (to_native(e.reason)))
+ except vim.fault.GuestOperationsUnavailable:
+ raise AnsibleConnectionFailure("Cannot connect to guest. Native error: GuestOperationsUnavailable")
+
+ def _connect(self, check_vm_credentials=True):
+ if not HAS_REQUESTS:
+ raise AnsibleError("%s : %s" % (missing_required_lib('requests'), REQUESTS_IMP_ERR))
+
+ if not HAS_PYVMOMI:
+ raise AnsibleError("%s : %s" % (missing_required_lib('PyVmomi'), PYVMOMI_IMP_ERR))
+
+ super(Connection, self)._connect()
+
+ if not self.connected:
+ self._establish_connection()
+ self._establish_vm(check_vm_credentials=check_vm_credentials)
+ self._connected = True
+
+ def close(self):
+ """Close connection."""
+ super(Connection, self).close()
+
+ Disconnect(self._si)
+ self._connected = False
+
+ def reset(self):
+ """Reset the connection to vcenter."""
+ # TODO: Fix persistent connection implementation currently ansible creates new connections to vcenter for each task
+ # therefore we're currently closing a non existing connection here and establish a connection just for being thrown away
+ # right afterwards.
+ self.close()
+ self._connect(check_vm_credentials=False)
+
    def create_temporary_file_in_guest(self, prefix="", suffix=""):
        """Create a temporary file in the VM.

        Returns the guest-side path of the new file. Translates vSphere
        faults into Ansible errors; permission problems are fatal, transient
        guest-state problems raise AnsibleConnectionFailure so the task can
        be retried.
        """
        try:
            return self.fileManager.CreateTemporaryFileInGuest(vm=self.vm, auth=self.vm_auth, prefix=prefix, suffix=suffix)
        except vim.fault.NoPermission as e:
            raise AnsibleError("No Permission Error: %s %s" % (to_native(e.msg), to_native(e.privilegeId)))
        except vmodl.fault.SystemError as e:
            # vix error 3016 is reported while the guest is rebooting.
            if e.reason == 'vix error codes = (3016, 0).\n':
                raise AnsibleConnectionFailure(
                    "Connection failed, is the vm currently rebooting? Reason: %s" % (
                        to_native(e.reason)
                    )
                )
            else:
                raise AnsibleConnectionFailure("Connection failed. Reason %s" % (to_native(e.reason)))
        except vim.fault.GuestOperationsUnavailable:
            raise AnsibleConnectionFailure("Cannot connect to guest. Native error: GuestOperationsUnavailable")
+
+ def _get_program_spec_program_path_and_arguments(self, cmd):
+ if self.windowsGuest:
+ '''
+ we need to warp the execution of powershell into a cmd /c because
+ the call otherwise fails with "Authentication or permission failure"
+ #FIXME: Fix the unecessary invocation of cmd and run the command directly
+ '''
+ program_path = "cmd.exe"
+ arguments = "/c %s" % cmd
+ else:
+ program_path = self.get_option("executable")
+ arguments = re.sub(r"^%s\s*" % program_path, "", cmd)
+
+ return program_path, arguments
+
+ def _get_guest_program_spec(self, cmd, stdout, stderr):
+ guest_program_spec = vim.GuestProgramSpec()
+
+ program_path, arguments = self._get_program_spec_program_path_and_arguments(cmd)
+
+ arguments += " 1> %s 2> %s" % (stdout, stderr)
+
+ guest_program_spec.programPath = program_path
+ guest_program_spec.arguments = arguments
+
+ return guest_program_spec
+
    def _get_pid_info(self, pid):
        """Return the guest process info for *pid*.

        Queries the guest process manager for the single requested PID and
        translates vSphere faults into Ansible errors.
        """
        try:
            processes = self.processManager.ListProcessesInGuest(vm=self.vm, auth=self.vm_auth, pids=[pid])
        except vim.fault.NoPermission as e:
            raise AnsibleError("No Permission Error: %s %s" % (to_native(e.msg), to_native(e.privilegeId)))
        except vmodl.fault.SystemError as e:
            # https://pubs.vmware.com/vsphere-6-5/index.jsp?topic=%2Fcom.vmware.wssdk.smssdk.doc%2Fvmodl.fault.SystemError.html
            # https://github.com/ansible/ansible/issues/57607
            # vix error 1 surfaces on Windows guests while the Netlogon
            # service is stopped or a dcpromo is in progress.
            if e.reason == 'vix error codes = (1, 0).\n':
                raise AnsibleConnectionFailure(
                    "Connection failed, Netlogon service stopped or dcpromo in progress. Reason: %s" % (
                        to_native(e.reason)
                    )
                )
            else:
                raise AnsibleConnectionFailure("Connection plugin failed. Reason: %s" % (to_native(e.reason)))
        except vim.fault.GuestOperationsUnavailable:
            raise AnsibleConnectionFailure("Cannot connect to guest. Native error: GuestOperationsUnavailable")
        except vim.fault.InvalidGuestLogin:
            raise AnsibleConnectionFailure("Guest login failed. Native error: InvalidGuestLogin")

        # ListProcessesInGuest was asked for exactly one pid.
        return processes[0]
+
+ def _fix_url_for_hosts(self, url):
+ """
+ Fix url if connection is a host.
+
+ The host part of the URL is returned as '*' if the hostname to be used is the name of the server to which the call was made. For example, if the call is
+ made to esx-svr-1.domain1.com, and the file is available for download from http://esx-svr-1.domain1.com/guestFile?id=1&token=1234, the URL returned may
+ be http://*/guestFile?id=1&token=1234. The client replaces the asterisk with the server name on which it invoked the call.
+
+ https://code.vmware.com/apis/358/vsphere#/doc/vim.vm.guest.FileManager.FileTransferInformation.html
+ """
+ return url.replace("*", self.vmware_host)
+
+ def _fetch_file_from_vm(self, guestFilePath):
+ try:
+ fileTransferInformation = self.fileManager.InitiateFileTransferFromGuest(vm=self.vm, auth=self.vm_auth, guestFilePath=guestFilePath)
+ except vim.fault.NoPermission as e:
+ raise AnsibleError("No Permission Error: %s %s" % (to_native(e.msg), to_native(e.privilegeId)))
+ except vmodl.fault.SystemError as e:
+ if e.reason == 'vix error codes = (3016, 0).\n':
+ raise AnsibleConnectionFailure(
+ "Connection failed, is the vm currently rebooting? Reason: %s" % (
+ to_native(e.reason)
+ )
+ )
+ else:
+ raise AnsibleConnectionFailure("Connection failed. Reason %s" % (to_native(e.reason)))
+ except vim.fault.GuestOperationsUnavailable:
+ raise AnsibleConnectionFailure("Cannot connect to guest. Native error: GuestOperationsUnavailable")
+
+ url = self._fix_url_for_hosts(fileTransferInformation.url)
+ response = requests.get(url, verify=self.validate_certs, stream=True)
+
+ if response.status_code != 200:
+ raise AnsibleError("Failed to fetch file")
+
+ return response
+
+ def delete_file_in_guest(self, filePath):
+ """Delete file from VM."""
+ try:
+ self.fileManager.DeleteFileInGuest(vm=self.vm, auth=self.vm_auth, filePath=filePath)
+ except vim.fault.NoPermission as e:
+ raise AnsibleError("No Permission Error: %s %s" % (to_native(e.msg), to_native(e.privilegeId)))
+ except vmodl.fault.SystemError as e:
+ if e.reason == 'vix error codes = (3016, 0).\n':
+ raise AnsibleConnectionFailure(
+ "Connection failed, is the vm currently rebooting? Reason: %s" % (
+ to_native(e.reason)
+ )
+ )
+ else:
+ raise AnsibleConnectionFailure("Connection failed. Reason %s" % (to_native(e.reason)))
+ except vim.fault.GuestOperationsUnavailable:
+ raise AnsibleConnectionFailure("Cannot connect to guest. Native error: GuestOperationsUnavailable")
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ """Execute command."""
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ stdout = self.create_temporary_file_in_guest(suffix=".stdout")
+ stderr = self.create_temporary_file_in_guest(suffix=".stderr")
+
+ guest_program_spec = self._get_guest_program_spec(cmd, stdout, stderr)
+
+ try:
+ pid = self.processManager.StartProgramInGuest(vm=self.vm, auth=self.vm_auth, spec=guest_program_spec)
+ except vim.fault.NoPermission as e:
+ raise AnsibleError("No Permission Error: %s %s" % (to_native(e.msg), to_native(e.privilegeId)))
+ except vim.fault.FileNotFound as e:
+ raise AnsibleError("StartProgramInGuest Error: %s" % to_native(e.msg))
+ except vmodl.fault.SystemError as e:
+ if e.reason == 'vix error codes = (3016, 0).\n':
+ raise AnsibleConnectionFailure(
+ "Connection failed, is the vm currently rebooting? Reason: %s" % (
+ to_native(e.reason)
+ )
+ )
+ else:
+ raise AnsibleConnectionFailure("Connection failed. Reason %s" % (to_native(e.reason)))
+ except vim.fault.GuestOperationsUnavailable:
+ raise AnsibleConnectionFailure("Cannot connect to guest. Native error: GuestOperationsUnavailable")
+
+ pid_info = self._get_pid_info(pid)
+
+ while pid_info.endTime is None:
+ sleep(self.get_option("exec_command_sleep_interval"))
+ pid_info = self._get_pid_info(pid)
+
+ stdout_response = self._fetch_file_from_vm(stdout)
+ self.delete_file_in_guest(stdout)
+
+ stderr_response = self._fetch_file_from_vm(stderr)
+ self.delete_file_in_guest(stderr)
+
+ return pid_info.exitCode, stdout_response.text, stderr_response.text
+
+ def fetch_file(self, in_path, out_path):
+ """Fetch file."""
+ super(Connection, self).fetch_file(in_path, out_path)
+
+ in_path_response = self._fetch_file_from_vm(in_path)
+
+ with open(out_path, "wb") as fd:
+ for chunk in in_path_response.iter_content(chunk_size=self.get_option("file_chunk_size")):
+ fd.write(chunk)
+
+ def put_file(self, in_path, out_path):
+ """Put file."""
+ super(Connection, self).put_file(in_path, out_path)
+
+ if not exists(to_bytes(in_path, errors="surrogate_or_strict")):
+ raise AnsibleFileNotFound("file or module does not exist: '%s'" % to_native(in_path))
+
+ try:
+ put_url = self.fileManager.InitiateFileTransferToGuest(
+ vm=self.vm, auth=self.vm_auth, guestFilePath=out_path, fileAttributes=vim.GuestFileAttributes(), fileSize=getsize(in_path), overwrite=True
+ )
+ except vim.fault.NoPermission as e:
+ raise AnsibleError("No Permission Error: %s %s" % (to_native(e.msg), to_native(e.privilegeId)))
+ except vmodl.fault.SystemError as e:
+ if e.reason == 'vix error codes = (3016, 0).\n':
+ raise AnsibleConnectionFailure(
+ "Connection failed, is the vm currently rebooting? Reason: %s" % (
+ to_native(e.reason)
+ )
+ )
+ else:
+ raise AnsibleConnectionFailure("Connection failed. Reason %s" % (to_native(e.reason)))
+ except vim.fault.GuestOperationsUnavailable:
+ raise AnsibleConnectionFailure("Cannot connect to guest. Native error: GuestOperationsUnavailable")
+
+ url = self._fix_url_for_hosts(put_url)
+
+ # file size of 'in_path' must be greater than 0
+ with open(in_path, "rb") as fd:
+ response = requests.put(url, verify=self.validate_certs, data=fd)
+
+ if response.status_code != 200:
+ raise AnsibleError("File transfer failed")
diff --git a/ansible_collections/community/vmware/plugins/doc_fragments/__init__.py b/ansible_collections/community/vmware/plugins/doc_fragments/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/doc_fragments/__init__.py
diff --git a/ansible_collections/community/vmware/plugins/doc_fragments/vmware.py b/ansible_collections/community/vmware/plugins/doc_fragments/vmware.py
new file mode 100644
index 000000000..51f182e87
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/doc_fragments/vmware.py
@@ -0,0 +1,123 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Charles Paul <cpaul@ansible.com>
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2019, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Parameters for VMware modules
+ DOCUMENTATION = r'''
+notes:
+ - All modules require API write access and hence are not supported on a free ESXi license.
+options:
+ hostname:
+ description:
+ - The hostname or IP address of the vSphere vCenter or ESXi server.
+ - If the value is not specified in the task, the value of environment variable C(VMWARE_HOST) will be used instead.
+ - Environment variable support added in Ansible 2.6.
+ type: str
+ username:
+ description:
+ - The username of the vSphere vCenter or ESXi server.
+ - If the value is not specified in the task, the value of environment variable C(VMWARE_USER) will be used instead.
+ - Environment variable support added in Ansible 2.6.
+ type: str
+ aliases: [ admin, user ]
+ password:
+ description:
+ - The password of the vSphere vCenter or ESXi server.
+ - If the value is not specified in the task, the value of environment variable C(VMWARE_PASSWORD) will be used instead.
+ - Environment variable support added in Ansible 2.6.
+ type: str
+ aliases: [ pass, pwd ]
+ validate_certs:
+ description:
+ - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted.
+ - If the value is not specified in the task, the value of environment variable C(VMWARE_VALIDATE_CERTS) will be used instead.
+ - Environment variable support added in Ansible 2.6.
+ - If set to C(true), please make sure Python >= 2.7.9 is installed on the given machine.
+ type: bool
+ default: true
+ port:
+ description:
+ - The port number of the vSphere vCenter or ESXi server.
+ - If the value is not specified in the task, the value of environment variable C(VMWARE_PORT) will be used instead.
+ - Environment variable support added in Ansible 2.6.
+ type: int
+ default: 443
+ proxy_host:
+ description:
+ - Address of a proxy that will receive all HTTPS requests and relay them.
+ - The format is a hostname or an IP.
+ - If the value is not specified in the task, the value of environment variable C(VMWARE_PROXY_HOST) will be used instead.
+ - This feature depends on a version of pyvmomi greater than v6.7.1.2018.12
+ type: str
+ required: false
+ proxy_port:
+ description:
+ - Port of the HTTP proxy that will receive all HTTPS requests and relay them.
+ - If the value is not specified in the task, the value of environment variable C(VMWARE_PROXY_PORT) will be used instead.
+ type: int
+ required: false
+'''
+
+ # This doc fragment is specific to vcenter modules like vcenter_license
+ VCENTER_DOCUMENTATION = r'''
+notes:
+ - All modules require API write access and hence are not supported on a free ESXi license.
+options:
+ hostname:
+ description:
+ - The hostname or IP address of the vSphere vCenter server.
+ - If the value is not specified in the task, the value of environment variable C(VMWARE_HOST) will be used instead.
+ - Environment variable support added in Ansible 2.6.
+ type: str
+ username:
+ description:
+ - The username of the vSphere vCenter server.
+ - If the value is not specified in the task, the value of environment variable C(VMWARE_USER) will be used instead.
+ - Environment variable support added in Ansible 2.6.
+ type: str
+ aliases: [ admin, user ]
+ password:
+ description:
+ - The password of the vSphere vCenter server.
+ - If the value is not specified in the task, the value of environment variable C(VMWARE_PASSWORD) will be used instead.
+ - Environment variable support added in Ansible 2.6.
+ type: str
+ aliases: [ pass, pwd ]
+ validate_certs:
+ description:
+ - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted.
+ - If the value is not specified in the task, the value of environment variable C(VMWARE_VALIDATE_CERTS) will be used instead.
+ - Environment variable support added in Ansible 2.6.
+ - If set to C(true), please make sure Python >= 2.7.9 is installed on the given machine.
+ type: bool
+ default: true
+ port:
+ description:
+ - The port number of the vSphere vCenter server.
+ - If the value is not specified in the task, the value of environment variable C(VMWARE_PORT) will be used instead.
+ - Environment variable support added in Ansible 2.6.
+ type: int
+ default: 443
+ proxy_host:
+ description:
+ - Address of a proxy that will receive all HTTPS requests and relay them.
+ - The format is a hostname or an IP.
+ - If the value is not specified in the task, the value of environment variable C(VMWARE_PROXY_HOST) will be used instead.
+ type: str
+ required: false
+ proxy_port:
+ description:
+ - Port of the HTTP proxy that will receive all HTTPS requests and relay them.
+ - If the value is not specified in the task, the value of environment variable C(VMWARE_PROXY_PORT) will be used instead.
+ type: int
+ required: false
+ '''
diff --git a/ansible_collections/community/vmware/plugins/doc_fragments/vmware_rest_client.py b/ansible_collections/community/vmware/plugins/doc_fragments/vmware_rest_client.py
new file mode 100644
index 000000000..c98c42670
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/doc_fragments/vmware_rest_client.py
@@ -0,0 +1,65 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Parameters for VMware REST Client based modules
+ DOCUMENTATION = r'''
+options:
+ hostname:
+ description:
+ - The hostname or IP address of the vSphere vCenter server.
+ - If the value is not specified in the task, the value of environment variable C(VMWARE_HOST) will be used instead.
+ type: str
+ username:
+ description:
+ - The username of the vSphere vCenter server.
+ - If the value is not specified in the task, the value of environment variable C(VMWARE_USER) will be used instead.
+ type: str
+ aliases: [ admin, user ]
+ password:
+ description:
+ - The password of the vSphere vCenter server.
+ - If the value is not specified in the task, the value of environment variable C(VMWARE_PASSWORD) will be used instead.
+ type: str
+ aliases: [ pass, pwd ]
+ validate_certs:
+ description:
+ - Allows connection when SSL certificates are not valid.
+ - Set to C(false) when certificates are not trusted.
+ - If the value is not specified in the task, the value of environment variable C(VMWARE_VALIDATE_CERTS) will be used instead.
+ type: bool
+ default: true
+ port:
+ description:
+ - The port number of the vSphere vCenter.
+ - If the value is not specified in the task, the value of environment variable C(VMWARE_PORT) will be used instead.
+ type: int
+ default: 443
+ protocol:
+ description:
+ - The connection protocol.
+ type: str
+ choices: [ http, https ]
+ default: https
+ proxy_host:
+ description:
+ - Address of a proxy that will receive all HTTPS requests and relay them.
+ - The format is a hostname or an IP.
+ - If the value is not specified in the task, the value of environment variable C(VMWARE_PROXY_HOST) will be used instead.
+ type: str
+ required: false
+ proxy_port:
+ description:
+ - Port of the HTTP proxy that will receive all HTTPS requests and relay them.
+ - If the value is not specified in the task, the value of environment variable C(VMWARE_PROXY_PORT) will be used instead.
+ type: int
+ required: false
+'''
diff --git a/ansible_collections/community/vmware/plugins/httpapi/__init__.py b/ansible_collections/community/vmware/plugins/httpapi/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/httpapi/__init__.py
diff --git a/ansible_collections/community/vmware/plugins/httpapi/vmware.py b/ansible_collections/community/vmware/plugins/httpapi/vmware.py
new file mode 100644
index 000000000..ba2bda6df
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/httpapi/vmware.py
@@ -0,0 +1,86 @@
+# Copyright: (c) 2018 Red Hat Inc.
+# Copyright: (c) 2019, Ansible Project
+# Copyright: (c) 2019, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author:
+- Abhijeet Kasurde (@Akasurde)
+name: vmware
+short_description: HttpApi Plugin for VMware REST API
+description:
+ - This HttpApi plugin provides methods to connect to VMware vCenter over a HTTP(S)-based APIs.
+'''
+
+import json
+
+from ansible.module_utils.basic import to_text
+from ansible.errors import AnsibleConnectionFailure
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.plugins.httpapi import HttpApiBase
+from ansible.module_utils.connection import ConnectionError
+
+BASE_HEADERS = {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json',
+}
+
+
+class HttpApi(HttpApiBase):
+ def login(self, username, password):
+ if username and password:
+ payload = {}
+ url = '/rest/com/vmware/cis/session'
+ response, response_data = self.send_request(url, payload)
+ else:
+ raise AnsibleConnectionFailure('Username and password are required for login')
+
+ if response == 404:
+ raise ConnectionError(response_data)
+
+ if not response_data.get('value'):
+ raise ConnectionError('Server returned response without token info during connection authentication: %s' % response)
+
+ self.connection._session_uid = "vmware-api-session-id:%s" % response_data['value']
+ self.connection._token = response_data['value']
+
+ def logout(self):
+ response, dummy = self.send_request('/rest/com/vmware/cis/session', None, method='DELETE')
+
+ def get_session_uid(self):
+ return self.connection._session_uid
+
+ def get_session_token(self):
+ return self.connection._token
+
+ def send_request(self, path, body_params, method='POST'):
+ data = json.dumps(body_params) if body_params else '{}'
+
+ try:
+ self._display_request(method=method)
+ response, response_data = self.connection.send(path, data, method=method, headers=BASE_HEADERS, force_basic_auth=True)
+ response_value = self._get_response_value(response_data)
+
+ return response.getcode(), self._response_to_json(response_value)
+ except AnsibleConnectionFailure:
+ return 404, 'Object not found'
+ except HTTPError as e:
+ return e.code, json.loads(e.read())
+
+ def _display_request(self, method='POST'):
+ self.connection.queue_message('vvvv', 'Web Services: %s %s' % (method, self.connection._url))
+
+ def _get_response_value(self, response_data):
+ return to_text(response_data.getvalue())
+
+ def _response_to_json(self, response_text):
+ try:
+ return json.loads(response_text) if response_text else {}
+ # JSONDecodeError only available on Python 3.5+
+ except ValueError:
+ raise ConnectionError('Invalid JSON response: %s' % response_text)
diff --git a/ansible_collections/community/vmware/plugins/inventory/__init__.py b/ansible_collections/community/vmware/plugins/inventory/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/inventory/__init__.py
diff --git a/ansible_collections/community/vmware/plugins/inventory/vmware_host_inventory.py b/ansible_collections/community/vmware/plugins/inventory/vmware_host_inventory.py
new file mode 100644
index 000000000..20a930c47
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/inventory/vmware_host_inventory.py
@@ -0,0 +1,513 @@
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2020, dacrystal
+# Copyright: (c) 2021, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+ name: vmware_host_inventory
+ short_description: VMware ESXi hostsystem inventory source
+ author:
+ - Abhijeet Kasurde (@Akasurde)
+ description:
+ - Get VMware ESXi hostsystem as inventory hosts from VMware environment.
+ - Uses any file which ends with vmware.yml, vmware.yaml, vmware_host_inventory.yml, or vmware_host_inventory.yaml as a YAML configuration file.
+ extends_documentation_fragment:
+ - inventory_cache
+ - constructed
+ requirements:
+ - "vSphere Automation SDK - For tag feature"
+ options:
+ hostname:
+ description: Name of vCenter or ESXi server.
+ required: true
+ env:
+ - name: VMWARE_HOST
+ - name: VMWARE_SERVER
+ username:
+ description:
+ - Name of vSphere user.
+ - Accepts vault encrypted variable.
+ required: true
+ env:
+ - name: VMWARE_USER
+ - name: VMWARE_USERNAME
+ password:
+ description:
+ - Password of vSphere user.
+ - Accepts vault encrypted variable.
+ required: true
+ env:
+ - name: VMWARE_PASSWORD
+ port:
+ description: Port number used to connect to vCenter or ESXi Server.
+ default: 443
+ type: int
+ env:
+ - name: VMWARE_PORT
+ validate_certs:
+ description:
+ - Allows connection when SSL certificates are not valid.
+ - Set to C(false) when certificates are not trusted.
+ default: true
+ type: bool
+ env:
+ - name: VMWARE_VALIDATE_CERTS
+ with_tags:
+ description:
+ - Include tags and associated hosts.
+ - Requires 'vSphere Automation SDK' library to be installed on the given controller machine.
+ - Please refer to the following URLs for installation steps
+ - U(https://code.vmware.com/web/sdk/7.0/vsphere-automation-python)
+ default: false
+ type: bool
+ hostnames:
+ description:
+ - A list of templates in order of precedence to compose inventory_hostname.
+ - Ignores template if resulted in an empty string or None value.
+ - You can use property specified in I(properties) as variables in the template.
+ type: list
+ elements: string
+ default: ['name']
+ properties:
+ description:
+ - Specify the list of VMware schema properties associated with the ESXi hostsystem.
+ - These properties will be populated in hostvars of the given ESXi hostsystem.
+ - Each value in the list can be a path to a specific property in hostsystem object or a path to a collection of hostsystem objects.
+ - C(summary.runtime.powerState) are required if C(keyed_groups) is set to default.
+ - Please make sure that all the properties that are used in other parameters are included in this options.
+ - In addition to ESXi hostsystem's properties, the following are special values
+ - Use C(customValue) to populate ESXi hostsystem's custom attributes. C(customValue) is only supported by vCenter and not by ESXi.
+ - Use C(all) to populate all the properties of the ESXi hostsystem.
+ The value C(all) is a time-consuming operation, do not use unless absolutely required.
+ type: list
+ elements: string
+ default: [ 'name', 'customValue', 'summary.runtime.powerState' ]
+ with_nested_properties:
+ description:
+ - This option transforms flattened property names to a nested dictionary.
+ type: bool
+ default: true
+ keyed_groups:
+ description:
+ - Add hosts to group based on the values of a variable.
+ type: list
+ default: [
+ {key: 'summary.runtime.powerState', separator: ''},
+ ]
+ filters:
+ description:
+ - This option allows client-side filtering hosts with jinja templating.
+ - When server-side filtering is introduced, it should be preferred over this.
+ type: list
+ elements: str
+ default: []
+ resources:
+ description:
+ - A list of resources to limit search scope.
+ - Each resource item is represented by exactly one C(vim_type_snake_case):C(list of resource names) pair and optional nested I(resources)
+ - Key name is based on snake case of a vim type name; e.g C(host_system) corresponds to C(vim.HostSystem)
+ required: false
+ type: list
+ elements: dict
+ default: []
+ with_path:
+ description:
+ - Include ESXi hostsystem's path.
+ - Set this option to a string value to replace the root name I(Datacenters).
+ default: false
+ type: bool
+ with_sanitized_property_name:
+ description:
+ - This option allows property name sanitization to create safe property names for use in Ansible.
+ - Also, transforms property name to snake case.
+ type: bool
+ default: false
+ proxy_host:
+ description:
+ - Address of a proxy that will receive all HTTPS requests and relay them.
+ - The format is a hostname or an IP.
+ - This feature depends on a version of pyvmomi>=v6.7.1.2018.12.
+ type: str
+ required: false
+ env:
+ - name: VMWARE_PROXY_HOST
+ proxy_port:
+ description:
+ - Port of the HTTP proxy that will receive all HTTPS requests and relay them.
+ type: int
+ required: false
+ env:
+ - name: VMWARE_PROXY_PORT
+"""
+
+EXAMPLES = r"""
+# Sample configuration file for VMware Host dynamic inventory
+ plugin: community.vmware.vmware_host_inventory
+ strict: false
+ hostname: 10.65.223.31
+ username: administrator@vsphere.local
+ password: Esxi@123$%
+ validate_certs: false
+ with_tags: true
+
+# Using compose
+ plugin: community.vmware.vmware_host_inventory
+ hostname: 10.65.223.31
+ username: administrator@vsphere.local
+ password: Esxi@123$%
+ validate_certs: false
+ properties:
+ - name
+ - summary
+ - config.lockdownMode
+ compose:
+ ansible_user: "'root'"
+ ansible_connection: "'ssh'"
+"""
+
+try:
+ from com.vmware.vapi.std_client import DynamicID
+except ImportError:
+ # Already handled in module_utils/inventory.py
+ pass
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ # Already handled in module_utils/inventory.py
+ pass
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_text, to_native
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.six import text_type
+from ansible_collections.community.vmware.plugins.plugin_utils.inventory import (
+ to_nested_dict,
+ to_flatten_dict,
+)
+from ansible_collections.community.vmware.plugins.inventory.vmware_vm_inventory import BaseVMwareInventory
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+
+ NAME = "community.vmware.vmware_host_inventory"
+
+ def verify_file(self, path):
+ """
+ Verify plugin configuration file and mark this plugin active
+ Args:
+ path: Path of configuration YAML file
+ Returns: True if everything is correct, else False
+ """
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(
+ (
+ "vmware.yaml",
+ "vmware.yml",
+ "vmware_host_inventory.yaml",
+ "vmware_host_inventory.yml",
+ )
+ ):
+ valid = True
+
+ return valid
+
+ def parse(self, inventory, loader, path, cache=True):
+ """
+ Parses the inventory file
+ """
+ super(InventoryModule, self).parse(inventory, loader, path, cache=cache)
+
+ cache_key = self.get_cache_key(path)
+
+ config_data = self._read_config_data(path)
+
+ # set _options from config data
+ self._consume_options(config_data)
+
+ username = self.get_option("username")
+ password = self.get_option("password")
+
+ if isinstance(username, AnsibleVaultEncryptedUnicode):
+ username = username.data
+
+ if isinstance(password, AnsibleVaultEncryptedUnicode):
+ password = password.data
+
+ self.pyv = BaseVMwareInventory(
+ hostname=self.get_option("hostname"),
+ username=username,
+ password=password,
+ port=self.get_option("port"),
+ with_tags=self.get_option("with_tags"),
+ validate_certs=self.get_option("validate_certs"),
+ http_proxy_host=self.get_option("proxy_host"),
+ http_proxy_port=self.get_option("proxy_port")
+ )
+
+ self.pyv.do_login()
+
+ if cache:
+ cache = self.get_option("cache")
+
+ update_cache = False
+ if cache:
+ try:
+ cacheable_results = self._cache[cache_key]
+ except KeyError:
+ update_cache = True
+
+ if cache and not update_cache:
+ self._populate_from_cache(cacheable_results)
+ else:
+ cacheable_results = self._populate_from_source()
+
+ if update_cache or (not cache and self.get_option("cache")):
+ self._cache[cache_key] = cacheable_results
+
+ def _populate_from_cache(self, cache_data):
+ """
+ Populate cache using source data
+
+ """
+ for host, host_properties in cache_data.items():
+ self._populate_host_properties(host_properties, host)
+
+ def _populate_from_source(self):
+ """
+ Populate inventory data from direct source
+
+ """
+ hostvars = {}
+ strict = self.get_option("strict")
+
+ host_properties = self.get_option("properties")
+ if not isinstance(host_properties, list):
+ host_properties = [host_properties]
+
+ if len(host_properties) == 0:
+ host_properties = ["name"]
+
+ if "all" in host_properties:
+ query_props = None
+ host_properties.remove("all")
+ else:
+ if "runtime.connectionState" not in host_properties:
+ host_properties.append("runtime.connectionState")
+ query_props = [x for x in host_properties if x != "customValue"]
+
+ objects = self.pyv.get_managed_objects_properties(
+ vim_type=vim.HostSystem,
+ properties=query_props,
+ resources=self.get_option("resources"),
+ strict=strict,
+ )
+
+ tags_info = dict()
+ if self.pyv.with_tags:
+ tag_svc = self.pyv.rest_content.tagging.Tag
+ cat_svc = self.pyv.rest_content.tagging.Category
+
+ tags = tag_svc.list()
+ for tag in tags:
+ tag_obj = tag_svc.get(tag)
+ tags_info[tag_obj.id] = (
+ tag_obj.name,
+ cat_svc.get(tag_obj.category_id).name,
+ )
+
+ hostnames = self.get_option("hostnames")
+
+ for host_obj in objects:
+ properties = dict()
+ for host_obj_property in host_obj.propSet:
+ properties[host_obj_property.name] = host_obj_property.val
+
+ if (
+ properties.get("runtime.connectionState")
+ or properties["runtime"].connectionState
+ ) in ("disconnected", "notResponding"):
+ continue
+
+ # Custom values
+ if "customValue" in host_properties:
+ field_mgr = []
+ if self.pyv.content.customFieldsManager: # not an ESXi
+ field_mgr = self.pyv.content.customFieldsManager.field
+ for cust_value in host_obj.obj.customValue:
+ properties[
+ [y.name for y in field_mgr if y.key == cust_value.key][0]
+ ] = cust_value.value
+
+ # Tags
+ if self.pyv.with_tags:
+ properties["tags"] = []
+ properties["categories"] = []
+ properties["tag_category"] = {}
+
+ if tags_info:
+ # Add the ESXi host to the appropriate tag group
+ host_mo_id = host_obj.obj._GetMoId() # pylint: disable=protected-access
+ host_dynamic_id = DynamicID(type="HostSystem", id=host_mo_id)
+ tag_association = self.pyv.rest_content.tagging.TagAssociation
+ for tag_id in tag_association.list_attached_tags(host_dynamic_id):
+ if tag_id not in tags_info:
+ # Ghost Tags
+ continue
+ # Add tags related to the host
+ properties["tags"].append(tags_info[tag_id][0])
+ # Add categories related to the host
+ properties["categories"].append(tags_info[tag_id][1])
+ # Add tag and categories related to the host
+ if tags_info[tag_id][1] not in properties["tag_category"]:
+ properties["tag_category"][tags_info[tag_id][1]] = []
+ properties["tag_category"][tags_info[tag_id][1]].append(
+ tags_info[tag_id][0]
+ )
+
+ # Path
+ with_path = self.get_option("with_path")
+ if with_path:
+ path = []
+ parent = host_obj.obj.parent
+ while parent:
+ path.append(parent.name)
+ parent = parent.parent
+ path.reverse()
+ properties["path"] = "/".join(path)
+
+ host_properties = to_nested_dict(properties)
+
+ # Check if we can add host as per filters
+ host_filters = self.get_option("filters")
+ if not self._can_add_host(host_filters, host_properties, strict=strict):
+ continue
+
+ host = self._get_hostname(host_properties, hostnames, strict=strict)
+
+ if host not in hostvars:
+ hostvars[host] = host_properties
+ self._populate_host_properties(host_properties, host)
+ self.inventory.set_variable(
+ host, "ansible_host", self.get_management_ip(host_obj.obj)
+ )
+
+ return hostvars
+
+ def _get_hostname(self, properties, hostnames, strict=False):
+ hostname = None
+ errors = []
+
+ for preference in hostnames:
+ try:
+ hostname = self._compose(preference, properties)
+ except Exception as e: # pylint: disable=broad-except
+ if strict:
+ raise AnsibleError(
+ "Could not compose %s as hostnames - %s"
+ % (preference, to_native(e))
+ )
+
+ errors.append((preference, str(e)))
+ if hostname:
+ return to_text(hostname)
+
+ raise AnsibleError(
+ "Could not template any hostname for host, errors for each preference: %s"
+ % (", ".join(["%s: %s" % (pref, err) for pref, err in errors]))
+ )
+
+ def _can_add_host(self, host_filters, host_properties, strict=False):
+ can_add_host = True
+ for host_filter in host_filters:
+ try:
+ can_add_host = self._compose(host_filter, host_properties)
+ except Exception as e: # pylint: disable=broad-except
+ if strict:
+ raise AnsibleError(
+ "Could not evaluate %s as host filters - %s"
+ % (host_filter, to_native(e))
+ )
+
+ if not can_add_host:
+ return False
+ return True
+
+ @staticmethod
+ def get_management_ip(host):
+ try:
+ vnic_manager = host.configManager.virtualNicManager
+ net_config = vnic_manager.QueryNetConfig("management")
+ # filter nics that are selected
+ for nic in net_config.candidateVnic:
+ if nic.key in net_config.selectedVnic:
+ # add hostvar 'management_ip' to each host
+ return nic.spec.ip.ipAddress
+ except Exception:
+ return ""
+ return ""
+
+ def _populate_host_properties(self, host_properties, host):
+ # Load ESXi host properties into host_vars
+ self.inventory.add_host(host)
+
+ # Use constructed if applicable
+ strict = self.get_option("strict")
+
+ # Composed variables
+ compose = self.get_option("compose")
+ self._set_composite_vars(compose, host_properties, host, strict=strict)
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(
+ self.get_option("groups"), host_properties, host, strict=strict
+ )
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(
+ self.get_option("keyed_groups"), host_properties, host, strict=strict
+ )
+
+ with_path = self.get_option("with_path")
+ if with_path:
+ parents = host_properties["path"].split("/")
+ if parents:
+ if isinstance(with_path, text_type):
+ parents = [with_path] + parents
+
+ c_name = self._sanitize_group_name("/".join(parents))
+ c_group = self.inventory.add_group(c_name)
+ self.inventory.add_host(host, c_group)
+ parents.pop()
+
+ while len(parents) > 0:
+ p_name = self._sanitize_group_name("/".join(parents))
+ p_group = self.inventory.add_group(p_name)
+
+ self.inventory.add_child(p_group, c_group)
+ c_group = p_group
+ parents.pop()
+
+ can_sanitize = self.get_option("with_sanitized_property_name")
+
+ # Sanitize host properties: to snake case
+ if can_sanitize: # to snake case
+ host_properties = camel_dict_to_snake_dict(host_properties)
+
+ with_nested_properties = self.get_option("with_nested_properties")
+ if with_nested_properties:
+ for k, v in host_properties.items():
+ k = self._sanitize_group_name(k) if can_sanitize else k
+ self.inventory.set_variable(host, k, v)
+
+ # For backward compatibility
+ host_properties = to_flatten_dict(host_properties)
+ for k, v in host_properties.items():
+ k = self._sanitize_group_name(k) if can_sanitize else k
+ self.inventory.set_variable(host, k, v)
diff --git a/ansible_collections/community/vmware/plugins/inventory/vmware_vm_inventory.py b/ansible_collections/community/vmware/plugins/inventory/vmware_vm_inventory.py
new file mode 100644
index 000000000..a8747d731
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/inventory/vmware_vm_inventory.py
@@ -0,0 +1,901 @@
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# Copyright: (c) 2020, dacrystal
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+ name: vmware_vm_inventory
+ short_description: VMware Guest inventory source
+ author:
+ - Abhijeet Kasurde (@Akasurde)
+ description:
+ - Get virtual machines as inventory hosts from VMware environment.
+ - Uses any file which ends with vmware.yml, vmware.yaml, vmware_vm_inventory.yml, or vmware_vm_inventory.yaml as a YAML configuration file.
+ extends_documentation_fragment:
+ - inventory_cache
+ - constructed
+ requirements:
+ - "requests >= 2.3"
+ - "vSphere Automation SDK - For tag feature"
+ options:
+ hostname:
+ description: Name of vCenter or ESXi server.
+ required: true
+ env:
+ - name: VMWARE_HOST
+ - name: VMWARE_SERVER
+ username:
+ description:
+ - Name of vSphere user.
+ - Accepts vault encrypted variable.
+ required: true
+ env:
+ - name: VMWARE_USER
+ - name: VMWARE_USERNAME
+ password:
+ description:
+ - Password of vSphere user.
+ - Accepts vault encrypted variable.
+ required: true
+ env:
+ - name: VMWARE_PASSWORD
+ port:
+ description: Port number used to connect to vCenter or ESXi Server.
+ default: 443
+ type: int
+ env:
+ - name: VMWARE_PORT
+ validate_certs:
+ description:
+ - Allows connection when SSL certificates are not valid.
+ - Set to C(false) when certificates are not trusted.
+ default: true
+ type: bool
+ env:
+ - name: VMWARE_VALIDATE_CERTS
+ with_tags:
+ description:
+ - Include tags and associated virtual machines.
+ - Requires 'vSphere Automation SDK' library to be installed on the given controller machine.
+ - Please refer following URLs for installation steps
+ - U(https://code.vmware.com/web/sdk/7.0/vsphere-automation-python)
+ default: false
+ type: bool
+ hostnames:
+ description:
+ - A list of templates in order of precedence to compose inventory_hostname.
+ - The template is ignored if it results in an empty string or None value.
+ - You can use property specified in I(properties) as variables in the template.
+ type: list
+ elements: string
+ default: ['config.name + "_" + config.uuid']
+ properties:
+ description:
+ - Specify the list of VMware schema properties associated with the VM.
+ - These properties will be populated in hostvars of the given VM.
+ - Each value in the list can be a path to a specific property in VM object or a path to a collection of VM objects.
+ - C(config.name), C(config.uuid) are required properties if C(hostnames) is set to default.
+ - C(config.guestId), C(summary.runtime.powerState) are required if C(keyed_groups) is set to default.
+ - Please make sure that all the properties that are used in other parameters are included in these options.
+ - In addition to VM properties, the following are special values
+ - Use C(customValue) to populate virtual machine's custom attributes. C(customValue) is only supported by vCenter and not by ESXi.
+ - Use C(all) to populate all the properties of the virtual machine.
+ Using the value C(all) is a time-consuming operation, do not use it unless absolutely required.
+ - Please refer more VMware guest attributes which can be used as properties
+ U(https://docs.ansible.com/ansible/latest/collections/community/vmware/docsite/vmware_scenarios/vmware_inventory_vm_attributes.html)
+ type: list
+ elements: string
+ default: [ 'name', 'config.cpuHotAddEnabled', 'config.cpuHotRemoveEnabled',
+ 'config.instanceUuid', 'config.hardware.numCPU', 'config.template',
+ 'config.name', 'config.uuid', 'guest.hostName', 'guest.ipAddress',
+ 'guest.guestId', 'guest.guestState', 'runtime.maxMemoryUsage',
+ 'customValue', 'summary.runtime.powerState', 'config.guestId',
+ ]
+ with_nested_properties:
+ description:
+ - This option transforms flattened property names into a nested dictionary.
+ - From 1.10.0 and onwards, default value is set to C(true).
+ type: bool
+ default: true
+ keyed_groups:
+ description:
+ - Add hosts to group based on the values of a variable.
+ type: list
+ default: [
+ {key: 'config.guestId', separator: ''},
+ {key: 'summary.runtime.powerState', separator: ''},
+ ]
+ filters:
+ description:
+ - This option allows client-side filtering hosts with jinja templating.
+ - When server-side filtering is introduced, it should be preferred over this.
+ type: list
+ elements: str
+ default: []
+ resources:
+ description:
+ - A list of resources to limit search scope.
+ - Each resource item is represented by exactly one C(vim_type_snake_case):C(list of resource names) pair and an optional nested I(resources).
+ - Key name is based on snake case of a vim type name; e.g C(host_system) correspond to C(vim.HostSystem)
+ - See L(VIM Types,https://pubs.vmware.com/vi-sdk/visdk250/ReferenceGuide/index-mo_types.html)
+ required: false
+ type: list
+ elements: dict
+ default: []
+ with_path:
+ description:
+ - Include virtual machines path.
+ - Set this option to a string value to replace the default root name I(Datacenters).
+ default: false
+ type: bool
+ with_sanitized_property_name:
+ description:
+ - This option allows property name sanitization to create safe property names for use in Ansible.
+ - Also, transforms property name to snake case.
+ type: bool
+ default: false
+ proxy_host:
+ description:
+ - Address of a proxy that will receive all HTTPS requests and relay them.
+ - The format is a hostname or a IP.
+ - This feature depends on a version of pyvmomi>=v6.7.1.2018.12.
+ type: str
+ required: false
+ env:
+ - name: VMWARE_PROXY_HOST
+ proxy_port:
+ description:
+ - Port of the HTTP proxy that will receive all HTTPS requests and relay them.
+ type: int
+ required: false
+ env:
+ - name: VMWARE_PROXY_PORT
+'''
+
+EXAMPLES = r'''
+# Sample configuration file for VMware Guest dynamic inventory
+ plugin: community.vmware.vmware_vm_inventory
+ strict: false
+ hostname: 10.65.223.31
+ username: administrator@vsphere.local
+ password: Esxi@123$%
+ validate_certs: false
+ with_tags: true
+
+# Gather minimum set of properties for VMware guest
+ plugin: community.vmware.vmware_vm_inventory
+ strict: false
+ hostname: 10.65.223.31
+ username: administrator@vsphere.local
+ password: Esxi@123$%
+ validate_certs: false
+ properties:
+ - 'name'
+ - 'guest.ipAddress'
+ - 'config.name'
+ - 'config.uuid'
+
+# Create Groups based upon VMware Tools status
+ plugin: community.vmware.vmware_vm_inventory
+ strict: false
+ hostname: 10.65.223.31
+ username: administrator@vsphere.local
+ password: Esxi@123$%
+ validate_certs: false
+ properties:
+ - 'name'
+ - 'config.name'
+ - 'guest.toolsStatus'
+ - 'guest.toolsRunningStatus'
+ hostnames:
+ - config.name
+ keyed_groups:
+ - key: guest.toolsStatus
+ separator: ''
+ - key: guest.toolsRunningStatus
+ separator: ''
+
+# Filter VMs based upon condition
+ plugin: community.vmware.vmware_vm_inventory
+ strict: false
+ hostname: 10.65.223.31
+ username: administrator@vsphere.local
+ password: Esxi@123$%
+ validate_certs: false
+ properties:
+ - 'runtime.powerState'
+ - 'config.name'
+ filters:
+ - runtime.powerState == "poweredOn"
+ hostnames:
+ - config.name
+
+# Filter VM's based on OR conditions
+ plugin: community.vmware.vmware_vm_inventory
+ strict: false
+ hostname: 10.65.223.31
+ username: administrator@vsphere.local
+ password: Esxi@123$%
+ validate_certs: false
+ properties:
+ - 'name'
+ - 'config.name'
+ - 'guest.ipAddress'
+ - 'guest.toolsStatus'
+ - 'guest.toolsRunningStatus'
+ - 'config.guestFullName'
+ - 'config.guestId'
+ hostnames:
+ - 'config.name'
+ filters:
+ - config.guestId == "rhel7_64Guest" or config.name == "rhel_20_04_empty"
+
+# Filter VM's based on regex conditions
+ plugin: community.vmware.vmware_vm_inventory
+ strict: false
+ hostname: 10.65.223.31
+ username: administrator@vsphere.local
+ password: Esxi@123$%
+ validate_certs: false
+ properties:
+ - 'config.name'
+ - 'config.guestId'
+ - 'guest.ipAddress'
+ - 'summary.runtime.powerState'
+ filters:
+ - guest.ipAddress is defined and (guest.ipAddress is match('192.168.*') or guest.ipAddress is match('192.169.*'))
+
+# Using compose and groups
+ plugin: community.vmware.vmware_vm_inventory
+ strict: false
+ hostname: 10.65.223.31
+ username: administrator@vsphere.local
+ password: Esxi@123$%
+ validate_certs: false
+ properties:
+ - 'name'
+ - 'config.name'
+ - 'guest.ipAddress'
+ compose:
+ # This will populate the IP address of virtual machine if available
+ # and will be used while communicating to the given virtual machine
+ ansible_host: 'guest.ipAddress'
+ composed_var: 'config.name'
+ # This will populate a host variable with a string value
+ ansible_user: "'admin'"
+ ansible_connection: "'ssh'"
+ groups:
+ VMs: true
+ hostnames:
+ - config.name
+
+# Use Datacenter, Cluster and Folder value to list VMs
+ plugin: community.vmware.vmware_vm_inventory
+ strict: false
+ hostname: 10.65.200.241
+ username: administrator@vsphere.local
+ password: Esxi@123$%
+ validate_certs: false
+ with_tags: true
+ resources:
+ - datacenter:
+ - Asia-Datacenter1
+ - Asia-Datacenter2
+ resources:
+ - compute_resource:
+ - Asia-Cluster1
+ resources:
+ - host_system:
+ - Asia-ESXI4
+ - folder:
+ - dev
+ - prod
+
+# Use Category and it's relation with Tag
+ plugin: community.vmware.vmware_vm_inventory
+ strict: false
+ hostname: 10.65.201.128
+ username: administrator@vsphere.local
+ password: Esxi@123$%
+ validate_certs: false
+ hostnames:
+ - 'config.name'
+ properties:
+ - 'config.name'
+ - 'config.guestId'
+ - 'guest.ipAddress'
+ - 'summary.runtime.powerState'
+ with_tags: true
+ keyed_groups:
+ - key: tag_category.OS
+ prefix: "vmware_tag_os_category_"
+ separator: ""
+ with_nested_properties: true
+ filters:
+ - "tag_category.OS is defined and 'Linux' in tag_category.OS"
+
+# customizing hostnames based on VM's FQDN. The second hostnames template acts as a fallback mechanism.
+ plugin: community.vmware.vmware_vm_inventory
+ strict: false
+ hostname: 10.65.223.31
+ username: administrator@vsphere.local
+ password: Esxi@123$%
+ validate_certs: false
+ hostnames:
+ - 'config.name+"."+guest.ipStack.0.dnsConfig.domainName'
+ - 'config.name'
+ properties:
+ - 'config.name'
+ - 'config.guestId'
+ - 'guest.hostName'
+ - 'guest.ipAddress'
+ - 'guest.guestFamily'
+ - 'guest.ipStack'
+
+# Select a specific IP address for use by ansible when multiple NICs are present on the VM
+ plugin: community.vmware.vmware_vm_inventory
+ strict: false
+ hostname: 10.65.223.31
+ username: administrator@vsphere.local
+ password: Esxi@123$%
+ validate_certs: false
+ compose:
+ # Set the IP address used by ansible to one that starts by 10.42. or 10.43.
+ ansible_host: >-
+ guest.net
+ | selectattr('ipAddress')
+ | map(attribute='ipAddress')
+ | flatten
+ | select('match', '^10.42.*|^10.43.*')
+ | list
+ | first
+ properties:
+ - guest.net
+
+# Group hosts using Jinja2 conditionals
+ plugin: community.vmware.vmware_vm_inventory
+ strict: false
+ hostname: 10.65.13.37
+ username: administrator@vsphere.local
+ password: Esxi@123$%
+ validate_certs: false
+ hostnames:
+ - config.name
+ properties:
+ - 'name'
+ - 'config.name'
+ - 'config.datastoreUrl'
+ groups:
+ slow_storage: "'Nas01' in config.datastoreUrl[0].name"
+ fast_storage: "'SSD' in config.datastoreUrl[0].name"
+'''
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils._text import to_text, to_native
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import _snake_to_camel
+from ansible.utils.display import Display
+from ansible.module_utils.six import text_type
+from ansible_collections.community.vmware.plugins.plugin_utils.inventory import (
+ to_nested_dict,
+ to_flatten_dict,
+)
+
+display = Display()
+
+try:
+ # requests is required for exception handling of the ConnectionError
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+try:
+ from com.vmware.vapi.std_client import DynamicID
+ from vmware.vapi.vsphere.client import create_vsphere_client
+ HAS_VSPHERE = True
+except ImportError:
+ HAS_VSPHERE = False
+
+
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
+from ansible_collections.community.vmware.plugins.module_utils.vmware import connect_to_api
+
+
+class BaseVMwareInventory:
+    """Shared VMware connection helper for the inventory plugin.
+
+    Holds connection parameters resolved from the plugin options, logs in to
+    the pyVmomi (SOAP) API and, when tags are requested, the vSphere
+    Automation (REST) API, and provides a PropertyCollector-based lookup of
+    managed objects.
+    """
+
+    def __init__(self, hostname, username, password, port, validate_certs, with_tags, http_proxy_host, http_proxy_port):
+        # Connection parameters as resolved from the inventory plugin options.
+        self.hostname = hostname
+        self.username = username
+        self.password = password
+        self.port = port
+        self.with_tags = with_tags
+        self.validate_certs = validate_certs
+        # Populated by do_login(): SOAP ServiceContent and REST client.
+        self.content = None
+        self.rest_content = None
+        self.proxy_host = http_proxy_host
+        self.proxy_port = http_proxy_port
+
+    def do_login(self):
+        """
+        Check requirements and do login
+        """
+        self.check_requirements()
+        # NOTE: self.si is first assigned here (service instance handle);
+        # self.content is the SOAP ServiceContent used for all lookups.
+        self.si, self.content = self._login()
+        if self.with_tags:
+            # The REST session is only needed for the tagging API.
+            self.rest_content = self._login_vapi()
+
+    def _login_vapi(self):
+        """
+        Login to vCenter API using REST call
+        Returns: connection object
+
+        """
+        session = requests.Session()
+        session.verify = self.validate_certs
+        if not self.validate_certs:
+            # Disable warning shown at stdout
+            requests.packages.urllib3.disable_warnings()
+
+        server = self.hostname
+        if self.port:
+            server += ":" + str(self.port)
+
+        client, err = None, None
+        try:
+            client = create_vsphere_client(server=server,
+                                           username=self.username,
+                                           password=self.password,
+                                           session=session)
+        except Exception as error:
+            err = error
+
+        if client is None:
+            # Include the underlying SDK error (if any) in the failure message.
+            msg = "Failed to login to %s using %s" % (server, self.username)
+            if err:
+                msg += " due to : %s" % to_native(err)
+            raise AnsibleError(msg)
+        return client
+
+    def _login(self):
+        """
+        Login to vCenter or ESXi server
+        Returns: connection object
+
+        """
+        # module=None: connect_to_api is shared with modules; the inventory
+        # plugin has no AnsibleModule, so failures surface as exceptions.
+        return connect_to_api(module=None, disconnect_atexit=True, return_si=True,
+                              hostname=self.hostname, username=self.username, password=self.password,
+                              port=self.port, validate_certs=self.validate_certs, httpProxyHost=self.proxy_host,
+                              httpProxyPort=self.proxy_port)
+
+    def check_requirements(self):
+        """ Check all requirements for this inventory are satisfied"""
+        if not HAS_REQUESTS:
+            raise AnsibleParserError('Please install "requests" Python module as this is required'
+                                     ' for VMware Guest dynamic inventory plugin.')
+        elif not HAS_PYVMOMI:
+            raise AnsibleParserError('Please install "PyVmomi" Python module as this is required'
+                                     ' for VMware Guest dynamic inventory plugin.')
+        if HAS_REQUESTS:
+            # Pyvmomi 5.5 and onwards requires requests 2.3
+            # https://github.com/vmware/pyvmomi/blob/master/requirements.txt
+            required_version = (2, 3)
+            requests_version = requests.__version__.split(".")[:2]
+            try:
+                requests_major_minor = tuple(map(int, requests_version))
+            except ValueError:
+                raise AnsibleParserError("Failed to parse 'requests' library version.")
+
+            if requests_major_minor < required_version:
+                raise AnsibleParserError("'requests' library version should"
+                                         " be >= %s, found: %s." % (".".join([str(w) for w in required_version]),
+                                                                    requests.__version__))
+
+        if not HAS_VSPHERE and self.with_tags:
+            raise AnsibleError("Unable to find 'vSphere Automation SDK' Python library which is required."
+                               " Please refer this URL for installation steps"
+                               " - https://code.vmware.com/web/sdk/7.0/vsphere-automation-python")
+
+        if not all([self.hostname, self.username, self.password]):
+            raise AnsibleError("Missing one of the following : hostname, username, password. Please read "
+                               "the documentation for more information.")
+
+    def get_managed_objects_properties(self, vim_type, properties=None, resources=None, strict=False):  # noqa # pylint: disable=too-complex
+        """
+        Look up a Managed Object Reference in vCenter / ESXi Environment
+        :param vim_type: Type of vim object e.g, for datacenter - vim.Datacenter
+        :param properties: List of properties related to vim object e.g. Name
+        :param resources: List of resources to limit search scope
+        :param strict: Dictates if plugin raises error or just warns
+        :return: local content object
+        """
+        # Short aliases for the PropertyCollector spec classes used below.
+        TraversalSpec = vmodl.query.PropertyCollector.TraversalSpec
+        FilterSpec = vmodl.query.PropertyCollector.FilterSpec
+        ObjectSpec = vmodl.query.PropertyCollector.ObjectSpec
+        PropertySpec = vmodl.query.PropertyCollector.PropertySpec
+
+        resource_filters = resources or []
+        # Maps a vim type to the human-readable name used in error messages.
+        type_to_name_map = {}
+
+        def _handle_error(message):
+            # Honor 'strict': abort parsing, or just warn and continue.
+            if strict:
+                raise AnsibleError(message)
+            else:
+                display.warning(message)
+
+        def get_contents(container, vim_types):
+            # Retrieve the 'name' property of every object of the given types
+            # below 'container' (recursive container-view traversal).
+            return self.content.propertyCollector.RetrieveContents([
+                FilterSpec(
+                    objectSet=[
+                        ObjectSpec(
+                            obj=self.content.viewManager.CreateContainerView(
+                                container, vim_types, True),
+                            skip=False,
+                            selectSet=[TraversalSpec(
+                                type=vim.view.ContainerView, path='view', skip=False)]
+                        )],
+                    propSet=[PropertySpec(type=t, all=False, pathSet=['name']) for t in vim_types],
+                )
+            ])
+
+        def filter_containers(containers, typ, filter_list):
+            # Keep only objects of type 'typ' whose name is in filter_list;
+            # warn/raise for requested names that were not found.
+            if len(filter_list) > 0:
+                objs = []
+                results = []
+                found_filters = {}
+
+                for container in containers:
+                    results.extend(get_contents(container, [typ]))
+
+                for res in results:
+                    if res.propSet[0].val in filter_list:
+                        objs.append(res.obj)
+                        found_filters[res.propSet[0].val] = True
+
+                for fil in filter_list:
+                    if fil not in found_filters:
+                        _handle_error("Unable to find %s %s" % (type_to_name_map[typ], fil))
+
+                return objs
+            return containers
+
+        def build_containers(containers, vim_type, names, filters):
+            # Recursively narrow the search scope following the nested
+            # 'resources' structure from the plugin configuration.
+            filters = filters or []
+            if vim_type:
+                containers = filter_containers(containers, vim_type, names)
+
+            new_containers = []
+            for fil in filters:
+                new_filters = None
+                for k, v in fil.items():
+                    if k == "resources":
+                        new_filters = v
+                    else:
+                        # Keys are snake_case vim type names,
+                        # e.g. host_system -> vim.HostSystem.
+                        vim_type = getattr(vim, _snake_to_camel(k, True))
+                        names = v
+                        type_to_name_map[vim_type] = k.replace("_", " ")
+
+                new_containers.extend(build_containers(containers, vim_type, names, new_filters))
+
+            if len(filters) > 0:
+                return new_containers
+            return containers
+
+        containers = build_containers([self.content.rootFolder], None, None, resource_filters)
+        if len(containers) == 0:
+            return []
+
+        # One ObjectSpec (with a recursive container view) per remaining scope.
+        objs_list = [ObjectSpec(
+            obj=self.content.viewManager.CreateContainerView(r, [vim_type], True),
+            selectSet=[TraversalSpec(path='view', skip=False, type=vim.view.ContainerView)]) for r in containers]
+
+        # With no explicit property list, fetch all properties of each object.
+        is_all = False if properties else True
+
+        # Create Property Spec
+        property_spec = PropertySpec(
+            type=vim_type,  # Type of object to be retrieved
+            all=is_all,
+            pathSet=properties
+        )
+
+        # Create Filter Spec
+        filter_spec = FilterSpec(
+            objectSet=objs_list,
+            propSet=[property_spec],
+            reportMissingObjectsInResults=False
+        )
+
+        try:
+            return self.content.propertyCollector.RetrieveContents([filter_spec])
+        except vmodl.query.InvalidProperty as err:
+            _handle_error("Invalid property name: %s" % err.name)
+        except Exception as err:  # pylint: disable=broad-except
+            _handle_error("Couldn't retrieve contents from host: %s" % to_native(err))
+        return []
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+    """Ansible dynamic inventory plugin for VMware virtual machines."""
+
+    NAME = 'community.vmware.vmware_vm_inventory'
+
+    def verify_file(self, path):
+        """
+        Verify plugin configuration file and mark this plugin active
+        Args:
+            path: Path of configuration YAML file
+        Returns: True if everything is correct, else False
+        """
+        valid = False
+        if super(InventoryModule, self).verify_file(path):
+            # Only accept the documented file name suffixes.
+            if path.endswith(('vmware.yaml', 'vmware.yml', 'vmware_vm_inventory.yaml', 'vmware_vm_inventory.yml')):
+                valid = True
+
+        return valid
+
+    def parse(self, inventory, loader, path, cache=True):
+        """
+        Parses the inventory file
+        """
+        super(InventoryModule, self).parse(inventory, loader, path, cache=cache)
+
+        cache_key = self.get_cache_key(path)
+
+        config_data = self._read_config_data(path)
+
+        # set _options from config data
+        self._consume_options(config_data)
+
+        username = self.get_option('username')
+        password = self.get_option('password')
+
+        # Credentials may be vault-encrypted; unwrap them to plain text.
+        if isinstance(username, AnsibleVaultEncryptedUnicode):
+            username = username.data
+
+        if isinstance(password, AnsibleVaultEncryptedUnicode):
+            password = password.data
+
+        self.pyv = BaseVMwareInventory(
+            hostname=self.get_option('hostname'),
+            username=username,
+            password=password,
+            port=self.get_option('port'),
+            with_tags=self.get_option('with_tags'),
+            validate_certs=self.get_option('validate_certs'),
+            http_proxy_host=self.get_option('proxy_host'),
+            http_proxy_port=self.get_option('proxy_port')
+        )
+        self.pyv.do_login()
+
+        # The 'cache' argument says whether the caller allows caching at all;
+        # combine it with the plugin's own 'cache' option.
+        if cache:
+            cache = self.get_option('cache')
+
+        update_cache = False
+        if cache:
+            try:
+                cacheable_results = self._cache[cache_key]
+            except KeyError:
+                # Cache miss: fall through to a live query, then refresh it.
+                update_cache = True
+
+        if cache and not update_cache:
+            self._populate_from_cache(cacheable_results)
+        else:
+            cacheable_results = self._populate_from_source()
+
+        # Store results when the cache was stale, or when caching is enabled
+        # but this run was forced to bypass it.
+        if update_cache or (not cache and self.get_option('cache')):
+            self._cache[cache_key] = cacheable_results
+
+    def _populate_from_cache(self, cache_data):
+        """
+        Populate cache using source data
+
+        """
+        for host, host_properties in cache_data.items():
+            self._populate_host_properties(host_properties, host)
+
+    def _populate_from_source(self):
+        """
+        Populate inventory data from direct source
+
+        """
+        hostvars = {}
+        strict = self.get_option('strict')
+
+        vm_properties = self.get_option('properties')
+        if not isinstance(vm_properties, list):
+            vm_properties = [vm_properties]
+
+        if len(vm_properties) == 0:
+            vm_properties = ['name']
+
+        if 'all' in vm_properties:
+            # 'all' means: let the property collector return every property.
+            query_props = None
+            vm_properties.remove('all')
+        else:
+            # connectionState is always needed to skip unreachable VMs below.
+            if 'runtime.connectionState' not in vm_properties:
+                vm_properties.append('runtime.connectionState')
+            # 'customValue' is a pseudo-property handled separately below.
+            query_props = [x for x in vm_properties if x != "customValue"]
+
+        objects = self.pyv.get_managed_objects_properties(
+            vim_type=vim.VirtualMachine,
+            properties=query_props,
+            resources=self.get_option('resources'),
+            strict=strict,
+        )
+
+        tags_info = dict()
+        if self.pyv.with_tags:
+            # Pre-fetch all tags once: tag id -> (tag name, category name).
+            tag_svc = self.pyv.rest_content.tagging.Tag
+            cat_svc = self.pyv.rest_content.tagging.Category
+
+            tags = tag_svc.list()
+            for tag in tags:
+                tag_obj = tag_svc.get(tag)
+                tags_info[tag_obj.id] = (tag_obj.name, cat_svc.get(tag_obj.category_id).name)
+
+        hostnames = self.get_option('hostnames')
+
+        for vm_obj in objects:
+            properties = dict()
+            for vm_obj_property in vm_obj.propSet:
+                properties[vm_obj_property.name] = vm_obj_property.val
+
+            # Skip VMs that cannot be queried further. The full 'runtime'
+            # object (second operand) is only present when 'all' properties
+            # were requested instead of the flat 'runtime.connectionState'.
+            if (properties.get('runtime.connectionState') or properties['runtime'].connectionState) in ('orphaned', 'inaccessible', 'disconnected'):
+                continue
+
+            # Custom values
+            if 'customValue' in vm_properties:
+                field_mgr = []
+                if self.pyv.content.customFieldsManager:  # not an ESXi
+                    field_mgr = self.pyv.content.customFieldsManager.field
+                for cust_value in vm_obj.obj.customValue:
+                    properties[[y.name for y in field_mgr if y.key == cust_value.key][0]] = cust_value.value
+
+            # Tags
+            if tags_info:
+                # Add virtual machine to appropriate tag group
+                vm_mo_id = vm_obj.obj._GetMoId()  # pylint: disable=protected-access
+                vm_dynamic_id = DynamicID(type='VirtualMachine', id=vm_mo_id)
+                tag_association = self.pyv.rest_content.tagging.TagAssociation
+                properties['tags'] = []
+                properties['categories'] = []
+                properties['tag_category'] = {}
+                for tag_id in tag_association.list_attached_tags(vm_dynamic_id):
+                    if tag_id not in tags_info:
+                        # Ghost Tags - community.vmware#681
+                        continue
+                    # Add tags related to VM
+                    properties['tags'].append(tags_info[tag_id][0])
+                    # Add categories related to VM
+                    properties['categories'].append(tags_info[tag_id][1])
+                    # Add tag and categories related to VM
+                    if tags_info[tag_id][1] not in properties['tag_category']:
+                        properties['tag_category'][tags_info[tag_id][1]] = []
+                    properties['tag_category'][tags_info[tag_id][1]].append(tags_info[tag_id][0])
+
+            # Path
+            with_path = self.get_option('with_path')
+            if with_path:
+                # Walk up the parent chain to build "Datacenter/.../Folder".
+                path = []
+                parent = vm_obj.obj.parent
+                while parent:
+                    path.append(parent.name)
+                    parent = parent.parent
+                path.reverse()
+                properties['path'] = "/".join(path)
+
+            host_properties = to_nested_dict(properties)
+
+            # Check if we can add host as per filters
+            host_filters = self.get_option('filters')
+            if not self._can_add_host(host_filters, host_properties, strict=strict):
+                continue
+
+            host = self._get_hostname(host_properties, hostnames, strict=strict)
+
+            # First VM to claim a hostname wins; duplicates are ignored.
+            if host not in hostvars:
+                hostvars[host] = host_properties
+                self._populate_host_properties(host_properties, host)
+
+        return hostvars
+
+    def _get_hostname(self, properties, hostnames, strict=False):
+        """Compose the inventory hostname from the ordered hostname templates.
+
+        Returns the first template that renders to a non-empty value; raises
+        AnsibleError when none does (or immediately, when strict is set).
+        """
+        hostname = None
+        errors = []
+
+        for preference in hostnames:
+            try:
+                hostname = self._compose(preference, properties)
+            except Exception as e:  # pylint: disable=broad-except
+                if strict:
+                    raise AnsibleError("Could not compose %s as hostnames - %s" % (preference, to_native(e)))
+                else:
+                    errors.append(
+                        (preference, str(e))
+                    )
+            if hostname:
+                return to_text(hostname)
+
+        raise AnsibleError(
+            'Could not template any hostname for host, errors for each preference: %s' % (
+                ', '.join(['%s: %s' % (pref, err) for pref, err in errors])
+            )
+        )
+
+    def _can_add_host(self, host_filters, host_properties, strict=False):
+        """Return True when the host passes every jinja filter expression."""
+        can_add_host = True
+        for host_filter in host_filters:
+            try:
+                can_add_host = self._compose(host_filter, host_properties)
+            except Exception as e:  # pylint: disable=broad-except
+                if strict:
+                    raise AnsibleError("Could not evaluate %s as host filters - %s" % (host_filter, to_native(e)))
+
+            if not can_add_host:
+                return False
+        return True
+
+    def _populate_host_properties(self, host_properties, host):
+        # Load VM properties in host_vars
+        self.inventory.add_host(host)
+
+        # Use constructed if applicable
+        strict = self.get_option('strict')
+
+        # Composed variables
+        compose = self.get_option('compose')
+        if not compose:
+            # Default: expose the guest IP address as ansible_host.
+            compose['ansible_host'] = 'guest.ipAddress'
+
+        self._set_composite_vars(compose, host_properties, host, strict=strict)
+        # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+        self._add_host_to_composed_groups(self.get_option('groups'), host_properties, host, strict=strict)
+        # Create groups based on variable values and add the corresponding hosts to it
+        self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host_properties, host, strict=strict)
+
+        with_path = self.get_option('with_path')
+        if with_path:
+            # Build nested inventory groups mirroring the VM folder path.
+            parents = host_properties['path'].split('/')
+            if parents:
+                # A string value for with_path replaces the root group name.
+                if isinstance(with_path, text_type):
+                    parents = [with_path] + parents
+
+                c_name = self._sanitize_group_name('/'.join(parents))
+                c_group = self.inventory.add_group(c_name)
+                self.inventory.add_host(host, c_group)
+                parents.pop()
+
+                # Chain each ancestor path as the parent group of the previous one.
+                while len(parents) > 0:
+                    p_name = self._sanitize_group_name('/'.join(parents))
+                    p_group = self.inventory.add_group(p_name)
+
+                    self.inventory.add_child(p_group, c_group)
+                    c_group = p_group
+                    parents.pop()
+
+        can_sanitize = self.get_option('with_sanitized_property_name')
+
+        # Sanitize host properties: to snake case
+        if can_sanitize:  # to snake case
+            host_properties = camel_dict_to_snake_dict(host_properties)
+
+        with_nested_properties = self.get_option('with_nested_properties')
+        if with_nested_properties:
+            for k, v in host_properties.items():
+                k = self._sanitize_group_name(k) if can_sanitize else k
+                self.inventory.set_variable(host, k, v)
+
+        # For backward compatibility, always also expose the flattened form.
+        host_properties = to_flatten_dict(host_properties)
+        for k, v in host_properties.items():
+            k = self._sanitize_group_name(k) if can_sanitize else k
+            self.inventory.set_variable(host, k, v)
diff --git a/ansible_collections/community/vmware/plugins/module_utils/__init__.py b/ansible_collections/community/vmware/plugins/module_utils/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/module_utils/__init__.py
diff --git a/ansible_collections/community/vmware/plugins/module_utils/version.py b/ansible_collections/community/vmware/plugins/module_utils/version.py
new file mode 100644
index 000000000..3633a307b
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/module_utils/version.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Provide version object to compare version numbers."""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.six import raise_from
+
+try:
+ from ansible.module_utils.compat.version import LooseVersion # noqa: F401, pylint: disable=unused-import
+except ImportError:
+ try:
+ from distutils.version import LooseVersion # noqa: F401
+ except ImportError as exc:
+ raise_from(ImportError('To use this plugin or module with ansible-core < 2.11, you need to use Python < 3.12 with distutils.version present'), exc)
+
+try:
+ from ansible.module_utils.compat.version import StrictVersion # noqa: F401, pylint: disable=unused-import
+except ImportError:
+ try:
+ from distutils.version import StrictVersion # noqa: F401
+ except ImportError as exc:
+ raise_from(ImportError('To use this plugin or module with ansible-core < 2.11, you need to use Python < 3.12 with distutils.version present'), exc)
diff --git a/ansible_collections/community/vmware/plugins/module_utils/vm_device_helper.py b/ansible_collections/community/vmware/plugins/module_utils/vm_device_helper.py
new file mode 100644
index 000000000..6ecd5c656
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/module_utils/vm_device_helper.py
@@ -0,0 +1,516 @@
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import traceback
+from random import randint
+from ansible.module_utils.common.network import is_mac
+from ansible.module_utils.basic import missing_required_lib
+
+PYVMOMI_IMP_ERR = None
+try:
+ from pyVmomi import vim
+ HAS_PYVMOMI = True
+except ImportError:
+ PYVMOMI_IMP_ERR = traceback.format_exc()
+ HAS_PYVMOMI = False
+
+
class PyVmomiDeviceHelper(object):
    """Helper that builds VMware virtual-device specs (controllers, disks,
    NICs, CD-ROMs, NVDIMMs, vTPM) for PyVmomiHelper."""

    def __init__(self, module):
        # Fail early if pyVmomi is missing; every method below needs vim.*
        if not HAS_PYVMOMI:
            module.fail_json(msg=missing_required_lib('PyVmomi'),
                             exception=PYVMOMI_IMP_ERR)

        self.module = module
        # This is not used for the multiple controller with multiple disks scenario,
        # disk unit number can not be None
        # self.next_disk_unit_number = 0
        self.scsi_device_type = {
            'lsilogic': vim.vm.device.VirtualLsiLogicController,
            'paravirtual': vim.vm.device.ParaVirtualSCSIController,
            'buslogic': vim.vm.device.VirtualBusLogicController,
            'lsilogicsas': vim.vm.device.VirtualLsiLogicSASController
        }
        self.sata_device_type = vim.vm.device.VirtualAHCIController
        self.nvme_device_type = vim.vm.device.VirtualNVMEController
        self.ide_device_type = vim.vm.device.VirtualIDEController
        # All disk-controller types: the four SCSI flavors plus sata/nvme/ide.
        self.disk_ctl_device_type = self.scsi_device_type.copy()
        self.disk_ctl_device_type.update({
            'sata': self.sata_device_type,
            'nvme': self.nvme_device_type,
            'ide': self.ide_device_type
        })
        self.usb_device_type = {
            'usb2': vim.vm.device.VirtualUSBController,
            'usb3': vim.vm.device.VirtualUSBXHCIController
        }
        self.nic_device_type = {
            'pcnet32': vim.vm.device.VirtualPCNet32,
            'vmxnet2': vim.vm.device.VirtualVmxnet2,
            'vmxnet3': vim.vm.device.VirtualVmxnet3,
            'e1000': vim.vm.device.VirtualE1000,
            'e1000e': vim.vm.device.VirtualE1000e,
            'sriov': vim.vm.device.VirtualSriovEthernetCard,
            'pvrdma': vim.vm.device.VirtualVmxnet3Vrdma
        }

    def create_scsi_controller(self, scsi_type, bus_number, bus_sharing='noSharing'):
        """
        Create SCSI Controller with given SCSI Type and SCSI Bus Number
        Args:
            scsi_type: Type of SCSI
            bus_number: SCSI Bus number to be assigned
            bus_sharing: noSharing, virtualSharing, physicalSharing

        Returns: Virtual device spec for SCSI Controller

        """
        scsi_ctl = vim.vm.device.VirtualDeviceSpec()
        scsi_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        # Unknown scsi_type falls back to the paravirtual controller.
        scsi_device = self.scsi_device_type.get(scsi_type, vim.vm.device.ParaVirtualSCSIController)
        scsi_ctl.device = scsi_device()
        scsi_ctl.device.deviceInfo = vim.Description()
        scsi_ctl.device.busNumber = bus_number
        # While creating a new SCSI controller, temporary key value
        # should be unique negative integers
        scsi_ctl.device.key = -randint(1000, 9999)
        scsi_ctl.device.hotAddRemove = True
        scsi_ctl.device.sharedBus = bus_sharing
        # Unit 7 is reserved for the controller itself on a SCSI bus.
        scsi_ctl.device.scsiCtlrUnitNumber = 7

        return scsi_ctl

    def is_scsi_controller(self, device):
        """Return True when device is any of the supported SCSI controller types."""
        return isinstance(device, tuple(self.scsi_device_type.values()))

    @staticmethod
    def create_sata_controller(bus_number):
        """Return an 'add' device spec for a SATA (AHCI) controller on the given bus."""
        sata_ctl = vim.vm.device.VirtualDeviceSpec()
        sata_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        sata_ctl.device = vim.vm.device.VirtualAHCIController()
        sata_ctl.device.deviceInfo = vim.Description()
        sata_ctl.device.busNumber = bus_number
        # Temporary negative key, unique within this reconfig request.
        sata_ctl.device.key = -randint(15000, 19999)

        return sata_ctl

    @staticmethod
    def is_sata_controller(device):
        """Return True when device is a SATA (AHCI) controller."""
        return isinstance(device, vim.vm.device.VirtualAHCIController)

    @staticmethod
    def create_nvme_controller(bus_number):
        """Return an 'add' device spec for an NVMe controller on the given bus."""
        nvme_ctl = vim.vm.device.VirtualDeviceSpec()
        nvme_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        nvme_ctl.device = vim.vm.device.VirtualNVMEController()
        nvme_ctl.device.deviceInfo = vim.Description()
        nvme_ctl.device.key = -randint(31000, 39999)
        nvme_ctl.device.busNumber = bus_number

        return nvme_ctl

    @staticmethod
    def is_nvme_controller(device):
        """Return True when device is an NVMe controller."""
        return isinstance(device, vim.vm.device.VirtualNVMEController)

    def create_disk_controller(self, ctl_type, ctl_number, bus_sharing='noSharing'):
        """
        Create a disk controller spec of the requested type.
        Args:
            ctl_type: one of the SCSI flavors, 'sata' or 'nvme'
                      (note: 'ide' is not creatable through this method)
            ctl_number: bus number for the new controller
            bus_sharing: SCSI bus sharing mode (ignored for sata/nvme)

        Returns: Virtual device spec, or None for an unsupported ctl_type
        """
        disk_ctl = None
        if ctl_type in self.scsi_device_type.keys():
            disk_ctl = self.create_scsi_controller(ctl_type, ctl_number, bus_sharing)
        elif ctl_type == 'sata':
            disk_ctl = self.create_sata_controller(ctl_number)
        elif ctl_type == 'nvme':
            disk_ctl = self.create_nvme_controller(ctl_number)

        return disk_ctl

    def get_controller_disks(self, vm_obj, ctl_type, ctl_number):
        """
        Find the disk controller of given type/bus number and its attached disks.
        Args:
            vm_obj: Managed object of virtual machine
            ctl_type: controller type name (SCSI flavor, 'sata' or 'nvme')
            ctl_number: controller bus number

        Returns: Tuple of (controller device or None, list of attached VirtualDisk)
        """
        disk_controller = None
        disk_list = []
        disk_key_list = []
        if vm_obj is None:
            return disk_controller, disk_list
        disk_controller_type = self.scsi_device_type.copy()
        disk_controller_type.update({'sata': vim.vm.device.VirtualAHCIController, 'nvme': vim.vm.device.VirtualNVMEController})
        for device in vm_obj.config.hardware.device:
            if isinstance(device, disk_controller_type[ctl_type]):
                if device.busNumber == ctl_number:
                    disk_controller = device
                    # device.device holds the keys of devices attached to this controller.
                    disk_key_list = device.device
                    break
        if len(disk_key_list) != 0:
            for device in vm_obj.config.hardware.device:
                if isinstance(device, vim.vm.device.VirtualDisk):
                    if device.key in disk_key_list:
                        disk_list.append(device)

        return disk_controller, disk_list

    @staticmethod
    def create_ide_controller(bus_number=0):
        """Return an 'add' device spec for an IDE controller on the given bus."""
        ide_ctl = vim.vm.device.VirtualDeviceSpec()
        ide_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        ide_ctl.device = vim.vm.device.VirtualIDEController()
        ide_ctl.device.deviceInfo = vim.Description()
        # While creating a new IDE controller, temporary key value
        # should be unique negative integers
        ide_ctl.device.key = -randint(200, 299)
        ide_ctl.device.busNumber = bus_number

        return ide_ctl

    @staticmethod
    def create_cdrom(ctl_device, cdrom_type, iso_path=None, unit_number=0):
        """
        Create a CD-ROM spec attached to the given IDE or SATA controller.
        Args:
            ctl_device: controller device the CD-ROM attaches to
            cdrom_type: 'none', 'client' (remote passthrough) or 'iso'
            iso_path: datastore path of the ISO, used only for cdrom_type == 'iso'
            unit_number: unit number on the controller

        Returns: Virtual device spec for the CD-ROM
        """
        cdrom_spec = vim.vm.device.VirtualDeviceSpec()
        cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        cdrom_spec.device = vim.vm.device.VirtualCdrom()
        cdrom_spec.device.controllerKey = ctl_device.key
        # Temporary key range depends on the controller bus type.
        if isinstance(ctl_device, vim.vm.device.VirtualIDEController):
            cdrom_spec.device.key = -randint(3000, 3999)
        elif isinstance(ctl_device, vim.vm.device.VirtualAHCIController):
            cdrom_spec.device.key = -randint(16000, 16999)
        cdrom_spec.device.unitNumber = unit_number
        cdrom_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
        cdrom_spec.device.connectable.allowGuestControl = True
        cdrom_spec.device.connectable.startConnected = (cdrom_type != "none")
        if cdrom_type in ["none", "client"]:
            cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
        elif cdrom_type == "iso":
            cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso_path)
            cdrom_spec.device.connectable.connected = True

        return cdrom_spec

    @staticmethod
    def is_equal_cdrom(vm_obj, cdrom_device, cdrom_type, iso_path):
        """
        Return True when the existing CD-ROM device already matches the
        requested type/ISO and connection state, so no reconfigure is needed.
        The 'connected' flag is only checked while the VM is powered on.
        """
        if cdrom_type == "none":
            return (
                isinstance(
                    cdrom_device.backing,
                    vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo,
                )
                and cdrom_device.connectable.allowGuestControl
                and not cdrom_device.connectable.startConnected
                and (
                    vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn
                    or not cdrom_device.connectable.connected
                )
            )
        elif cdrom_type == "client":
            return (
                isinstance(
                    cdrom_device.backing,
                    vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo,
                )
                and cdrom_device.connectable.allowGuestControl
                and cdrom_device.connectable.startConnected
                and (
                    vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn
                    or cdrom_device.connectable.connected
                )
            )
        elif cdrom_type == "iso":
            return (
                isinstance(
                    cdrom_device.backing, vim.vm.device.VirtualCdrom.IsoBackingInfo
                )
                and cdrom_device.backing.fileName == iso_path
                and cdrom_device.connectable.allowGuestControl
                and cdrom_device.connectable.startConnected
                and (
                    vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn
                    or cdrom_device.connectable.connected
                )
            )
        # Unknown cdrom_type: explicitly not equal (was an implicit None before).
        return False

    @staticmethod
    def update_cdrom_config(vm_obj, cdrom_spec, cdrom_device, iso_path=None):
        """Mutate an existing CD-ROM device to match the requested cdrom_spec dict."""
        # Updating an existing CD-ROM
        if cdrom_spec["type"] in ["client", "none"]:
            cdrom_device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
        elif cdrom_spec["type"] == "iso" and iso_path is not None:
            cdrom_device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso_path)
        cdrom_device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
        cdrom_device.connectable.allowGuestControl = True
        cdrom_device.connectable.startConnected = (cdrom_spec["type"] != "none")
        # Only toggle the live connection while the VM is powered on.
        if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
            cdrom_device.connectable.connected = (cdrom_spec["type"] != "none")

    def remove_cdrom(self, cdrom_device):
        """Return a 'remove' device spec for the given CD-ROM device."""
        cdrom_spec = vim.vm.device.VirtualDeviceSpec()
        cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
        cdrom_spec.device = cdrom_device

        return cdrom_spec

    def create_hard_disk(self, disk_ctl, disk_index=None):
        """
        Create a hard-disk spec attached to the given controller spec.
        Args:
            disk_ctl: controller device spec (as returned by create_*_controller)
            disk_index: unit number on the controller; valid range depends on
                        the controller type (scsi 0-15 except 7, sata 0-29, nvme 0-14)

        Returns: Virtual device spec for the disk
        """
        diskspec = vim.vm.device.VirtualDeviceSpec()
        diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        diskspec.device = vim.vm.device.VirtualDisk()
        diskspec.device.key = -randint(20000, 24999)
        diskspec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
        diskspec.device.controllerKey = disk_ctl.device.key

        if self.is_scsi_controller(disk_ctl.device):
            # one scsi controller attach 0-15 (except 7) disks
            if disk_index is None:
                # Fixed message: this branch is SCSI, not SATA.
                self.module.fail_json(msg='unitNumber for scsi disk is None.')
            else:
                # Unit 7 is reserved for the controller itself.
                if disk_index == 7 or disk_index > 15:
                    self.module.fail_json(msg='Invalid scsi disk unitNumber, valid 0-15(except 7).')
                else:
                    diskspec.device.unitNumber = disk_index
        elif self.is_sata_controller(disk_ctl.device):
            # one sata controller attach 0-29 disks
            if disk_index is None:
                self.module.fail_json(msg='unitNumber for sata disk is None.')
            else:
                if disk_index > 29:
                    self.module.fail_json(msg='Invalid sata disk unitNumber, valid 0-29.')
                else:
                    diskspec.device.unitNumber = disk_index
        elif self.is_nvme_controller(disk_ctl.device):
            # one nvme controller attach 0-14 disks
            if disk_index is None:
                self.module.fail_json(msg='unitNumber for nvme disk is None.')
            else:
                if disk_index > 14:
                    self.module.fail_json(msg='Invalid nvme disk unitNumber, valid 0-14.')
                else:
                    diskspec.device.unitNumber = disk_index

        return diskspec

    def create_nic(self, device_type, device_label, device_infos):
        """
        Create a network adapter spec.
        Args:
            device_type: key of self.nic_device_type (e.g. 'vmxnet3', 'e1000e', 'sriov')
            device_label: device label string shown in deviceInfo
            device_infos: dict of NIC options ('name' is required; optional keys:
                'wake_on_lan', 'start_connected', 'allow_guest_control', 'connected',
                'mac', and for sriov 'physical_function_backing',
                'virtual_function_backing', 'allow_guest_os_mtu_change')

        Returns: Virtual device spec for the NIC
        """
        nic = vim.vm.device.VirtualDeviceSpec()
        nic_device = self.nic_device_type.get(device_type)
        nic.device = nic_device()
        nic.device.key = -randint(25000, 29999)
        nic.device.wakeOnLanEnabled = bool(device_infos.get('wake_on_lan', True))
        nic.device.deviceInfo = vim.Description()
        nic.device.deviceInfo.label = device_label
        nic.device.deviceInfo.summary = device_infos['name']
        nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
        nic.device.connectable.startConnected = bool(device_infos.get('start_connected', True))
        nic.device.connectable.allowGuestControl = bool(device_infos.get('allow_guest_control', True))
        nic.device.connectable.connected = bool(device_infos.get('connected', True))
        if device_type == 'sriov':
            pf_backing = device_infos.get('physical_function_backing', None)
            vf_backing = device_infos.get('virtual_function_backing', None)

            nic.device.allowGuestOSMtuChange = bool(device_infos.get('allow_guest_os_mtu_change', True))
            nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
            if pf_backing is not None:
                nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
                nic.device.sriovBacking.physicalFunctionBacking.id = pf_backing
            if vf_backing is not None:
                nic.device.sriovBacking.virtualFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
                nic.device.sriovBacking.virtualFunctionBacking.id = vf_backing
        # A valid user-supplied MAC switches the NIC to a manual address.
        if 'mac' in device_infos and is_mac(device_infos['mac']):
            nic.device.addressType = 'manual'
            nic.device.macAddress = device_infos['mac']
        else:
            nic.device.addressType = 'generated'

        return nic

    def integer_value(self, input_value, name):
        """
        Function to return int value for given input, else return error
        Args:
            input_value: Input value to retrieve int value from
            name: Name of the Input value (used to build error message)
        Returns: (int) if integer value can be obtained, otherwise will send a error message.
        """
        if isinstance(input_value, int):
            return input_value
        elif isinstance(input_value, str) and input_value.isdigit():
            return int(input_value)
        else:
            self.module.fail_json(msg='"%s" attribute should be an'
                                      ' integer value.' % name)

    def create_nvdimm_controller(self):
        """Return an 'add' device spec for an NVDIMM controller."""
        nvdimm_ctl = vim.vm.device.VirtualDeviceSpec()
        nvdimm_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        nvdimm_ctl.device = vim.vm.device.VirtualNVDIMMController()
        nvdimm_ctl.device.deviceInfo = vim.Description()
        nvdimm_ctl.device.key = -randint(27000, 27999)

        return nvdimm_ctl

    @staticmethod
    def is_nvdimm_controller(device):
        """Return True when device is an NVDIMM controller."""
        return isinstance(device, vim.vm.device.VirtualNVDIMMController)

    def create_nvdimm_device(self, nvdimm_ctl_dev_key, pmem_profile_id, nvdimm_dev_size_mb=1024):
        """
        Create an NVDIMM device spec with backing file creation.
        Args:
            nvdimm_ctl_dev_key: key of the NVDIMM controller to attach to
            pmem_profile_id: PMem storage-policy profile id, or None
            nvdimm_dev_size_mb: device capacity in MB

        Returns: Virtual device spec for the NVDIMM device
        """
        nvdimm_dev_spec = vim.vm.device.VirtualDeviceSpec()
        nvdimm_dev_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        nvdimm_dev_spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create
        nvdimm_dev_spec.device = vim.vm.device.VirtualNVDIMM()
        nvdimm_dev_spec.device.controllerKey = nvdimm_ctl_dev_key
        nvdimm_dev_spec.device.key = -randint(28000, 28999)
        nvdimm_dev_spec.device.capacityInMB = nvdimm_dev_size_mb
        nvdimm_dev_spec.device.deviceInfo = vim.Description()
        nvdimm_dev_spec.device.backing = vim.vm.device.VirtualNVDIMM.BackingInfo()
        if pmem_profile_id is not None:
            profile = vim.vm.DefinedProfileSpec()
            profile.profileId = pmem_profile_id
            nvdimm_dev_spec.profile = [profile]

        return nvdimm_dev_spec

    @staticmethod
    def is_nvdimm_device(device):
        """Return True when device is an NVDIMM device."""
        return isinstance(device, vim.vm.device.VirtualNVDIMM)

    def find_nvdimm_by_label(self, nvdimm_label, nvdimm_devices):
        """Return the NVDIMM device whose label matches, or None (last match wins)."""
        nvdimm_dev = None
        for nvdimm in nvdimm_devices:
            if nvdimm.deviceInfo.label == nvdimm_label:
                nvdimm_dev = nvdimm

        return nvdimm_dev

    def remove_nvdimm(self, nvdimm_device):
        """Return a 'remove' spec that also destroys the NVDIMM backing file."""
        nvdimm_spec = vim.vm.device.VirtualDeviceSpec()
        nvdimm_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
        nvdimm_spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.destroy
        nvdimm_spec.device = nvdimm_device

        return nvdimm_spec

    def update_nvdimm_config(self, nvdimm_device, nvdimm_size):
        """Return an 'edit' spec resizing the given NVDIMM device to nvdimm_size MB."""
        nvdimm_spec = vim.vm.device.VirtualDeviceSpec()
        nvdimm_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
        nvdimm_spec.device = nvdimm_device
        nvdimm_device.capacityInMB = nvdimm_size

        return nvdimm_spec

    def is_tpm_device(self, device):
        """Return True when device is a virtual TPM."""
        return isinstance(device, vim.vm.device.VirtualTPM)

    def create_tpm(self):
        """Return an 'add' device spec for a virtual TPM."""
        vtpm_device_spec = vim.vm.device.VirtualDeviceSpec()
        vtpm_device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        vtpm_device_spec.device = vim.vm.device.VirtualTPM()
        vtpm_device_spec.device.deviceInfo = vim.Description()

        return vtpm_device_spec

    def remove_tpm(self, vtpm_device):
        """Return a 'remove' device spec for the given virtual TPM."""
        vtpm_device_spec = vim.vm.device.VirtualDeviceSpec()
        vtpm_device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
        vtpm_device_spec.device = vtpm_device

        return vtpm_device_spec

    def gather_disk_info(self, vm_obj):
        """
        Gather information about VM's disks
        Args:
            vm_obj: Managed object of virtual machine
        Returns: A list of dict containing disks information
        """
        controller_info = dict()
        disks_info = dict()
        if vm_obj is None:
            return disks_info

        # Index every disk controller first so each disk can be mapped back
        # to its controller type and bus number below.
        controller_index = 0
        for controller in vm_obj.config.hardware.device:
            for ctl_name, ctl_cls in self.disk_ctl_device_type.items():
                if isinstance(controller, ctl_cls):
                    controller_info[controller_index] = dict(
                        key=controller.key,
                        controller_type=ctl_name,
                        bus_number=controller.busNumber,
                        devices=controller.device
                    )
                    controller_index += 1

        disk_index = 0
        for disk in vm_obj.config.hardware.device:
            if isinstance(disk, vim.vm.device.VirtualDisk):
                # Fill in empty allocation/shares structs so attribute access
                # below never hits None.
                if disk.storageIOAllocation is None:
                    disk.storageIOAllocation = vim.StorageResourceManager.IOAllocationInfo()
                    disk.storageIOAllocation.shares = vim.SharesInfo()

                if disk.shares is None:
                    disk.shares = vim.SharesInfo()

                disks_info[disk_index] = dict(
                    key=disk.key,
                    label=disk.deviceInfo.label,
                    summary=disk.deviceInfo.summary,
                    backing_filename=disk.backing.fileName,
                    backing_datastore=disk.backing.datastore.name,
                    backing_sharing=disk.backing.sharing if hasattr(disk.backing, 'sharing') else None,
                    backing_uuid=disk.backing.uuid if hasattr(disk.backing, 'uuid') else None,
                    backing_writethrough=disk.backing.writeThrough if hasattr(disk.backing, 'writeThrough') else None,
                    # Both spellings are kept for backward compatibility with
                    # existing consumers of this fact.
                    backing_diskmode=disk.backing.diskMode if hasattr(disk.backing, 'diskMode') else None,
                    backing_disk_mode=disk.backing.diskMode if hasattr(disk.backing, 'diskMode') else None,
                    iolimit_limit=disk.storageIOAllocation.limit,
                    iolimit_shares_level=disk.storageIOAllocation.shares.level,
                    iolimit_shares_limit=disk.storageIOAllocation.shares.shares,
                    shares_level=disk.shares.level,
                    shares_limit=disk.shares.shares,
                    controller_key=disk.controllerKey,
                    unit_number=disk.unitNumber,
                    capacity_in_kb=disk.capacityInKB,
                    capacity_in_bytes=disk.capacityInBytes,
                )
                # Record backing-type-specific attributes.
                if isinstance(disk.backing, vim.vm.device.VirtualDisk.FlatVer1BackingInfo):
                    disks_info[disk_index]['backing_type'] = 'FlatVer1'

                elif isinstance(disk.backing, vim.vm.device.VirtualDisk.FlatVer2BackingInfo):
                    disks_info[disk_index]['backing_type'] = 'FlatVer2'
                    disks_info[disk_index]['backing_thinprovisioned'] = disk.backing.thinProvisioned
                    disks_info[disk_index]['backing_eagerlyscrub'] = disk.backing.eagerlyScrub

                elif isinstance(disk.backing, vim.vm.device.VirtualDisk.LocalPMemBackingInfo):
                    disks_info[disk_index]['backing_type'] = 'LocalPMem'
                    disks_info[disk_index]['backing_volumeuuid'] = disk.backing.volumeUUID

                elif isinstance(disk.backing, vim.vm.device.VirtualDisk.PartitionedRawDiskVer2BackingInfo):
                    disks_info[disk_index]['backing_type'] = 'PartitionedRawDiskVer2'
                    disks_info[disk_index]['backing_descriptorfilename'] = disk.backing.descriptorFileName

                elif isinstance(disk.backing, vim.vm.device.VirtualDisk.RawDiskMappingVer1BackingInfo):
                    disks_info[disk_index]['backing_type'] = 'RawDiskMappingVer1'
                    disks_info[disk_index]['backing_devicename'] = disk.backing.deviceName
                    disks_info[disk_index]['backing_lunuuid'] = disk.backing.lunUuid
                    disks_info[disk_index]['backing_compatibility_mode'] = disk.backing.compatibilityMode

                elif isinstance(disk.backing, vim.vm.device.VirtualDisk.RawDiskVer2BackingInfo):
                    disks_info[disk_index]['backing_type'] = 'RawDiskVer2'
                    disks_info[disk_index]['backing_descriptorfilename'] = disk.backing.descriptorFileName

                elif isinstance(disk.backing, vim.vm.device.VirtualDisk.SeSparseBackingInfo):
                    disks_info[disk_index]['backing_type'] = 'SeSparse'

                elif isinstance(disk.backing, vim.vm.device.VirtualDisk.SparseVer1BackingInfo):
                    disks_info[disk_index]['backing_type'] = 'SparseVer1'
                    disks_info[disk_index]['backing_spaceusedinkb'] = disk.backing.spaceUsedInKB
                    disks_info[disk_index]['backing_split'] = disk.backing.split

                elif isinstance(disk.backing, vim.vm.device.VirtualDisk.SparseVer2BackingInfo):
                    disks_info[disk_index]['backing_type'] = 'SparseVer2'
                    disks_info[disk_index]['backing_spaceusedinkb'] = disk.backing.spaceUsedInKB
                    disks_info[disk_index]['backing_split'] = disk.backing.split

                # Map the disk back to its controller by key.
                for controller_index in range(len(controller_info)):
                    if controller_info[controller_index]['key'] == disks_info[disk_index]['controller_key']:
                        disks_info[disk_index]['controller_bus_number'] = controller_info[controller_index]['bus_number']
                        disks_info[disk_index]['controller_type'] = controller_info[controller_index]['controller_type']

                disk_index += 1
        return disks_info
diff --git a/ansible_collections/community/vmware/plugins/module_utils/vmware.py b/ansible_collections/community/vmware/plugins/module_utils/vmware.py
new file mode 100644
index 000000000..926cf2932
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/module_utils/vmware.py
@@ -0,0 +1,1999 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, James E. King III (@jeking3) <jking@apache.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import atexit
+import ansible.module_utils.common._collections_compat as collections_compat
+import json
+import os
+import re
+import socket
+import ssl
+import hashlib
+import time
+import traceback
+import datetime
+from collections import OrderedDict
+from ansible_collections.community.vmware.plugins.module_utils.version import StrictVersion
+from random import randint
+
+REQUESTS_IMP_ERR = None
+try:
+ # requests is required for exception handling of the ConnectionError
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ HAS_REQUESTS = False
+
+PYVMOMI_IMP_ERR = None
+try:
+ from pyVim import connect
+ from pyVmomi import vim, vmodl, VmomiSupport
+ HAS_PYVMOMI = True
+ HAS_PYVMOMIJSON = hasattr(VmomiSupport, 'VmomiJSONEncoder')
+except ImportError:
+ PYVMOMI_IMP_ERR = traceback.format_exc()
+ HAS_PYVMOMI = False
+ HAS_PYVMOMIJSON = False
+
+from ansible.module_utils._text import to_text, to_native
+from ansible.module_utils.six import integer_types, iteritems, string_types, raise_from
+from ansible.module_utils.basic import env_fallback, missing_required_lib
+from ansible.module_utils.six.moves.urllib.parse import unquote
+
+
class TaskError(Exception):
    """Raised when a vSphere task fails, times out, or cannot be answered."""
+
+
class ApiAccessError(Exception):
    """Raised when connecting to or authenticating against the vSphere API fails."""
+
+
def check_answer_question_status(vm):
    """Return True when the virtual machine is blocked on a pending question.

    Args:
        vm: Virtual machine management object (may be None or lack 'runtime')

    Returns: bool
    """
    return bool(hasattr(vm, "runtime") and vm.runtime.question)
+
+
def make_answer_response(vm, answers):
    """Make the response contents to answer against locked a virtual machine.

    Args:
        vm: Virtual machine management object with a pending question
        answers: list of dicts with 'question' (message id) and
            'response' (choice label)

    Returns: List of dicts with answer id and number
    Raises: TaskError when a question id or choice label is unknown
    """
    # Map each question message id to its {choice label: choice key} table.
    response_list = {}
    for message in vm.runtime.question.message:
        response_list[message.id] = {}
        for choice in vm.runtime.question.choice.choiceInfo:
            response_list[message.id].update({
                choice.label: choice.key
            })

    responses = []
    for answer in answers:
        try:
            responses.append({
                "id": vm.runtime.question.id,
                "response_num": response_list[answer["question"]][answer["response"]]
            })
        except KeyError:
            # Narrowed from a broad 'except Exception' wrapping the whole loop:
            # the old handler referenced 'answer' and could raise NameError
            # before the loop variable was bound; only a missing question or
            # choice should produce a TaskError.
            raise TaskError("not found %s or %s or both in the response list" % (answer["question"], answer["response"]))

    return responses
+
+
def answer_question(vm, responses):
    """Send each prepared response to the VM to release its pending question.

    Args:
        vm: Virtual machine management object
        responses: list of dicts with 'id' and 'response_num' as produced
            by make_answer_response()

    Raises: TaskError when AnswerVM rejects a response
    """
    for resp in responses:
        try:
            vm.AnswerVM(resp["id"], resp["response_num"])
        except Exception as e:
            raise TaskError("answer failed: %s" % to_text(e))
+
+
def wait_for_task(task, max_backoff=64, timeout=3600, vm=None, answers=None):
    """Wait for given task using exponential back-off algorithm.

    Args:
        task: VMware task object
        max_backoff: Maximum amount of sleep time in seconds
        timeout: Timeout for the given task in seconds
        vm: Virtual machine management object; when given, a pending guest
            question on it is answered (or raised as TaskError) while waiting
        answers: Answer contents used to unlock the VM when it asks a
            question; without them a pending question raises TaskError

    Returns: Tuple with True and result for successful task
    Raises: TaskError on failure
    """
    failure_counter = 0
    start_time = time.time()

    while True:
        # A pending guest question blocks the task: answer it if we can,
        # otherwise surface the question text as the failure reason.
        if check_answer_question_status(vm):
            if answers:
                responses = make_answer_response(vm, answers)
                answer_question(vm, responses)
            else:
                raise TaskError("%s" % to_text(vm.runtime.question.text))
        if time.time() - start_time >= timeout:
            raise TaskError("Timeout")
        if task.info.state == vim.TaskInfo.State.success:
            return True, task.info.result
        if task.info.state == vim.TaskInfo.State.error:
            error_msg = task.info.error
            host_thumbprint = None
            try:
                error_msg = error_msg.msg
                # SSL verification failures carry the host thumbprint so
                # callers can retry with it.
                if hasattr(task.info.error, 'thumbprint'):
                    host_thumbprint = task.info.error.thumbprint
            except AttributeError:
                pass
            finally:
                raise_from(TaskError(error_msg, host_thumbprint), task.info.error)
        if task.info.state in [vim.TaskInfo.State.running, vim.TaskInfo.State.queued]:
            # Exponential back-off with millisecond jitter, capped at max_backoff.
            sleep_time = min(2 ** failure_counter + randint(1, 1000) / 1000, max_backoff)
            time.sleep(sleep_time)
            failure_counter += 1
+
+
def wait_for_vm_ip(content, vm, timeout=300):
    """Poll every 15 seconds until the VM reports an IPv4 or IPv6 address.

    Args:
        content: vSphere service content
        vm: Virtual machine management object
        timeout: maximum seconds to wait

    Returns: the gathered facts dict, or an empty dict when no IP appeared
    """
    poll_interval = 15
    remaining = timeout
    while remaining > 0:
        current = gather_vm_facts(content, vm)
        if current['ipv4'] or current['ipv6']:
            return current
        time.sleep(poll_interval)
        remaining -= poll_interval

    return dict()
+
+
def find_obj(content, vimtype, name, first=True, folder=None):
    """Search a container view for managed objects of the given type(s).

    A falsy ``name`` matches every object of the type; otherwise both sides
    are URL-unquoted before comparison.  Returns the first match (or None)
    when ``first`` is True, else the list of all matches.
    """
    root = folder or content.rootFolder
    view = content.viewManager.CreateContainerView(root, recursive=True, type=vimtype)
    matches = [candidate for candidate in view.view
               if not name or to_text(unquote(candidate.name)) == to_text(unquote(name))]
    view.Destroy()

    if not first:
        return matches
    return matches[0] if matches else None
+
+
def find_dvspg_by_name(dv_switch, portgroup_name):
    """Return the portgroup of ``dv_switch`` whose name matches the quoted
    ``portgroup_name``, or None when no portgroup matches."""
    wanted = quote_obj_name(portgroup_name)
    for candidate in dv_switch.portgroup:
        if candidate.name == wanted:
            return candidate
    return None
+
+
def find_object_by_name(content, name, obj_type, folder=None, recurse=True):
    """Find the first managed object whose (unquoted) name equals ``name``.

    ``obj_type`` may be a single vim type or a list of types.  Objects that
    disappear mid-scan (ManagedObjectNotFound) are skipped.
    Returns the matching object or None.
    """
    wanted_types = obj_type if isinstance(obj_type, list) else [obj_type]
    wanted = name.strip()

    for candidate in get_all_objs(content, wanted_types, folder=folder, recurse=recurse):
        try:
            if unquote(candidate.name) == wanted:
                return candidate
        except vmodl.fault.ManagedObjectNotFound:
            continue

    return None
+
+
def find_cluster_by_name(content, cluster_name, datacenter=None):
    """Find a ClusterComputeResource by name, scoped to ``datacenter`` when given."""
    if datacenter and hasattr(datacenter, 'hostFolder'):
        search_root = datacenter.hostFolder
    else:
        search_root = content.rootFolder

    return find_object_by_name(content, cluster_name, [vim.ClusterComputeResource], folder=search_root)
+
+
def find_datacenter_by_name(content, datacenter_name):
    """Find a vim.Datacenter by name, or None when it does not exist."""
    return find_object_by_name(content, datacenter_name, [vim.Datacenter])
+
+
def get_parent_datacenter(obj):
    """Walk the inventory parent chain upwards until a vim.Datacenter is found.

    Returns the datacenter object, or None when the chain ends without one.
    """
    if isinstance(obj, vim.Datacenter):
        return obj

    node = obj
    while hasattr(node, 'parent'):
        # vApp children expose parentVApp instead of parent.
        node = node.parent or node.parentVApp
        if isinstance(node, vim.Datacenter):
            return node

    return None
+
+
def find_datastore_by_name(content, datastore_name, datacenter_name=None):
    # Find a vim.Datastore by name.
    # NOTE(review): datacenter_name is forwarded as the *folder* positional
    # argument of find_object_by_name, so despite its name it appears to be
    # expected to hold a datacenter/folder object, not a name string —
    # confirm against callers before changing.
    return find_object_by_name(content, datastore_name, [vim.Datastore], datacenter_name)
+
+
def find_folder_by_name(content, folder_name):
    """Find a vim.Folder by name anywhere in the inventory, or None."""
    return find_object_by_name(content, folder_name, [vim.Folder])
+
+
def find_folder_by_fqpn(content, folder_name, datacenter_name=None, folder_type=None):
    """
    Find the folder by its given fully qualified path name.
    The Fully Qualified Path Name is I(datacenter)/I(folder type)/folder name/
    for example - Lab/vm/someparent/myfolder is a vm folder in the Lab datacenter.

    Returns the matching vim.Folder object, or None when the datacenter,
    folder type, or any path segment cannot be resolved.
    """
    # Remove leading/trailing slashes and create list of subfolders
    folder = folder_name.strip('/')
    folder_parts = folder.strip('/').split('/')

    # Process datacenter
    # When datacenter_name is not supplied it is taken from the first path
    # segment; a matching first segment is consumed either way.
    if len(folder_parts) > 0:
        if not datacenter_name:
            datacenter_name = folder_parts[0]
        if datacenter_name == folder_parts[0]:
            folder_parts.pop(0)
    datacenter = find_datacenter_by_name(content, datacenter_name)
    if not datacenter:
        return None

    # Process folder type
    # Same consumption rule as above for the folder type segment.
    if len(folder_parts) > 0:
        if not folder_type:
            folder_type = folder_parts[0]
        if folder_type == folder_parts[0]:
            folder_parts.pop(0)
    if folder_type in ['vm', 'host', 'datastore', 'network']:
        # Each datacenter exposes vmFolder/hostFolder/datastoreFolder/networkFolder.
        parent_obj = getattr(datacenter, "%sFolder" % folder_type.lower())
    else:
        return None

    # Process remaining subfolders
    # Descend segment by segment; only child entities that are folders
    # (by childType) are considered.
    if len(folder_parts) > 0:
        for part in folder_parts:
            folder_obj = None
            for part_obj in parent_obj.childEntity:
                if part_obj.name == part and ('Folder' in part_obj.childType or vim.Folder in part_obj.childType):
                    folder_obj = part_obj
                    parent_obj = part_obj
                    break
            if not folder_obj:
                return None
    else:
        # Path named only the datacenter and folder type: return the root
        # folder of that type.
        folder_obj = parent_obj
    return folder_obj
+
+
def find_dvs_by_name(content, switch_name, folder=None):
    """Find a DistributedVirtualSwitch by name, optionally scoped to ``folder``."""
    return find_object_by_name(content, switch_name, [vim.DistributedVirtualSwitch], folder=folder)
+
+
def find_hostsystem_by_name(content, hostname, datacenter=None):
    """Find a vim.HostSystem by name, scoped to ``datacenter`` when given."""
    if datacenter and hasattr(datacenter, 'hostFolder'):
        search_root = datacenter.hostFolder
    else:
        search_root = content.rootFolder
    return find_object_by_name(content, hostname, [vim.HostSystem], folder=search_root)
+
+
def find_resource_pool_by_name(content, resource_pool_name):
    """Find a vim.ResourcePool by name anywhere in the inventory, or None."""
    return find_object_by_name(content, resource_pool_name, [vim.ResourcePool])
+
+
def find_resource_pool_by_cluster(content, resource_pool_name='Resources', cluster=None):
    """Find a resource pool by name under the given cluster.

    Defaults to the cluster's implicit root pool 'Resources'.
    """
    return find_object_by_name(content, resource_pool_name, [vim.ResourcePool], folder=cluster)
+
+
def find_network_by_name(content, network_name, datacenter_name=None):
    # Find a vim.Network by name.
    # NOTE(review): as in find_datastore_by_name, datacenter_name is passed
    # as the *folder* positional argument of find_object_by_name — it seems
    # to be expected to hold a folder-like object rather than a name string;
    # confirm against callers.
    return find_object_by_name(content, network_name, [vim.Network], datacenter_name)
+
+
def find_vm_by_id(content, vm_id, vm_id_type="vm_name", datacenter=None,
                  cluster=None, folder=None, match_first=False):
    """ UUID is unique to a VM, every other id returns the first match.

    vm_id_type selects the lookup strategy: 'dns_name', 'uuid' (BIOS),
    'instance_uuid', 'ip', 'vm_name' or 'inventory_path'.  For
    'inventory_path' the ``folder`` argument is the path to search and
    ``match_first`` stops at the first name match.  Returns the VM managed
    object or None.
    """
    si = content.searchIndex
    vm = None

    if vm_id_type == 'dns_name':
        vm = si.FindByDnsName(datacenter=datacenter, dnsName=vm_id, vmSearch=True)
    elif vm_id_type == 'uuid':
        # Search By BIOS UUID rather than instance UUID
        vm = si.FindByUuid(datacenter=datacenter, instanceUuid=False, uuid=vm_id, vmSearch=True)
    elif vm_id_type == 'instance_uuid':
        vm = si.FindByUuid(datacenter=datacenter, instanceUuid=True, uuid=vm_id, vmSearch=True)
    elif vm_id_type == 'ip':
        vm = si.FindByIp(datacenter=datacenter, ip=vm_id, vmSearch=True)
    elif vm_id_type == 'vm_name':
        # Name search is scoped to the cluster if given, else the datacenter's
        # host folder, else the whole inventory.
        folder = None
        if cluster:
            folder = cluster
        elif datacenter:
            folder = datacenter.hostFolder
        vm = find_vm_by_name(content, vm_id, folder)
    elif vm_id_type == 'inventory_path':
        searchpath = folder
        # get all objects for this path
        f_obj = si.FindByInventoryPath(searchpath)
        if f_obj:
            if isinstance(f_obj, vim.Datacenter):
                f_obj = f_obj.vmFolder
            for c_obj in f_obj.childEntity:
                if not isinstance(c_obj, vim.VirtualMachine):
                    continue
                if c_obj.name == vm_id:
                    vm = c_obj
                    if match_first:
                        break
    return vm
+
+
def find_vm_by_name(content, vm_name, folder=None, recurse=True):
    """Find a vim.VirtualMachine by name, optionally scoped to ``folder``."""
    return find_object_by_name(content, vm_name, [vim.VirtualMachine], folder=folder, recurse=recurse)
+
+
def find_host_portgroup_by_name(host, portgroup_name):
    """Return the host's standard-switch portgroup whose spec name matches,
    or None when the host has no such portgroup."""
    matches = [pg for pg in host.config.network.portgroup
               if pg.spec.name == portgroup_name]
    return matches[0] if matches else None
+
+
def compile_folder_path_for_object(vobj):
    """ make a /vm/foo/bar/baz like folder path for an object """
    segments = []
    # The object's own name is part of the path only if it is itself a folder.
    if isinstance(vobj, vim.Folder):
        segments.append(vobj.name)

    node = vobj
    while hasattr(node, 'parent'):
        node = node.parent
        # Stop once we reach the hidden inventory root of vCenter or ESXi.
        if getattr(node, '_moId', None) in ('group-d1', 'ha-folder-root'):
            break
        if isinstance(node, vim.Folder):
            segments.append(node.name)
    return '/' + '/'.join(reversed(segments))
+
+
+def _get_vm_prop(vm, attributes):
+ """Safely get a property or return None"""
+ result = vm
+ for attribute in attributes:
+ try:
+ result = getattr(result, attribute)
+ except (AttributeError, IndexError):
+ return None
+ return result
+
+
def gather_vm_facts(content, vm):
    """ Gather facts from vim.VirtualMachine object.

    Builds the flat 'hw_*' fact dictionary used by vmware_guest* modules:
    hardware sizing, network interfaces, datastores, files, snapshots,
    custom values, advanced settings, VNC and vTPM details.

    :param content: vSphere service content object.
    :param vm: vim.VirtualMachine managed object.
    :return: dict of facts.
    """
    facts = {
        'module_hw': True,
        'hw_name': vm.config.name,
        'hw_power_status': vm.summary.runtime.powerState,
        'hw_guest_full_name': vm.summary.guest.guestFullName,
        'hw_guest_id': vm.summary.guest.guestId,
        'hw_product_uuid': vm.config.uuid,
        'hw_processor_count': vm.config.hardware.numCPU,
        'hw_cores_per_socket': vm.config.hardware.numCoresPerSocket,
        'hw_memtotal_mb': vm.config.hardware.memoryMB,
        'hw_interfaces': [],
        'hw_datastores': [],
        'hw_files': [],
        'hw_esxi_host': None,
        'hw_guest_ha_state': None,
        'hw_is_template': vm.config.template,
        'hw_folder': None,
        'hw_version': vm.config.version,
        'instance_uuid': vm.config.instanceUuid,
        'guest_tools_status': _get_vm_prop(vm, ('guest', 'toolsRunningStatus')),
        'guest_tools_version': _get_vm_prop(vm, ('guest', 'toolsVersion')),
        # Pending question (if any) is serialized via pyVmomi's JSON encoder
        # so the result is plain JSON-compatible data.
        'guest_question': json.loads(json.dumps(vm.summary.runtime.question, cls=VmomiSupport.VmomiJSONEncoder,
                                                sort_keys=True, strip_dynamic=True)),
        'guest_consolidation_needed': vm.summary.runtime.consolidationNeeded,
        'ipv4': None,
        'ipv6': None,
        'annotation': vm.config.annotation,
        'customvalues': {},
        'snapshots': [],
        'current_snapshot': None,
        'vnc': {},
        'moid': vm._moId,
        'vimref': "vim.VirtualMachine:%s" % vm._moId,
        'advanced_settings': {},
    }

    # facts that may or may not exist
    if vm.summary.runtime.host:
        try:
            host = vm.summary.runtime.host
            facts['hw_esxi_host'] = host.summary.config.name
            facts['hw_cluster'] = host.parent.name if host.parent and isinstance(host.parent, vim.ClusterComputeResource) else None

        except vim.fault.NoPermission:
            # User does not have read permission for the host system,
            # proceed without this value. This value does not contribute or hamper
            # provisioning or power management operations.
            pass
    if vm.summary.runtime.dasVmProtection:
        facts['hw_guest_ha_state'] = vm.summary.runtime.dasVmProtection.dasProtected

    # One entry per datastore backing the VM.
    datastores = vm.datastore
    for ds in datastores:
        facts['hw_datastores'].append(ds.info.name)

    # Collect the on-datastore file layout (vmx, snapshots, config, logs,
    # disks). Best effort: any missing layout attribute aborts collection.
    try:
        files = vm.config.files
        layout = vm.layout
        if files:
            facts['hw_files'] = [files.vmPathName]
            for item in layout.snapshot:
                for snap in item.snapshotFile:
                    if 'vmsn' in snap:
                        facts['hw_files'].append(snap)
            for item in layout.configFile:
                facts['hw_files'].append(os.path.join(os.path.dirname(files.vmPathName), item))
            for item in vm.layout.logFile:
                facts['hw_files'].append(os.path.join(files.logDirectory, item))
            for item in vm.layout.disk:
                for disk in item.diskFile:
                    facts['hw_files'].append(disk)
    except Exception:
        pass

    facts['hw_folder'] = PyVmomi.get_vm_path(content, vm)

    cfm = content.customFieldsManager
    # Resolve custom values
    for value_obj in vm.summary.customValue:
        kn = value_obj.key
        if cfm is not None and cfm.field:
            for f in cfm.field:
                if f.key == value_obj.key:
                    # Map the numeric field key back to its human-readable name.
                    kn = f.name
                    # Exit the loop immediately, we found it
                    break

        facts['customvalues'][kn] = value_obj.value

    # Resolve advanced settings
    for advanced_setting in vm.config.extraConfig:
        facts['advanced_settings'][advanced_setting.key] = advanced_setting.value

    # Map MAC address -> list of guest IPs, as reported by VMware Tools.
    net_dict = {}
    vmnet = _get_vm_prop(vm, ('guest', 'net'))
    if vmnet:
        for device in vmnet:
            # deviceConfigId <= 0 means the NIC is not bound to a hardware device.
            if device.deviceConfigId > 0:
                net_dict[device.macAddress] = list(device.ipAddress)

    if vm.guest.ipAddress:
        if ':' in vm.guest.ipAddress:
            facts['ipv6'] = vm.guest.ipAddress
        else:
            facts['ipv4'] = vm.guest.ipAddress

    # Build one hw_ethN fact per virtual NIC (devices exposing a macAddress).
    ethernet_idx = 0
    for entry in vm.config.hardware.device:
        if not hasattr(entry, 'macAddress'):
            continue

        if entry.macAddress:
            mac_addr = entry.macAddress
            mac_addr_dash = mac_addr.replace(':', '-')
        else:
            mac_addr = mac_addr_dash = None

        # Distributed-vSwitch backings carry port/portgroup keys; standard
        # backings do not have the nested attributes.
        if (
            hasattr(entry, "backing")
            and hasattr(entry.backing, "port")
            and hasattr(entry.backing.port, "portKey")
            and hasattr(entry.backing.port, "portgroupKey")
        ):
            port_group_key = entry.backing.port.portgroupKey
            port_key = entry.backing.port.portKey
        else:
            port_group_key = None
            port_key = None

        factname = 'hw_eth' + str(ethernet_idx)
        facts[factname] = {
            'addresstype': entry.addressType,
            'label': entry.deviceInfo.label,
            'macaddress': mac_addr,
            'ipaddresses': net_dict.get(entry.macAddress, None),
            'macaddress_dash': mac_addr_dash,
            'summary': entry.deviceInfo.summary,
            'portgroup_portkey': port_key,
            'portgroup_key': port_group_key,
        }
        facts['hw_interfaces'].append('eth' + str(ethernet_idx))
        ethernet_idx += 1

    snapshot_facts = list_snapshots(vm)
    if 'snapshots' in snapshot_facts:
        facts['snapshots'] = snapshot_facts['snapshots']
        facts['current_snapshot'] = snapshot_facts['current_snapshot']

    facts['vnc'] = get_vnc_extraconfig(vm)

    # Gather vTPM information
    facts['tpm_info'] = {
        'tpm_present': vm.summary.config.tpmPresent if hasattr(vm.summary.config, 'tpmPresent') else None,
        'provider_id': vm.config.keyId.providerId.id if vm.config.keyId else None
    }
    return facts
+
+
def ansible_date_time_facts(timestamp):
    """Split a datetime into the ansible-style date_time fact dictionary.

    :param timestamp: a (preferably timezone-aware) datetime.datetime object,
        or None.
    :return: dict of string-valued date/time facts; empty dict for None.
    """
    # timestamp is a datetime.datetime object
    date_time_facts = {}
    if timestamp is None:
        return date_time_facts

    utctimestamp = timestamp.astimezone(datetime.timezone.utc)

    date_time_facts['year'] = timestamp.strftime('%Y')
    date_time_facts['month'] = timestamp.strftime('%m')
    date_time_facts['weekday'] = timestamp.strftime('%A')
    date_time_facts['weekday_number'] = timestamp.strftime('%w')
    date_time_facts['weeknumber'] = timestamp.strftime('%W')
    date_time_facts['day'] = timestamp.strftime('%d')
    date_time_facts['hour'] = timestamp.strftime('%H')
    date_time_facts['minute'] = timestamp.strftime('%M')
    date_time_facts['second'] = timestamp.strftime('%S')
    # strftime('%s') is a non-portable glibc extension (absent on Windows and
    # some libcs) and it ignores tzinfo; datetime.timestamp() is portable and
    # honors the timezone of an aware datetime.
    date_time_facts['epoch'] = str(int(timestamp.timestamp()))
    date_time_facts['date'] = timestamp.strftime('%Y-%m-%d')
    date_time_facts['time'] = timestamp.strftime('%H:%M:%S')
    date_time_facts['iso8601_micro'] = utctimestamp.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
    date_time_facts['iso8601'] = utctimestamp.strftime("%Y-%m-%dT%H:%M:%SZ")
    date_time_facts['iso8601_basic'] = timestamp.strftime("%Y%m%dT%H%M%S%f")
    date_time_facts['iso8601_basic_short'] = timestamp.strftime("%Y%m%dT%H%M%S")
    date_time_facts['tz'] = timestamp.strftime("%Z")
    date_time_facts['tz_offset'] = timestamp.strftime("%z")

    return date_time_facts
+
+
def deserialize_snapshot_obj(obj):
    """Convert a snapshot tree node into a plain, JSON-friendly dict."""
    return {
        'id': obj.id,
        'name': obj.name,
        'description': obj.description,
        'creation_time': obj.createTime,
        'state': obj.state,
        'quiesced': obj.quiesced,
    }
+
+
def list_snapshots_recursively(snapshots):
    """Flatten a snapshot tree (depth-first) into a list of serialized dicts."""
    flattened = []
    for node in snapshots:
        flattened.append(deserialize_snapshot_obj(node))
        flattened.extend(list_snapshots_recursively(node.childSnapshotList))
    return flattened
+
+
def get_current_snap_obj(snapshots, snapob):
    """Search the snapshot tree for nodes whose snapshot reference equals *snapob*."""
    matches = []
    for node in snapshots:
        if node.snapshot == snapob:
            matches.append(node)
        matches.extend(get_current_snap_obj(node.childSnapshotList, snapob))
    return matches
+
+
def list_snapshots(vm):
    """Return {'snapshots': [...], 'current_snapshot': {...}} for *vm*, or {} when it has none."""
    # Both checks mirror the defensive access: the property may be missing or None.
    if not _get_vm_prop(vm, ('snapshot',)) or vm.snapshot is None:
        return {}

    facts = {'snapshots': list_snapshots_recursively(vm.snapshot.rootSnapshotList)}
    current_ref = vm.snapshot.currentSnapshot
    matches = get_current_snap_obj(vm.snapshot.rootSnapshotList, current_ref)
    facts['current_snapshot'] = deserialize_snapshot_obj(matches[0]) if matches else dict()
    return facts
+
+
def get_vnc_extraconfig(vm):
    """Extract the RemoteDisplay.vnc.* settings from the VM's extraConfig as a dict."""
    vnc_settings = {}
    wanted = ('enabled', 'ip', 'port', 'password')
    for opt in vm.config.extraConfig:
        key = opt.key.lower()
        for name in wanted:
            if key == 'remotedisplay.vnc.' + name:
                vnc_settings[name] = opt.value
    return vnc_settings
+
+
def vmware_argument_spec():
    """Return the argument spec shared by all community.vmware modules.

    Each connection option can also be supplied through the corresponding
    VMWARE_* environment variable.
    """
    spec = dict()
    spec['hostname'] = dict(
        type='str', required=False,
        fallback=(env_fallback, ['VMWARE_HOST']),
    )
    spec['username'] = dict(
        type='str', required=False, aliases=['user', 'admin'],
        fallback=(env_fallback, ['VMWARE_USER']),
    )
    spec['password'] = dict(
        type='str', required=False, aliases=['pass', 'pwd'], no_log=True,
        fallback=(env_fallback, ['VMWARE_PASSWORD']),
    )
    spec['port'] = dict(
        type='int', default=443,
        fallback=(env_fallback, ['VMWARE_PORT']),
    )
    spec['validate_certs'] = dict(
        type='bool', required=False, default=True,
        fallback=(env_fallback, ['VMWARE_VALIDATE_CERTS']),
    )
    spec['proxy_host'] = dict(
        type='str', required=False, default=None,
        fallback=(env_fallback, ['VMWARE_PROXY_HOST']),
    )
    spec['proxy_port'] = dict(
        type='int', required=False, default=None,
        fallback=(env_fallback, ['VMWARE_PROXY_PORT']),
    )
    return spec
+
+
def connect_to_api(module, disconnect_atexit=True, return_si=False, hostname=None, username=None, password=None, port=None, validate_certs=None,
                   httpProxyHost=None, httpProxyPort=None):
    """Open a pyVmomi connection to vCenter/ESXi and return the service content.

    Connection parameters fall back to the module's params when not passed
    explicitly. Errors are reported via module.fail_json when *module* is
    given, otherwise an ApiAccessError is raised.

    :param module: AnsibleModule instance or None (library use).
    :param disconnect_atexit: register an atexit Disconnect handler.
    :param return_si: when True return (service_instance, content) instead of
        just the content.
    :param httpProxyHost/httpProxyPort: optional HTTP proxy to tunnel through.
    :return: service content, or (service instance, content) if return_si.
    """
    if module:
        if not hostname:
            hostname = module.params['hostname']
        if not username:
            username = module.params['username']
        if not password:
            password = module.params['password']
        if not httpProxyHost:
            httpProxyHost = module.params.get('proxy_host')
        if not httpProxyPort:
            httpProxyPort = module.params.get('proxy_port')
        if not port:
            port = module.params.get('port', 443)
        if not validate_certs:
            validate_certs = module.params['validate_certs']

    def _raise_or_fail(msg):
        # fail_json exits when running under Ansible; the raise covers
        # library callers that passed module=None.
        if module is not None:
            module.fail_json(msg=msg)
        raise ApiAccessError(msg)

    if not hostname:
        _raise_or_fail(msg="Hostname parameter is missing."
                           " Please specify this parameter in task or"
                           " export environment variable like 'export VMWARE_HOST=ESXI_HOSTNAME'")

    if not username:
        _raise_or_fail(msg="Username parameter is missing."
                           " Please specify this parameter in task or"
                           " export environment variable like 'export VMWARE_USER=ESXI_USERNAME'")

    if not password:
        _raise_or_fail(msg="Password parameter is missing."
                           " Please specify this parameter in task or"
                           " export environment variable like 'export VMWARE_PASSWORD=ESXI_PASSWORD'")

    # Build the SSL context: verified when validate_certs, otherwise an
    # unverified context; None on interpreters too old for SSLContext.
    if validate_certs and not hasattr(ssl, 'SSLContext'):
        _raise_or_fail(msg='pyVim does not support changing verification mode with python < 2.7.9. Either update '
                           'python or use validate_certs=false.')
    elif validate_certs:
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        ssl_context.verify_mode = ssl.CERT_REQUIRED
        ssl_context.check_hostname = True
        ssl_context.load_default_certs()
    elif hasattr(ssl, 'SSLContext'):
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        ssl_context.verify_mode = ssl.CERT_NONE
        ssl_context.check_hostname = False
    else:  # Python < 2.7.9 or RHEL/Centos < 7.4
        ssl_context = None

    service_instance = None

    connect_args = dict(
        host=hostname,
        port=port,
    )
    if ssl_context:
        connect_args.update(sslContext=ssl_context)

    msg_suffix = ''
    try:
        if httpProxyHost:
            # Proxy connections need the lower-level stub API; SmartConnect
            # does not take proxy arguments.
            msg_suffix = " [proxy: %s:%d]" % (httpProxyHost, httpProxyPort)
            connect_args.update(httpProxyHost=httpProxyHost, httpProxyPort=httpProxyPort)
            smart_stub = connect.SmartStubAdapter(**connect_args)
            session_stub = connect.VimSessionOrientedStub(smart_stub, connect.VimSessionOrientedStub.makeUserLoginMethod(username, password))
            service_instance = vim.ServiceInstance('ServiceInstance', session_stub)
        else:
            connect_args.update(user=username, pwd=password)
            service_instance = connect.SmartConnect(**connect_args)
    except vim.fault.InvalidLogin as invalid_login:
        msg = "Unable to log on to vCenter or ESXi API at %s:%s " % (hostname, port)
        _raise_or_fail(msg="%s as %s: %s" % (msg, username, invalid_login.msg) + msg_suffix)
    except vim.fault.NoPermission as no_permission:
        _raise_or_fail(msg="User %s does not have required permission"
                           " to log on to vCenter or ESXi API at %s:%s : %s" % (username, hostname, port, no_permission.msg))
    except (requests.ConnectionError, ssl.SSLError) as generic_req_exc:
        _raise_or_fail(msg="Unable to connect to vCenter or ESXi API at %s on TCP/%s: %s" % (hostname, port, generic_req_exc))
    except vmodl.fault.InvalidRequest as invalid_request:
        # Request is malformed
        msg = "Failed to get a response from server %s:%s " % (hostname, port)
        _raise_or_fail(msg="%s as request is malformed: %s" % (msg, invalid_request.msg) + msg_suffix)
    except Exception as generic_exc:
        msg = "Unknown error while connecting to vCenter or ESXi API at %s:%s" % (hostname, port) + msg_suffix
        _raise_or_fail(msg="%s : %s" % (msg, generic_exc))

    if service_instance is None:
        msg = "Unknown error while connecting to vCenter or ESXi API at %s:%s" % (hostname, port)
        _raise_or_fail(msg=msg + msg_suffix)

    # Disabling atexit should be used in special cases only.
    # Such as IP change of the ESXi host which removes the connection anyway.
    # Also removal significantly speeds up the return of the module
    if disconnect_atexit:
        atexit.register(connect.Disconnect, service_instance)
    if return_si:
        return service_instance, service_instance.RetrieveContent()
    return service_instance.RetrieveContent()
+
+
def get_all_objs(content, vimtype, folder=None, recurse=True):
    """Map every managed object of *vimtype* under *folder* (root by default) to its name."""
    root = folder or content.rootFolder

    found = {}
    view = content.viewManager.CreateContainerView(root, vimtype, recurse)
    for ref in view.view:
        try:
            found[ref] = ref.name
        except vmodl.fault.ManagedObjectNotFound:
            # The object was deleted between view creation and the name lookup.
            pass
    return found
+
+
def run_command_in_guest(content, vm, username, password, program_path, program_args, program_cwd, program_env):
    """Run a program inside the guest OS via VMware Tools and wait for it to finish.

    :param content: vSphere service content (provides guestOperationsManager).
    :param vm: vim.VirtualMachine to run the command in.
    :param username/password: guest OS credentials.
    :param program_path: absolute path of the executable in the guest.
    :param program_args: argument string passed to the program.
    :param program_cwd: working directory in the guest.
    :param program_env: NOTE(review): accepted but never used below — the
        ProgramSpec is built without envVariables; confirm before relying on it.
    :return: result dict with 'failed', 'msg' and, on success, 'pid', 'owner',
        'startTime', 'endTime' and 'exitCode'.
    """
    result = {'failed': False}

    tools_status = vm.guest.toolsStatus
    if (tools_status == 'toolsNotInstalled' or tools_status == 'toolsNotRunning'):
        result['failed'] = True
        result['msg'] = "VMwareTools is not installed or is not running in the guest"
        return result

    # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
    creds = vim.vm.guest.NamePasswordAuthentication(
        username=username, password=password
    )

    try:
        # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/ProcessManager.rst
        pm = content.guestOperationsManager.processManager
        # https://www.vmware.com/support/developer/converter-sdk/conv51_apireference/vim.vm.guest.ProcessManager.ProgramSpec.html
        ps = vim.vm.guest.ProcessManager.ProgramSpec(
            # programPath=program,
            # arguments=args
            programPath=program_path,
            arguments=program_args,
            workingDirectory=program_cwd,
        )

        res = pm.StartProgramInGuest(vm, creds, ps)
        result['pid'] = res
        pdata = pm.ListProcessesInGuest(vm, creds, [res])

        # wait for pid to finish
        # Poll once a second; endTime is set by Tools when the process exits.
        while not pdata[0].endTime:
            time.sleep(1)
            pdata = pm.ListProcessesInGuest(vm, creds, [res])

        result['owner'] = pdata[0].owner
        result['startTime'] = pdata[0].startTime.isoformat()
        result['endTime'] = pdata[0].endTime.isoformat()
        result['exitCode'] = pdata[0].exitCode
        if result['exitCode'] != 0:
            result['failed'] = True
            result['msg'] = "program exited non-zero"
        else:
            result['msg'] = "program completed successfully"

    except Exception as e:
        # Any guest-operations fault (bad credentials, missing path, ...) is
        # reported back as a failed result instead of raising.
        result['msg'] = str(e)
        result['failed'] = True

    return result
+
+
def serialize_spec(clonespec):
    """Serialize a clonespec or a relocation spec

    Recursively converts a pyVmomi spec object into plain dicts/lists/strings
    for human-readable module output. The isinstance chain is order-sensitive.
    """
    data = {}
    attrs = dir(clonespec)
    attrs = [x for x in attrs if not x.startswith('_')]
    for x in attrs:
        xo = getattr(clonespec, x)
        if callable(xo):
            continue
        xt = type(xo)
        if xo is None:
            data[x] = None
        elif isinstance(xo, vim.vm.ConfigSpec):
            data[x] = serialize_spec(xo)
        elif isinstance(xo, vim.vm.RelocateSpec):
            data[x] = serialize_spec(xo)
        elif isinstance(xo, vim.vm.device.VirtualDisk):
            data[x] = serialize_spec(xo)
        elif isinstance(xo, vim.vm.device.VirtualDeviceSpec.FileOperation):
            data[x] = to_text(xo)
        elif isinstance(xo, vim.Description):
            data[x] = {
                'dynamicProperty': serialize_spec(xo.dynamicProperty),
                'dynamicType': serialize_spec(xo.dynamicType),
                'label': serialize_spec(xo.label),
                'summary': serialize_spec(xo.summary),
            }
        elif hasattr(xo, 'name'):
            # Generic managed object: render as "<type>:<name>".
            data[x] = to_text(xo) + ':' + to_text(xo.name)
        elif isinstance(xo, vim.vm.ProfileSpec):
            pass
        elif issubclass(xt, list):
            data[x] = []
            for xe in xo:
                data[x].append(serialize_spec(xe))
        elif issubclass(xt, string_types + integer_types + (float, bool)):
            if issubclass(xt, integer_types):
                data[x] = int(xo)
            else:
                data[x] = to_text(xo)
        elif issubclass(xt, bool):
            # NOTE(review): unreachable — bool subclasses int, so booleans are
            # already consumed (as ints) by the branch above. Fixing this would
            # change the serialized output, so it is left as-is.
            data[x] = xo
        elif issubclass(xt, dict):
            data[to_text(x)] = {}
            for k, v in xo.items():
                k = to_text(k)
                data[x][k] = serialize_spec(v)
        else:
            # Unknown type: record the type itself rather than the value.
            data[x] = str(xt)

    return data
+
+
def find_host_by_cluster_datacenter(module, content, datacenter_name, cluster_name, host_name):
    """Locate an ESXi host inside the named cluster/datacenter.

    Fails the module if the datacenter or cluster cannot be found.
    Returns (host_or_None, cluster).
    """
    datacenter = find_datacenter_by_name(content, datacenter_name)
    if datacenter is None:
        module.fail_json(msg="Unable to find datacenter with name %s" % datacenter_name)
    cluster = find_cluster_by_name(content, cluster_name, datacenter=datacenter)
    if cluster is None:
        module.fail_json(msg="Unable to find cluster with name %s" % cluster_name)

    matched_host = next((h for h in cluster.host if h.name == host_name), None)
    return matched_host, cluster
+
+
def set_vm_power_state(content, vm, state, force, timeout=0, answers=None):
    """
    Set the power status for a VM determined by the current and
    requested states. force is forceful

    :param content: vSphere service content object.
    :param vm: vim.VirtualMachine to operate on.
    :param state: requested state ('present', 'powered-on', 'shutdown-guest',
        ...); separators and case are normalized before comparison.
    :param force: allow transitions from intermediate power states.
    :param timeout: seconds to wait for poweroff after a guest shutdown
        (0 = do not wait).
    :param answers: optional answers for pending VM questions, forwarded to
        wait_for_task.
    :return: result dict with 'changed', 'failed', optional 'msg' and the
        refreshed 'instance' facts.
    """
    facts = gather_vm_facts(content, vm)
    if state == 'present':
        state = 'poweredon'
    # Normalize 'powered-on' / 'powered_on' / 'PoweredOn' to 'poweredon' etc.
    expected_state = state.replace('_', '').replace('-', '').lower()
    current_state = facts['hw_power_status'].lower()
    result = dict(
        changed=False,
        failed=False,
    )

    # Need Force
    if not force and current_state not in ['poweredon', 'poweredoff']:
        result['failed'] = True
        result['msg'] = "Virtual Machine is in %s power state. Force is required!" % current_state
        result['instance'] = gather_vm_facts(content, vm)
        return result

    # State is not already true
    if current_state != expected_state:
        task = None
        try:
            if expected_state == 'poweredoff':
                task = vm.PowerOff()

            elif expected_state == 'poweredon':
                task = vm.PowerOn()

            elif expected_state == 'restarted':
                if current_state in ('poweredon', 'poweringon', 'resetting', 'poweredoff'):
                    task = vm.Reset()
                else:
                    result['failed'] = True
                    result['msg'] = "Cannot restart virtual machine in the current state %s" % current_state

            elif expected_state == 'suspended':
                if current_state in ('poweredon', 'poweringon'):
                    task = vm.Suspend()
                else:
                    result['failed'] = True
                    result['msg'] = 'Cannot suspend virtual machine in the current state %s' % current_state

            elif expected_state in ['shutdownguest', 'rebootguest']:
                # Guest-level operations require running VMware Tools.
                if current_state == 'poweredon':
                    if vm.guest.toolsRunningStatus == 'guestToolsRunning':
                        if expected_state == 'shutdownguest':
                            task = vm.ShutdownGuest()
                            if timeout > 0:
                                result.update(wait_for_poweroff(vm, timeout))
                        else:
                            task = vm.RebootGuest()
                        # Set result['changed'] immediately because
                        # shutdown and reboot return None.
                        result['changed'] = True
                    else:
                        result['failed'] = True
                        result['msg'] = "VMware tools should be installed for guest shutdown/reboot"
                elif current_state == 'poweredoff':
                    result['changed'] = False
                else:
                    result['failed'] = True
                    result['msg'] = "Virtual machine %s must be in poweredon state for guest reboot" % vm.name

            else:
                result['failed'] = True
                result['msg'] = "Unsupported expected state provided: %s" % expected_state

        except Exception as e:
            result['failed'] = True
            result['msg'] = to_text(e)

        # Guest shutdown/reboot return None, so 'task' is only set for the
        # vSphere task-producing transitions above.
        if task:
            try:
                wait_for_task(task, vm=vm, answers=answers)
            except TaskError as e:
                result['failed'] = True
                result['msg'] = to_text(e)
            finally:
                if task.info.state == 'error':
                    result['failed'] = True
                    result['msg'] = task.info.error.msg
                else:
                    result['changed'] = True

    # need to get new metadata if changed
    result['instance'] = gather_vm_facts(content, vm)

    return result
+
+
def wait_for_poweroff(vm, timeout=300):
    """Poll every 15s until *vm* is powered off or *timeout* seconds elapse.

    Returns {} on success, or {'failed': True, 'msg': ...} on timeout.
    """
    poll_interval = 15
    remaining = timeout
    while remaining > 0:
        if vm.runtime.powerState.lower() == 'poweredoff':
            return dict()
        time.sleep(poll_interval)
        remaining -= poll_interval
    return {
        'failed': True,
        'msg': 'Timeout while waiting for VM power off.',
    }
+
+
def is_integer(value, type_of='int'):
    """Return True if *value* can be coerced to the given vmodl numeric type."""
    try:
        VmomiSupport.vmodlTypes[type_of](value)
    except (TypeError, ValueError):
        return False
    return True
+
+
def is_boolean(value):
    """Return True if *value* reads as a boolean-like string ('true', 'off', 'yes', ...)."""
    return str(value).lower() in ('true', 'on', 'yes', 'false', 'off', 'no')
+
+
def is_truthy(value):
    """Return True if *value* reads as an affirmative string ('true', 'on', 'yes')."""
    return str(value).lower() in ('true', 'on', 'yes')
+
+
# options is the dict as defined in the module parameters, current_options is
# the list of the currently set options as returned by the vSphere API.
# When truthy_strings_as_bool is True, strings like 'true', 'off' or 'yes'
# are converted to booleans.
def option_diff(options, current_options, truthy_strings_as_bool=True):
    """Return the vim.option.OptionValue objects that need to change.

    Desired values are coerced to vmodl types before comparison so that a
    string '5' compares equal to an existing int option, etc. An option is
    included when it is missing from current_options or its value differs.
    """
    current_options_dict = {}
    for option in current_options:
        current_options_dict[option.key] = option.value

    change_option_list = []
    for option_key, option_value in options.items():
        # Order matters: the boolean check must come first, because Python
        # bools would otherwise match the int branch (bool is a subclass of int).
        if truthy_strings_as_bool and is_boolean(option_value):
            option_value = VmomiSupport.vmodlTypes['bool'](is_truthy(option_value))
        elif type(option_value) is int:
            option_value = VmomiSupport.vmodlTypes['int'](option_value)
        elif type(option_value) is float:
            option_value = VmomiSupport.vmodlTypes['float'](option_value)
        elif type(option_value) is str:
            option_value = VmomiSupport.vmodlTypes['string'](option_value)

        if option_key not in current_options_dict or current_options_dict[option_key] != option_value:
            change_option_list.append(vim.option.OptionValue(key=option_key, value=option_value))

    return change_option_list
+
+
def quote_obj_name(object_name=None):
    """
    Replace special characters in object name
    with urllib quote equivalent

    Returns None for a missing or empty name.
    """
    if not object_name:
        return None

    # '%' must be encoded first so the escapes we insert are not re-encoded.
    for char, quoted in (('%', '%25'), ('/', '%2f'), ('\\', '%5c')):
        object_name = object_name.replace(char, quoted)

    return object_name
+
+
+class PyVmomi(object):
    def __init__(self, module):
        """
        Constructor

        Validates the presence of the 'requests' and 'pyvmomi' libraries,
        connects to the vCenter/ESXi API (failing the module on error) and
        caches the service instance, content and custom field manager fields.

        :param module: AnsibleModule instance providing connection params.
        """
        if not HAS_REQUESTS:
            module.fail_json(msg=missing_required_lib('requests'),
                             exception=REQUESTS_IMP_ERR)

        if not HAS_PYVMOMI:
            module.fail_json(msg=missing_required_lib('PyVmomi'),
                             exception=PYVMOMI_IMP_ERR)

        self.module = module
        self.params = module.params
        # Populated by get_vm() once a VM has been located.
        self.current_vm_obj = None
        self.si, self.content = connect_to_api(self.module, return_si=True)
        self.custom_field_mgr = []
        if self.content.customFieldsManager:  # not an ESXi
            self.custom_field_mgr = self.content.customFieldsManager.field
+
    def is_vcenter(self):
        """
        Check if given hostname is vCenter or ESXi host
        Returns: True if given connection is with vCenter server
                 False if given connection is with ESXi server
                 (implicitly None for any other apiType)

        """
        api_type = None
        try:
            api_type = self.content.about.apiType
        except (vmodl.RuntimeFault, vim.fault.VimFault) as exc:
            self.module.fail_json(msg="Failed to get status of vCenter server : %s" % exc.msg)

        if api_type == 'VirtualCenter':
            return True
        elif api_type == 'HostAgent':
            return False
+
+ def vcenter_version_at_least(self, version=None):
+ """
+ Check that the vCenter server is at least a specific version number
+ Args:
+ version (tuple): a version tuple, for example (6, 7, 0)
+ Returns: bool
+ """
+ if version:
+ vc_version = self.content.about.version
+ return StrictVersion(vc_version) >= StrictVersion('.'.join(map(str, version)))
+ self.module.fail_json(msg='The passed vCenter version: %s is None.' % version)
+
+ def get_cert_fingerprint(self, fqdn, port, proxy_host=None, proxy_port=None):
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.settimeout(1)
+ if proxy_host:
+ sock.connect((
+ proxy_host,
+ proxy_port))
+ command = "CONNECT %s:%d HTTP/1.0\r\n\r\n" % (fqdn, port)
+ sock.send(command.encode())
+ buf = sock.recv(8192).decode()
+ if buf.split()[1] != '200':
+ self.module.fail_json(msg="Failed to connect to the proxy")
+ ctx = ssl.create_default_context()
+ ctx.check_hostname = False
+ ctx.verify_mode = ssl.CERT_NONE
+ der_cert_bin = ctx.wrap_socket(sock, server_hostname=fqdn).getpeercert(True)
+ sock.close()
+ else:
+ wrapped_socket = ssl.wrap_socket(sock)
+ try:
+ wrapped_socket.connect((fqdn, port))
+ except socket.error as socket_error:
+ self.module.fail_json(msg="Cannot connect to host : %s" % socket_error)
+ else:
+ der_cert_bin = wrapped_socket.getpeercert(True)
+ wrapped_socket.close()
+
+ string = str(hashlib.sha1(der_cert_bin).hexdigest())
+ return ':'.join(a + b for a, b in zip(string[::2], string[1::2]))
+
    def get_managed_objects_properties(self, vim_type, properties=None):
        """
        Look up a Managed Object Reference in vCenter / ESXi Environment

        Uses a PropertyCollector filter over a container view rooted at the
        inventory root, which is much cheaper than fetching whole objects.

        :param vim_type: Type of vim object e.g, for datacenter - vim.Datacenter
        :param properties: List of properties related to vim object e.g. Name
        :return: local content object (list of ObjectContent results)
        """
        # Get Root Folder
        root_folder = self.content.rootFolder

        if properties is None:
            properties = ['name']

        # Create Container View with default root folder
        mor = self.content.viewManager.CreateContainerView(root_folder, [vim_type], True)

        # Create Traversal spec
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name="traversal_spec",
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

        # Create Property Spec
        property_spec = vmodl.query.PropertyCollector.PropertySpec(
            type=vim_type,  # Type of object to retrieved
            all=False,
            pathSet=properties
        )

        # Create Object Spec
        object_spec = vmodl.query.PropertyCollector.ObjectSpec(
            obj=mor,
            skip=True,
            selectSet=[traversal_spec]
        )

        # Create Filter Spec
        filter_spec = vmodl.query.PropertyCollector.FilterSpec(
            objectSet=[object_spec],
            propSet=[property_spec],
            reportMissingObjectsInResults=False
        )

        return self.content.propertyCollector.RetrieveContents([filter_spec])
+
+ # Virtual Machine related functions
+ def get_vm(self):
+ """
+ Find unique virtual machine either by UUID, MoID or Name.
+ Returns: virtual machine object if found, else None.
+
+ """
+ vm_obj = None
+ user_desired_path = None
+ use_instance_uuid = self.params.get('use_instance_uuid') or False
+ if 'uuid' in self.params and self.params['uuid']:
+ if not use_instance_uuid:
+ vm_obj = find_vm_by_id(self.content, vm_id=self.params['uuid'], vm_id_type="uuid")
+ elif use_instance_uuid:
+ vm_obj = find_vm_by_id(self.content,
+ vm_id=self.params['uuid'],
+ vm_id_type="instance_uuid")
+ elif 'name' in self.params and self.params['name']:
+ objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
+ vms = []
+
+ for temp_vm_object in objects:
+ if (
+ len(temp_vm_object.propSet) == 1
+ and unquote(temp_vm_object.propSet[0].val) == self.params["name"]
+ ):
+ vms.append(temp_vm_object.obj)
+
+ # get_managed_objects_properties may return multiple virtual machine,
+ # following code tries to find user desired one depending upon the folder specified.
+ if len(vms) > 1:
+ # We have found multiple virtual machines, decide depending upon folder value
+ if self.params['folder'] is None:
+ self.module.fail_json(msg="Multiple virtual machines with same name [%s] found, "
+ "Folder value is a required parameter to find uniqueness "
+ "of the virtual machine" % self.params['name'],
+ details="Please see documentation of the vmware_guest module "
+ "for folder parameter.")
+
+ # Get folder path where virtual machine is located
+ # User provided folder where user thinks virtual machine is present
+ user_folder = self.params['folder']
+ # User defined datacenter
+ user_defined_dc = self.params['datacenter']
+ # User defined datacenter's object
+ datacenter_obj = find_datacenter_by_name(self.content, self.params['datacenter'])
+ # Get Path for Datacenter
+ dcpath = compile_folder_path_for_object(vobj=datacenter_obj)
+
+ # Nested folder does not return trailing /
+ if not dcpath.endswith('/'):
+ dcpath += '/'
+
+ if user_folder in [None, '', '/']:
+ # User provided blank value or
+ # User provided only root value, we fail
+ self.module.fail_json(msg="vmware_guest found multiple virtual machines with same "
+ "name [%s], please specify folder path other than blank "
+ "or '/'" % self.params['name'])
+ elif user_folder.startswith('/vm/'):
+ # User provided nested folder under VMware default vm folder i.e. folder = /vm/india/finance
+ user_desired_path = "%s%s%s" % (dcpath, user_defined_dc, user_folder)
+ else:
+ # User defined datacenter is not nested i.e. dcpath = '/' , or
+ # User defined datacenter is nested i.e. dcpath = '/F0/DC0' or
+ # User provided folder starts with / and datacenter i.e. folder = /ha-datacenter/ or
+ # User defined folder starts with datacenter without '/' i.e.
+ # folder = DC0/vm/india/finance or
+ # folder = DC0/vm
+ user_desired_path = user_folder
+
+ for vm in vms:
+ # Check if user has provided same path as virtual machine
+ actual_vm_folder_path = self.get_vm_path(content=self.content, vm_name=vm)
+ if not actual_vm_folder_path.startswith("%s%s" % (dcpath, user_defined_dc)):
+ continue
+ if user_desired_path in actual_vm_folder_path:
+ vm_obj = vm
+ break
+ elif vms:
+ # Unique virtual machine found.
+ actual_vm_folder_path = self.get_vm_path(content=self.content, vm_name=vms[0])
+ if self.params.get('folder') is None:
+ vm_obj = vms[0]
+ elif self.params['folder'] in actual_vm_folder_path:
+ vm_obj = vms[0]
+ elif 'moid' in self.params and self.params['moid']:
+ vm_obj = VmomiSupport.templateOf('VirtualMachine')(self.params['moid'], self.si._stub)
+ try:
+ getattr(vm_obj, 'name')
+ except vmodl.fault.ManagedObjectNotFound:
+ vm_obj = None
+
+ if vm_obj:
+ self.current_vm_obj = vm_obj
+
+ return vm_obj
+
+ def gather_facts(self, vm):
+ """
+ Gather facts of virtual machine.
+ Args:
+ vm: Name of virtual machine.
+
+ Returns: Facts dictionary of the given virtual machine.
+
+ """
+ return gather_vm_facts(self.content, vm)
+
+ @staticmethod
+ def get_vm_path(content, vm_name):
+ """
+ Find the path of virtual machine.
+ Args:
+ content: VMware content object
+ vm_name: virtual machine managed object
+
+ Returns: Folder of virtual machine if exists, else None
+
+ """
+ folder_name = None
+ folder = vm_name.parent
+ if folder:
+ folder_name = folder.name
+ fp = folder.parent
+ # climb back up the tree to find our path, stop before the root folder
+ while fp is not None and fp.name is not None and fp != content.rootFolder:
+ folder_name = fp.name + '/' + folder_name
+ try:
+ fp = fp.parent
+ except Exception:
+ break
+ folder_name = '/' + folder_name
+ return folder_name
+
    def get_vm_or_template(self, template_name=None):
        """
        Find the virtual machine or virtual machine template using name
        used for cloning purpose.
        Args:
            template_name: Name of virtual machine or virtual machine template
                (may be an inventory path like 'folder/name', a BIOS UUID,
                or a plain name)

        Returns: virtual machine or virtual machine template object, or None

        """
        template_obj = None
        if not template_name:
            return template_obj

        if "/" in template_name:
            # Treat the value as '<inventory path>/<vm name>'.
            vm_obj_path = os.path.dirname(template_name)
            vm_obj_name = os.path.basename(template_name)
            template_obj = find_vm_by_id(self.content, vm_obj_name, vm_id_type="inventory_path", folder=vm_obj_path)
            if template_obj:
                return template_obj
        else:
            # Try BIOS UUID first, then fall back to a name search below.
            template_obj = find_vm_by_id(self.content, vm_id=template_name, vm_id_type="uuid")
            if template_obj:
                return template_obj

        objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
        templates = []

        for temp_vm_object in objects:
            if len(temp_vm_object.propSet) != 1:
                continue
            for temp_vm_object_property in temp_vm_object.propSet:
                if temp_vm_object_property.val == template_name:
                    templates.append(temp_vm_object.obj)
                    break

        if len(templates) > 1:
            # We have found multiple virtual machine templates
            self.module.fail_json(msg="Multiple virtual machines or templates with same name [%s] found." % template_name)
        elif templates:
            template_obj = templates[0]

        return template_obj
+
+ # Cluster related functions
+ def find_cluster_by_name(self, cluster_name, datacenter_name=None):
+ """
+ Find Cluster by name in given datacenter
+ Args:
+ cluster_name: Name of cluster name to find
+ datacenter_name: (optional) Name of datacenter
+
+ Returns: True if found
+
+ """
+ return find_cluster_by_name(self.content, cluster_name, datacenter=datacenter_name)
+
+ def get_all_hosts_by_cluster(self, cluster_name):
+ """
+ Get all hosts from cluster by cluster name
+ Args:
+ cluster_name: Name of cluster
+
+ Returns: List of hosts
+
+ """
+ cluster_obj = self.find_cluster_by_name(cluster_name=cluster_name)
+ if cluster_obj:
+ return list(cluster_obj.host)
+ else:
+ return []
+
+ # Hosts related functions
+ def find_hostsystem_by_name(self, host_name, datacenter=None):
+ """
+ Find Host by name
+ Args:
+ host_name: Name of ESXi host
+ datacenter: (optional) Datacenter of ESXi resides
+
+ Returns: True if found
+
+ """
+ return find_hostsystem_by_name(self.content, hostname=host_name, datacenter=datacenter)
+
+ def get_all_host_objs(self, cluster_name=None, esxi_host_name=None):
+ """
+ Get all host system managed object
+
+ Args:
+ cluster_name: Name of Cluster
+ esxi_host_name: Name of ESXi server
+
+ Returns: A list of all host system managed objects, else empty list
+
+ """
+ host_obj_list = []
+ if not self.is_vcenter():
+ hosts = get_all_objs(self.content, [vim.HostSystem]).keys()
+ if hosts:
+ host_obj_list.append(list(hosts)[0])
+ else:
+ if cluster_name:
+ cluster_obj = self.find_cluster_by_name(cluster_name=cluster_name)
+ if cluster_obj:
+ host_obj_list = list(cluster_obj.host)
+ else:
+ self.module.fail_json(changed=False, msg="Cluster '%s' not found" % cluster_name)
+ elif esxi_host_name:
+ if isinstance(esxi_host_name, str):
+ esxi_host_name = [esxi_host_name]
+
+ for host in esxi_host_name:
+ esxi_host_obj = self.find_hostsystem_by_name(host_name=host)
+ if esxi_host_obj:
+ host_obj_list.append(esxi_host_obj)
+ else:
+ self.module.fail_json(changed=False, msg="ESXi '%s' not found" % host)
+
+ return host_obj_list
+
+ def host_version_at_least(self, version=None, vm_obj=None, host_name=None):
+ """
+ Check that the ESXi Host is at least a specific version number
+ Args:
+ vm_obj: virtual machine object, required one of vm_obj, host_name
+ host_name (string): ESXi host name
+ version (tuple): a version tuple, for example (6, 7, 0)
+ Returns: bool
+ """
+ if vm_obj:
+ host_system = vm_obj.summary.runtime.host
+ elif host_name:
+ host_system = self.find_hostsystem_by_name(host_name=host_name)
+ else:
+ self.module.fail_json(msg='VM object or ESXi host name must be set one.')
+ if host_system and version:
+ host_version = host_system.summary.config.product.version
+ return StrictVersion(host_version) >= StrictVersion('.'.join(map(str, version)))
+ else:
+ self.module.fail_json(msg='Unable to get the ESXi host from vm: %s, or hostname %s,'
+ 'or the passed ESXi version: %s is None.' % (vm_obj, host_name, version))
+
+ # Network related functions
+ @staticmethod
+ def find_host_portgroup_by_name(host, portgroup_name):
+ """
+ Find Portgroup on given host
+ Args:
+ host: Host config object
+ portgroup_name: Name of portgroup
+
+ Returns: True if found else False
+
+ """
+ for portgroup in host.config.network.portgroup:
+ if portgroup.spec.name == portgroup_name:
+ return portgroup
+ return False
+
+ def get_all_port_groups_by_host(self, host_system):
+ """
+ Get all Port Group by host
+ Args:
+ host_system: Name of Host System
+
+ Returns: List of Port Group Spec
+ """
+ pgs_list = []
+ for pg in host_system.config.network.portgroup:
+ pgs_list.append(pg)
+ return pgs_list
+
+ def find_network_by_name(self, network_name=None):
+ """
+ Get network specified by name
+ Args:
+ network_name: Name of network
+
+ Returns: List of network managed objects
+ """
+ networks = []
+
+ if not network_name:
+ return networks
+
+ objects = self.get_managed_objects_properties(vim_type=vim.Network, properties=['name'])
+
+ for temp_vm_object in objects:
+ if len(temp_vm_object.propSet) != 1:
+ continue
+ for temp_vm_object_property in temp_vm_object.propSet:
+ if temp_vm_object_property.val == network_name:
+ networks.append(temp_vm_object.obj)
+ break
+ return networks
+
+ def network_exists_by_name(self, network_name=None):
+ """
+ Check if network with a specified name exists or not
+ Args:
+ network_name: Name of network
+
+ Returns: True if network exists else False
+ """
+ ret = False
+ if not network_name:
+ return ret
+ ret = True if self.find_network_by_name(network_name=network_name) else False
+ return ret
+
+ # Datacenter
+ def find_datacenter_by_name(self, datacenter_name):
+ """
+ Get datacenter managed object by name
+
+ Args:
+ datacenter_name: Name of datacenter
+
+ Returns: datacenter managed object if found else None
+
+ """
+ return find_datacenter_by_name(self.content, datacenter_name=datacenter_name)
+
+ def is_datastore_valid(self, datastore_obj=None):
+ """
+ Check if datastore selected is valid or not
+ Args:
+ datastore_obj: datastore managed object
+
+ Returns: True if datastore is valid, False if not
+ """
+ if not datastore_obj \
+ or datastore_obj.summary.maintenanceMode != 'normal' \
+ or not datastore_obj.summary.accessible:
+ return False
+ return True
+
+ def find_datastore_by_name(self, datastore_name, datacenter_name=None):
+ """
+ Get datastore managed object by name
+ Args:
+ datastore_name: Name of datastore
+ datacenter_name: Name of datacenter where the datastore resides. This is needed because Datastores can be
+ shared across Datacenters, so we need to specify the datacenter to assure we get the correct Managed Object Reference
+
+ Returns: datastore managed object if found else None
+
+ """
+ return find_datastore_by_name(self.content, datastore_name=datastore_name, datacenter_name=datacenter_name)
+
+ def find_folder_by_name(self, folder_name):
+ """
+ Get vm folder managed object by name
+ Args:
+ folder_name: Name of the vm folder
+
+ Returns: vm folder managed object if found else None
+
+ """
+ return find_folder_by_name(self.content, folder_name=folder_name)
+
+ def find_folder_by_fqpn(self, folder_name, datacenter_name=None, folder_type=None):
+ """
+ Get a unique folder managed object by specifying its Fully Qualified Path Name
+ as datacenter/folder_type/sub1/sub2
+ Args:
+ folder_name: Fully Qualified Path Name folder name
+ datacenter_name: Name of the datacenter, taken from Fully Qualified Path Name if not defined
+ folder_type: Type of folder, vm, host, datastore or network,
+ taken from Fully Qualified Path Name if not defined
+
+ Returns: folder managed object if found, else None
+
+ """
+ return find_folder_by_fqpn(self.content, folder_name=folder_name, datacenter_name=datacenter_name, folder_type=folder_type)
+
+ # Datastore cluster
+ def find_datastore_cluster_by_name(self, datastore_cluster_name, datacenter=None, folder=None):
+ """
+ Get datastore cluster managed object by name
+ Args:
+ datastore_cluster_name: Name of datastore cluster
+ datacenter: Managed object of the datacenter
+ folder: Managed object of the folder which holds datastore
+
+ Returns: Datastore cluster managed object if found else None
+
+ """
+ if datacenter and hasattr(datacenter, 'datastoreFolder'):
+ folder = datacenter.datastoreFolder
+ if not folder:
+ folder = self.content.rootFolder
+
+ data_store_clusters = get_all_objs(self.content, [vim.StoragePod], folder=folder)
+ for dsc in data_store_clusters:
+ if dsc.name == datastore_cluster_name:
+ return dsc
+ return None
+
    def get_recommended_datastore(self, datastore_cluster_obj=None):
        """
        Return Storage DRS recommended datastore from datastore cluster
        Args:
            datastore_cluster_obj: datastore cluster managed object

        Returns: Name of recommended datastore from the given datastore cluster,
            or None if no cluster was given or no datastore could be selected

        """
        if datastore_cluster_obj is None:
            return None
        # Check if Datastore Cluster provided by user is SDRS ready
        sdrs_status = datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled
        if sdrs_status:
            # We can get storage recommendation only if SDRS is enabled on given datastorage cluster
            pod_sel_spec = vim.storageDrs.PodSelectionSpec()
            pod_sel_spec.storagePod = datastore_cluster_obj
            storage_spec = vim.storageDrs.StoragePlacementSpec()
            storage_spec.podSelectionSpec = pod_sel_spec
            storage_spec.type = 'create'

            try:
                # Ask Storage DRS for a placement recommendation and take the
                # destination of the first recommended action
                rec = self.content.storageResourceManager.RecommendDatastores(storageSpec=storage_spec)
                rec_action = rec.recommendations[0].action[0]
                return rec_action.destination.name
            except Exception:
                # There is some error so we fall back to general workflow
                pass
        # Fallback: pick the valid (accessible, non-maintenance) child datastore
        # with the most free space
        datastore = None
        datastore_freespace = 0
        for ds in datastore_cluster_obj.childEntity:
            if isinstance(ds, vim.Datastore) and ds.summary.freeSpace > datastore_freespace:
                # If datastore field is provided, filter destination datastores
                if not self.is_datastore_valid(datastore_obj=ds):
                    continue

                datastore = ds
                datastore_freespace = ds.summary.freeSpace
        if datastore:
            return datastore.name
        return None
+
    # Resource pool
    def find_resource_pool_by_name(self, resource_pool_name='Resources', folder=None):
        """
        Get resource pool managed object by name
        Args:
            resource_pool_name: Name of resource pool
            folder: (optional) Managed object of a folder to search under;
                defaults to the root folder of the connected server

        Returns: Resource pool managed object if found else None

        """
        if not folder:
            folder = self.content.rootFolder

        resource_pools = get_all_objs(self.content, [vim.ResourcePool], folder=folder)
        # Return the first resource pool whose name matches exactly
        for rp in resource_pools:
            if rp.name == resource_pool_name:
                return rp
        return None
+
+ def find_resource_pool_by_cluster(self, resource_pool_name='Resources', cluster=None):
+ """
+ Get resource pool managed object by cluster object
+ Args:
+ resource_pool_name: Name of resource pool
+ cluster: Managed object of cluster
+
+ Returns: Resource pool managed object if found else None
+
+ """
+ desired_rp = None
+ if not cluster:
+ return desired_rp
+
+ if resource_pool_name != 'Resources':
+ # Resource pool name is different than default 'Resources'
+ resource_pools = cluster.resourcePool.resourcePool
+ if resource_pools:
+ for rp in resource_pools:
+ if rp.name == resource_pool_name:
+ desired_rp = rp
+ break
+ else:
+ desired_rp = cluster.resourcePool
+
+ return desired_rp
+
+ # VMDK stuff
+ def vmdk_disk_path_split(self, vmdk_path):
+ """
+ Takes a string in the format
+
+ [datastore_name] path/to/vm_name.vmdk
+
+ Returns a tuple with multiple strings:
+
+ 1. datastore_name: The name of the datastore (without brackets)
+ 2. vmdk_fullpath: The "path/to/vm_name.vmdk" portion
+ 3. vmdk_filename: The "vm_name.vmdk" portion of the string (os.path.basename equivalent)
+ 4. vmdk_folder: The "path/to/" portion of the string (os.path.dirname equivalent)
+ """
+ try:
+ datastore_name = re.match(r'^\[(.*?)\]', vmdk_path, re.DOTALL).groups()[0]
+ vmdk_fullpath = re.match(r'\[.*?\] (.*)$', vmdk_path).groups()[0]
+ vmdk_filename = os.path.basename(vmdk_fullpath)
+ vmdk_folder = os.path.dirname(vmdk_fullpath)
+ return datastore_name, vmdk_fullpath, vmdk_filename, vmdk_folder
+ except (IndexError, AttributeError) as e:
+ self.module.fail_json(msg="Bad path '%s' for filename disk vmdk image: %s" % (vmdk_path, to_native(e)))
+
    def find_vmdk_file(self, datastore_obj, vmdk_fullpath, vmdk_filename, vmdk_folder):
        """
        Return the vSphere file object for the given VMDK, or fail the module.
        Args:
            datastore_obj: Managed object of datastore
            vmdk_fullpath: Path of VMDK file e.g., path/to/vm/vmdk_filename.vmdk
            vmdk_filename: Name of vmdk e.g., VM0001_1.vmdk
            vmdk_folder: Base dir of VMDK e.g, path/to/vm

        Returns: matching file info object; on any failure calls
            self.module.fail_json and does not return

        """

        browser = datastore_obj.browser
        datastore_name = datastore_obj.name
        # Datastore name in square brackets, e.g. "[ds1]", as used in datastore paths
        datastore_name_sq = "[" + datastore_name + "]"
        if browser is None:
            self.module.fail_json(msg="Unable to access browser for datastore %s" % datastore_name)

        detail_query = vim.host.DatastoreBrowser.FileInfo.Details(
            fileOwner=True,
            fileSize=True,
            fileType=True,
            modification=True
        )
        search_spec = vim.host.DatastoreBrowser.SearchSpec(
            details=detail_query,
            matchPattern=[vmdk_filename],
            searchCaseInsensitive=True,
        )
        # Search the entire datastore recursively for files matching vmdk_filename
        search_res = browser.SearchSubFolders(
            datastorePath=datastore_name_sq,
            searchSpec=search_spec
        )

        changed = False
        vmdk_path = datastore_name_sq + " " + vmdk_fullpath
        try:
            changed, result = wait_for_task(search_res)
        except TaskError as task_e:
            self.module.fail_json(msg=to_native(task_e))

        if not changed:
            self.module.fail_json(msg="No valid disk vmdk image found for path %s" % vmdk_path)

        # The search reports folder paths with or without a trailing slash;
        # accept either form
        target_folder_paths = [
            datastore_name_sq + " " + vmdk_folder + '/',
            datastore_name_sq + " " + vmdk_folder,
        ]

        for file_result in search_res.info.result:
            for f in getattr(file_result, 'file'):
                # Match on both the file name and its containing folder
                if f.path == vmdk_filename and file_result.folderPath in target_folder_paths:
                    return f

        self.module.fail_json(msg="No vmdk file found for path specified [%s]" % vmdk_path)
+
+ def find_first_class_disk_by_name(self, disk_name, datastore_obj):
+ """
+ Get first-class disk managed object by name
+ Args:
+ disk_name: Name of the first-class disk
+ datastore_obj: Managed object of datastore
+
+ Returns: First-class disk managed object if found else None
+
+ """
+
+ if self.is_vcenter():
+ for id in self.content.vStorageObjectManager.ListVStorageObject(datastore_obj):
+ disk = self.content.vStorageObjectManager.RetrieveVStorageObject(id, datastore_obj)
+ if disk.config.name == disk_name:
+ return disk
+ else:
+ for id in self.content.vStorageObjectManager.HostListVStorageObject(datastore_obj):
+ disk = self.content.vStorageObjectManager.HostRetrieveVStorageObject(id, datastore_obj)
+ if disk.config.name == disk_name:
+ return disk
+
+ return None
+
    #
    # Conversion to JSON
    #

    def _deepmerge(self, d, u):
        """
        Deep merges u into d.

        Credit:
          https://bit.ly/2EDOs1B (stackoverflow question 3232943)
        License:
          cc-by-sa 3.0 (https://creativecommons.org/licenses/by-sa/3.0/)
        Changes:
          using collections_compat for compatibility

        Args:
          - d (dict): dict to merge into
          - u (dict): dict to merge into d

        Returns:
          dict, with u merged into d
        """
        for k, v in iteritems(u):
            if isinstance(v, collections_compat.Mapping):
                # Nested mappings are merged recursively
                d[k] = self._deepmerge(d.get(k, {}), v)
            else:
                # Scalars (and lists) from u overwrite values in d
                d[k] = v
        return d
+
+ def _extract(self, data, remainder):
+ """
+ This is used to break down dotted properties for extraction.
+
+ Args:
+ - data (dict): result of _jsonify on a property
+ - remainder: the remainder of the dotted property to select
+
+ Return:
+ dict
+ """
+ result = dict()
+ if '.' not in remainder:
+ result[remainder] = data[remainder]
+ return result
+ key, remainder = remainder.split('.', 1)
+ if isinstance(data, list):
+ temp_ds = []
+ for i in range(len(data)):
+ temp_ds.append(self._extract(data[i][key], remainder))
+ result[key] = temp_ds
+ else:
+ result[key] = self._extract(data[key], remainder)
+ return result
+
+ def _jsonify(self, obj):
+ """
+ Convert an object from pyVmomi into JSON.
+
+ Args:
+ - obj (object): vim object
+
+ Return:
+ dict
+ """
+ return json.loads(json.dumps(obj, cls=VmomiSupport.VmomiJSONEncoder,
+ sort_keys=True, strip_dynamic=True))
+
    def to_json(self, obj, properties=None):
        """
        Convert a vSphere (pyVmomi) Object into JSON. This is a deep
        transformation. The list of properties is optional - if not
        provided then all properties are deeply converted. The resulting
        JSON is sorted to improve human readability.

        Requires upstream support from pyVmomi > 6.7.1
        (https://github.com/vmware/pyvmomi/pull/732)

        Args:
            - obj (object): vim object
            - properties (list, optional): list of properties following
              the property collector specification, for example:
              ["config.hardware.memoryMB", "name", "overallStatus"]
              default is a complete object dump, which can be large

        Return:
            dict
        """
        if not HAS_PYVMOMIJSON:
            self.module.fail_json(msg='The installed version of pyvmomi lacks JSON output support; need pyvmomi>6.7.1')

        result = dict()
        if properties:
            for prop in properties:
                try:
                    if '.' in prop:
                        # Dotted property: jsonify the top-level attribute, select
                        # the nested remainder, then deep-merge into the result
                        key, remainder = prop.split('.', 1)
                        tmp = dict()
                        tmp[key] = self._extract(self._jsonify(getattr(obj, key)), remainder)
                        self._deepmerge(result, tmp)
                    else:
                        result[prop] = self._jsonify(getattr(obj, prop))
                        # To match gather_vm_facts output
                        prop_name = prop
                        if prop.lower() == '_moid':
                            prop_name = 'moid'
                        elif prop.lower() == '_vimref':
                            prop_name = 'vimref'
                        # NOTE(review): the original key (e.g. '_moid') stays in the
                        # result alongside the renamed one ('moid') — presumably for
                        # backward compatibility; confirm before changing.
                        result[prop_name] = result[prop]
                except (AttributeError, KeyError):
                    self.module.fail_json(msg="Property '{0}' not found.".format(prop))
        else:
            result = self._jsonify(obj)
        return result
+
+ def get_folder_path(self, cur):
+ full_path = '/' + cur.name
+ while hasattr(cur, 'parent') and cur.parent:
+ if cur.parent == self.content.rootFolder:
+ break
+ cur = cur.parent
+ full_path = '/' + cur.name + full_path
+ return full_path
+
    def find_obj_by_moid(self, object_type, moid):
        """
        Get Managed Object based on an object type and moid.
        If you'd like to search for a virtual machine, recommended you use get_vm method.

        Args:
            - object_type: Managed Object type
                It is possible to specify types the following.
                ["Datacenter", "ClusterComputeResource", "ResourcePool", "Folder", "HostSystem",
                 "VirtualMachine", "DistributedVirtualSwitch", "DistributedVirtualPortgroup", "Datastore"]
            - moid: moid of Managed Object
        :return: Managed Object if it exists else None
        """

        # Construct a managed object reference from the moid and the connection stub
        obj = VmomiSupport.templateOf(object_type)(moid, self.si._stub)
        try:
            # Reading a property validates the reference against the server;
            # a stale/unknown moid raises ManagedObjectNotFound
            getattr(obj, 'name')
        except vmodl.fault.ManagedObjectNotFound:
            obj = None

        return obj
diff --git a/ansible_collections/community/vmware/plugins/module_utils/vmware_rest_client.py b/ansible_collections/community/vmware/plugins/module_utils/vmware_rest_client.py
new file mode 100644
index 000000000..c98496c2b
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/module_utils/vmware_rest_client.py
@@ -0,0 +1,574 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import traceback
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ HAS_REQUESTS = False
+
+PYVMOMI_IMP_ERR = None
+try:
+ from pyVim import connect # noqa: F401, pylint: disable=unused-import
+ from pyVmomi import vim # noqa: F401, pylint: disable=unused-import
+ HAS_PYVMOMI = True
+except ImportError:
+ PYVMOMI_IMP_ERR = traceback.format_exc()
+ HAS_PYVMOMI = False
+
+VSPHERE_IMP_ERR = None
+try:
+ from com.vmware.vapi.std_client import DynamicID
+ from vmware.vapi.vsphere.client import create_vsphere_client
+ from com.vmware.vapi.std.errors_client import Unauthorized
+ from com.vmware.content.library_client import Item
+ from com.vmware.vcenter_client import (Folder,
+ Datacenter,
+ ResourcePool,
+ Datastore,
+ Cluster,
+ Host)
+ HAS_VSPHERE = True
+except ImportError:
+ VSPHERE_IMP_ERR = traceback.format_exc()
+ HAS_VSPHERE = False
+
+from ansible.module_utils.basic import env_fallback, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class VmwareRestClient(object):
    def __init__(self, module):
        """
        Constructor

        Args:
            module: AnsibleModule instance; its params supply the connection
                details used to build the vSphere API client.
        """
        self.module = module
        self.params = module.params
        # Fails the module early if requests/pyVmomi/vSphere SDK are missing
        self.check_required_library()
        self.api_client = self.connect_to_vsphere_client()
+
+ # Helper function
+ def get_error_message(self, error):
+ """
+ Helper function to show human readable error messages.
+ """
+ err_msg = []
+ if not error.messages:
+ if isinstance(error, Unauthorized):
+ return "Authorization required."
+ return "Generic error occurred."
+
+ for err in error.messages:
+ err_msg.append(err.default_message % err.args)
+
+ return " ,".join(err_msg)
+
    def check_required_library(self):
        """
        Check required libraries

        Fails the module (checking in order) when 'requests', 'pyVmomi', or the
        vSphere Automation SDK cannot be imported, attaching the captured import
        traceback to ease debugging.
        """
        if not HAS_REQUESTS:
            self.module.fail_json(msg=missing_required_lib('requests'),
                                  exception=REQUESTS_IMP_ERR)
        if not HAS_PYVMOMI:
            self.module.fail_json(msg=missing_required_lib('PyVmomi'),
                                  exception=PYVMOMI_IMP_ERR)
        if not HAS_VSPHERE:
            self.module.fail_json(
                msg=missing_required_lib('vSphere Automation SDK',
                                         url='https://code.vmware.com/web/sdk/7.0/vsphere-automation-python'),
                exception=VSPHERE_IMP_ERR)
+
    @staticmethod
    def vmware_client_argument_spec():
        """
        Return the common AnsibleModule argument spec shared by REST-client
        based modules; every connection option falls back to a VMWARE_*
        environment variable.
        """
        return dict(
            hostname=dict(type='str',
                          fallback=(env_fallback, ['VMWARE_HOST'])),
            username=dict(type='str',
                          fallback=(env_fallback, ['VMWARE_USER']),
                          aliases=['user', 'admin']),
            password=dict(type='str',
                          fallback=(env_fallback, ['VMWARE_PASSWORD']),
                          aliases=['pass', 'pwd'],
                          no_log=True),
            port=dict(type='int',
                      default=443,
                      fallback=(env_fallback, ['VMWARE_PORT'])),
            protocol=dict(type='str',
                          default='https',
                          choices=['https', 'http']),
            validate_certs=dict(type='bool',
                                fallback=(env_fallback, ['VMWARE_VALIDATE_CERTS']),
                                default=True),
            proxy_host=dict(type='str',
                            required=False,
                            default=None,
                            fallback=(env_fallback, ['VMWARE_PROXY_HOST'])),
            proxy_port=dict(type='int',
                            required=False,
                            default=None,
                            fallback=(env_fallback, ['VMWARE_PROXY_PORT'])),
        )
+
    def connect_to_vsphere_client(self):
        """
        Connect to vSphere API Client with Username and Password

        Returns: an authenticated vSphere API client.
        Fails the module if required parameters are missing or if the
        connection/login attempt raises.
        """
        username = self.params.get('username')
        password = self.params.get('password')
        hostname = self.params.get('hostname')
        port = self.params.get('port')
        session = requests.Session()
        # TLS certificate verification is driven by the validate_certs parameter
        session.verify = self.params.get('validate_certs')
        protocol = self.params.get('protocol')
        proxy_host = self.params.get('proxy_host')
        proxy_port = self.params.get('proxy_port')

        # Route the session through a proxy only when all proxy settings are present
        if all([protocol, proxy_host, proxy_port]):
            proxies = {protocol: "{0}://{1}:{2}".format(protocol, proxy_host, proxy_port)}
            session.proxies.update(proxies)

        if not all([hostname, username, password]):
            self.module.fail_json(msg="Missing one of the following : hostname, username, password."
                                  " Please read the documentation for more information.")

        msg = "Failed to connect to vCenter or ESXi API at %s:%s" % (hostname, port)
        try:
            client = create_vsphere_client(
                server="%s:%s" % (hostname, port),
                username=username,
                password=password,
                session=session
            )
        except requests.exceptions.SSLError as ssl_exc:
            # SSL failures get an explicit hint appended to the generic message
            msg += " due to SSL verification failure"
            self.module.fail_json(msg="%s : %s" % (msg, to_native(ssl_exc)))
        except Exception as generic_exc:
            self.module.fail_json(msg="%s : %s" % (msg, to_native(generic_exc)))

        if client is None:
            self.module.fail_json(msg="Failed to login to %s" % hostname)

        return client
+
+ def get_tags_for_object(self, tag_service=None, tag_assoc_svc=None, dobj=None, tags=None):
+ """
+ Return tag objects associated with an object
+ Args:
+ dobj: Dynamic object
+ tag_service: Tag service object
+ tag_assoc_svc: Tag Association object
+ tags: List or set to which the tag objects are being added, reference is returned by the method
+ Returns: Tag objects associated with the given object
+ """
+ # This method returns tag objects only,
+ # Please use get_tags_for_dynamic_obj for more object details
+ if tags is None:
+ tags = []
+
+ if not (isinstance(tags, list) or isinstance(tags, set)):
+ self.module.fail_json(msg="The parameter 'tags' must be of type 'list' or 'set', but type %s was passed" % type(tags))
+
+ if not dobj:
+ return tags
+
+ if not tag_service:
+ tag_service = self.api_client.tagging.Tag
+
+ if not tag_assoc_svc:
+ tag_assoc_svc = self.api_client.tagging.TagAssociation
+
+ tag_ids = tag_assoc_svc.list_attached_tags(dobj)
+
+ add_tag = tags.append if isinstance(tags, list) else tags.add
+ for tag_id in tag_ids:
+ add_tag(tag_service.get(tag_id))
+
+ return tags
+
+ def get_tags_for_dynamic_obj(self, dobj=None, tags=None):
+ """
+ Return tag object details associated with object
+ Args:
+ mid: Dynamic object for specified object
+ tags: List or set to which the tag objects are being added, reference is returned by the method
+
+ Returns: Tag object details associated with the given object
+
+ """
+ if tags is None:
+ tags = []
+
+ if not (isinstance(tags, list) or isinstance(tags, set)):
+ self.module.fail_json(msg="The parameter 'tags' must be of type 'list' or 'set', but type %s was passed" % type(tags))
+
+ if dobj is None:
+ return tags
+
+ temp_tags_model = self.get_tags_for_object(dobj=dobj)
+
+ category_service = self.api_client.tagging.Category
+
+ add_tag = tags.append if isinstance(tags, list) else tags.add
+ for tag_obj in temp_tags_model:
+ add_tag({
+ 'id': tag_obj.id,
+ 'category_name': category_service.get(tag_obj.category_id).name,
+ 'name': tag_obj.name,
+ 'description': tag_obj.description,
+ 'category_id': tag_obj.category_id,
+ })
+
+ return tags
+
+ def get_tags_for_datacenter(self, datacenter_mid=None):
+ """
+ Return list of tag object associated with datacenter
+ Args:
+ datacenter_mid: Dynamic object for datacenter
+
+ Returns: List of tag object associated with the given datacenter
+
+ """
+ dobj = DynamicID(type='Datacenter', id=datacenter_mid)
+ return self.get_tags_for_dynamic_obj(dobj=dobj)
+
+ def get_tags_for_datastore(self, datastore_mid=None):
+ """
+ Return list of tag object associated with datastore
+ Args:
+ datastore_mid: Dynamic object for datacenter
+
+ Returns: List of tag object associated with the given datastore
+
+ """
+ dobj = DynamicID(type="Datastore", id=datastore_mid)
+ return self.get_tags_for_dynamic_obj(dobj=dobj)
+
+ def get_tags_for_cluster(self, cluster_mid=None):
+ """
+ Return list of tag object associated with cluster
+ Args:
+ cluster_mid: Dynamic object for cluster
+
+ Returns: List of tag object associated with the given cluster
+
+ """
+ dobj = DynamicID(type='ClusterComputeResource', id=cluster_mid)
+ return self.get_tags_for_dynamic_obj(dobj=dobj)
+
+ def get_tags_for_hostsystem(self, hostsystem_mid=None):
+ """
+ Return list of tag object associated with host system
+ Args:
+ hostsystem_mid: Dynamic object for host system
+
+ Returns: List of tag object associated with the given host system
+
+ """
+ dobj = DynamicID(type='HostSystem', id=hostsystem_mid)
+ return self.get_tags_for_dynamic_obj(dobj=dobj)
+
+ def get_tags_for_vm(self, vm_mid=None):
+ """
+ Return list of tag object associated with virtual machine
+ Args:
+ vm_mid: Dynamic object for virtual machine
+
+ Returns: List of tag object associated with the given virtual machine
+
+ """
+ dobj = DynamicID(type='VirtualMachine', id=vm_mid)
+ return self.get_tags_for_dynamic_obj(dobj=dobj)
+
+ def get_vm_tags(self, tag_service=None, tag_association_svc=None, vm_mid=None):
+ """
+ Return list of tag name associated with virtual machine
+ Args:
+ tag_service: Tag service object
+ tag_association_svc: Tag association object
+ vm_mid: Dynamic object for virtual machine
+
+ Returns: List of tag names associated with the given virtual machine
+
+ """
+ # This API returns just names of tags
+ # Please use get_tags_for_vm for more tag object details
+ tags = []
+ if vm_mid is None:
+ return tags
+
+ temp_tags_model = self.get_tags_for_object(
+ tag_service=tag_service,
+ tag_assoc_svc=tag_association_svc,
+ dobj=vm_mid
+ )
+
+ for tag_obj in temp_tags_model:
+ tags.append(tag_obj.name)
+
+ return tags
+
+ def get_library_item_by_name(self, name):
+ """
+ Returns the identifier of the library item with the given name.
+
+ Args:
+ name (str): The name of item to look for
+
+ Returns:
+ str: The item ID or None if the item is not found
+ """
+ find_spec = Item.FindSpec(name=name)
+ item_ids = self.api_client.content.library.Item.find(find_spec)
+ item_id = item_ids[0] if item_ids else None
+ return item_id
+
+ def get_library_item_from_content_library_name(self, name, content_library_name):
+ """
+ Returns the identifier of the library item with the given name in the specified
+ content library.
+ Args:
+ name (str): The name of item to look for
+ content_library_name (str): The name of the content library to search in
+ Returns:
+ str: The item ID or None if the item is not found
+ """
+ cl_find_spec = self.api_client.content.Library.FindSpec(name=content_library_name)
+ cl_item_ids = self.api_client.content.Library.find(cl_find_spec)
+ cl_item_id = cl_item_ids[0] if cl_item_ids else None
+ if cl_item_id:
+ find_spec = Item.FindSpec(name=name, library_id=cl_item_id)
+ item_ids = self.api_client.content.library.Item.find(find_spec)
+ item_id = item_ids[0] if item_ids else None
+ return item_id
+ else:
+ return None
+
+ def get_datacenter_by_name(self, datacenter_name):
+ """
+ Returns the identifier of a datacenter
+ Note: The method assumes only one datacenter with the mentioned name.
+ """
+ filter_spec = Datacenter.FilterSpec(names=set([datacenter_name]))
+ datacenter_summaries = self.api_client.vcenter.Datacenter.list(filter_spec)
+ datacenter = datacenter_summaries[0].datacenter if len(datacenter_summaries) > 0 else None
+ return datacenter
+
+ def get_folder_by_name(self, datacenter_name, folder_name):
+ """
+ Returns the identifier of a folder
+ with the mentioned names.
+ """
+ datacenter = self.get_datacenter_by_name(datacenter_name)
+ if not datacenter:
+ return None
+ filter_spec = Folder.FilterSpec(type=Folder.Type.VIRTUAL_MACHINE,
+ names=set([folder_name]),
+ datacenters=set([datacenter]))
+ folder_summaries = self.api_client.vcenter.Folder.list(filter_spec)
+ folder = folder_summaries[0].folder if len(folder_summaries) > 0 else None
+ return folder
+
+ def get_resource_pool_by_name(self, datacenter_name, resourcepool_name, cluster_name=None, host_name=None):
+ """
+ Returns the identifier of a resource pool
+ with the mentioned names.
+ """
+ datacenter = self.get_datacenter_by_name(datacenter_name)
+ if not datacenter:
+ return None
+ clusters = None
+ if cluster_name:
+ clusters = self.get_cluster_by_name(datacenter_name, cluster_name)
+ if clusters:
+ clusters = set([clusters])
+ hosts = None
+ if host_name:
+ hosts = self.get_host_by_name(datacenter_name, host_name)
+ if hosts:
+ hosts = set([hosts])
+ names = set([resourcepool_name]) if resourcepool_name else None
+ filter_spec = ResourcePool.FilterSpec(datacenters=set([datacenter]),
+ names=names,
+ clusters=clusters)
+ resource_pool_summaries = self.api_client.vcenter.ResourcePool.list(filter_spec)
+ resource_pool = resource_pool_summaries[0].resource_pool if len(resource_pool_summaries) > 0 else None
+ return resource_pool
+
+    def get_datastore_by_name(self, datacenter_name, datastore_name):
+        """
+        Returns the identifier of a datastore
+        with the mentioned names.
+
+        Args:
+            datacenter_name: Name of the datacenter that scopes the search
+            datastore_name: Name of the datastore; None/empty matches any
+                datastore in the datacenter
+
+        Returns: Datastore identifier if found, else None (also None when the
+            datacenter itself is not found)
+        """
+        datacenter = self.get_datacenter_by_name(datacenter_name)
+        if not datacenter:
+            return None
+        # An empty/None name means "no name filter": the first datastore of
+        # the datacenter is returned.
+        names = set([datastore_name]) if datastore_name else None
+        filter_spec = Datastore.FilterSpec(datacenters=set([datacenter]),
+                                           names=names)
+        datastore_summaries = self.api_client.vcenter.Datastore.list(filter_spec)
+        datastore = datastore_summaries[0].datastore if len(datastore_summaries) > 0 else None
+        return datastore
+
+    def get_cluster_by_name(self, datacenter_name, cluster_name):
+        """
+        Returns the identifier of a cluster
+        with the mentioned names.
+
+        Args:
+            datacenter_name: Name of the datacenter that scopes the search
+            cluster_name: Name of the cluster; None/empty matches any cluster
+                in the datacenter
+
+        Returns: Cluster identifier if found, else None (also None when the
+            datacenter itself is not found)
+        """
+        datacenter = self.get_datacenter_by_name(datacenter_name)
+        if not datacenter:
+            return None
+        # An empty/None name means "no name filter": the first cluster of the
+        # datacenter is returned.
+        names = set([cluster_name]) if cluster_name else None
+        filter_spec = Cluster.FilterSpec(datacenters=set([datacenter]),
+                                         names=names)
+        cluster_summaries = self.api_client.vcenter.Cluster.list(filter_spec)
+        cluster = cluster_summaries[0].cluster if len(cluster_summaries) > 0 else None
+        return cluster
+
+    def get_host_by_name(self, datacenter_name, host_name):
+        """
+        Returns the identifier of a Host
+        with the mentioned names.
+
+        Args:
+            datacenter_name: Name of the datacenter that scopes the search
+            host_name: Name of the host; None/empty matches any host in the
+                datacenter
+
+        Returns: Host identifier if found, else None (also None when the
+            datacenter itself is not found)
+        """
+        datacenter = self.get_datacenter_by_name(datacenter_name)
+        if not datacenter:
+            return None
+        # An empty/None name means "no name filter": the first host of the
+        # datacenter is returned.
+        names = set([host_name]) if host_name else None
+        filter_spec = Host.FilterSpec(datacenters=set([datacenter]),
+                                      names=names)
+        host_summaries = self.api_client.vcenter.Host.list(filter_spec)
+        host = host_summaries[0].host if len(host_summaries) > 0 else None
+        return host
+
+    @staticmethod
+    def search_svc_object_by_name(service, svc_obj_name=None):
+        """
+        Return service object by name
+        Args:
+            service: Service object exposing list() and get() (e.g. the
+                tagging Tag or Category service)
+            svc_obj_name: Name of service object to find
+
+        Returns: Service object if found else None
+
+        """
+        if not svc_obj_name:
+            return None
+
+        # Linear scan: one get() round-trip per listed id, so cost grows with
+        # the number of objects the service holds. Returns the first match.
+        for svc_object in service.list():
+            svc_obj = service.get(svc_object)
+            if svc_obj.name == svc_obj_name:
+                return svc_obj
+        return None
+
+    def get_tag_by_name(self, tag_name=None):
+        """
+        Return tag object by name
+        Args:
+            tag_name: Name of tag
+
+        Returns: Tag object if found else None
+        """
+        if not tag_name:
+            return None
+
+        # Delegates to the generic name scan over the tagging Tag service.
+        return self.search_svc_object_by_name(service=self.api_client.tagging.Tag, svc_obj_name=tag_name)
+
+    def get_category_by_name(self, category_name=None):
+        """
+        Return category object by name
+        Args:
+            category_name: Name of category
+
+        Returns: Category object if found else None
+        """
+        if not category_name:
+            return None
+
+        # Delegates to the generic name scan over the tagging Category service.
+        return self.search_svc_object_by_name(service=self.api_client.tagging.Category, svc_obj_name=category_name)
+
+    def get_tag_by_category_id(self, tag_name=None, category_id=None):
+        """
+        Return tag object by category id
+        Args:
+            tag_name: Name of tag
+            category_id: Id of category; when None the tag is looked up by
+                name across all categories
+        Returns: Tag object if found else None
+        """
+        if tag_name is None:
+            return None
+
+        # Without a category to scope the search, fall back to a global
+        # name lookup over all tags.
+        if category_id is None:
+            return self.search_svc_object_by_name(service=self.api_client.tagging.Tag, svc_obj_name=tag_name)
+
+        # Scan only the tags attached to the given category; first name
+        # match wins.
+        result = None
+        for tag_id in self.api_client.tagging.Tag.list_tags_for_category(category_id):
+            tag_obj = self.api_client.tagging.Tag.get(tag_id)
+            if tag_obj.name == tag_name:
+                result = tag_obj
+                break
+
+        return result
+
+    def get_tag_by_category_name(self, tag_name=None, category_name=None):
+        """
+        Return tag object by category name
+        Args:
+            tag_name: Name of tag
+            category_name: Name of category; when None (or not found) the
+                tag is looked up by name across all categories
+        Returns: Tag object if found else None
+        """
+        # Resolve the category name to an id; an unknown category leaves
+        # category_id as None, which degrades to a global name lookup.
+        category_id = None
+        if category_name is not None:
+            category_obj = self.get_category_by_name(category_name=category_name)
+            if category_obj is not None:
+                category_id = category_obj.id
+
+        return self.get_tag_by_category_id(tag_name=tag_name, category_id=category_id)
+
+ def get_tag_by_category(self, tag_name=None, category_name=None, category_id=None):
+ """
+ Return tag object by name and category name specified
+ Args:
+ tag_name: Name of tag
+ category_name: Name of category (mutually exclusive with 'category_id')
+ category_id: Id of category, if known in advance (mutually exclusive with 'category_name')
+ Returns: Tag object if found else None
+ """
+ message = "The method 'get_tag_by_category' is deprecated and scheduled for removal. "\
+ "Please update your code and use 'get_tag_by_category_id' or 'get_tag_by_category_name' instead"
+ self.module.deprecate(message, version='4.0.0', collection_name='community.vmware')
+
+ if not tag_name:
+ return None
+
+ if category_id or category_name:
+ if not category_id:
+ category_obj = self.get_category_by_name(category_name=category_name)
+
+ if not category_obj:
+ return None
+
+ category_id = category_obj.id
+
+ for tag_object in self.api_client.tagging.Tag.list_tags_for_category(category_id):
+ tag_obj = self.api_client.tagging.Tag.get(tag_object)
+
+ if tag_obj.name == tag_name:
+ return tag_obj
+ else:
+ return self.search_svc_object_by_name(service=self.api_client.tagging.Tag, svc_obj_name=tag_name)
diff --git a/ansible_collections/community/vmware/plugins/module_utils/vmware_spbm.py b/ansible_collections/community/vmware/plugins/module_utils/vmware_spbm.py
new file mode 100644
index 000000000..b33b5123e
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/module_utils/vmware_spbm.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+try:
+ from pyVmomi import pbm
+ from pyVim.connect import SoapStubAdapter
+except ImportError:
+ pass
+
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi
+
+
+class SPBM(PyVmomi):
+    """Helper for the VMware Storage Policy Based Management (SPBM) API.
+
+    Reuses the pyVmomi session of the parent PyVmomi connection and opens a
+    second SOAP endpoint against the vCenter /pbm/sdk path.
+    """
+
+    def __init__(self, module):
+        super(SPBM, self).__init__(module)
+        # Populated by get_spbm_connection(): PbmServiceInstanceContent and
+        # the pbm ServiceInstance respectively.
+        self.spbm_content = None
+        self.spbm_si = None
+        # pbm VMODL version namespace used for the SOAP stub.
+        self.version = "pbm.version.version2"
+
+    def get_spbm_connection(self):
+        """
+        Creates a Service instance for VMware SPBM
+
+        Reuses the vCenter session cookie from the existing pyVmomi stub so
+        no separate authentication is needed. Fails the module if the cookie
+        or the 'hostname' parameter is missing.
+        """
+        client_stub = self.si._GetStub()
+        try:
+            # Cookie string looks like: vmware_soap_session="<id>"; ...
+            # so the session id is the first quoted token.
+            session_cookie = client_stub.cookie.split('"')[1]
+        except IndexError:
+            self.module.fail_json(msg="Failed to get session cookie")
+        ssl_context = client_stub.schemeArgs.get('context')
+        # The pbm endpoint authenticates via the vCenter session cookie
+        # passed in the request context.
+        additional_headers = {'vcSessionCookie': session_cookie}
+        hostname = self.module.params['hostname']
+        if not hostname:
+            self.module.fail_json(msg="Please specify required parameter - hostname")
+        stub = SoapStubAdapter(host=hostname, path="/pbm/sdk", version=self.version,
+                               sslContext=ssl_context, requestContext=additional_headers)
+
+        self.spbm_si = pbm.ServiceInstance("ServiceInstance", stub)
+        self.spbm_content = self.spbm_si.PbmRetrieveServiceContent()
+
+    def find_storage_profile_by_name(self, profile_name):
+        """Return the REQUIREMENT storage profile matching *profile_name*.
+
+        Establishes the SPBM connection on every call. Returns None (after a
+        module warning when no profile ids exist at all) if nothing matches.
+        """
+        storage_profile = None
+        self.get_spbm_connection()
+        pm = self.spbm_content.profileManager
+        profile_ids = pm.PbmQueryProfile(resourceType=pbm.profile.ResourceType(resourceType="STORAGE"),
+                                         profileCategory="REQUIREMENT")
+        if len(profile_ids) > 0:
+            storage_profiles = pm.PbmRetrieveContent(profileIds=profile_ids)
+            # NOTE(review): the loop has no break, so if several profiles
+            # share the name the LAST match is kept.
+            for profile in storage_profiles:
+                if profile.name == profile_name:
+                    storage_profile = profile
+        else:
+            self.module.warn("Unable to get storage profile IDs with STORAGE resource type and REQUIREMENT profile category.")
+
+        return storage_profile
diff --git a/ansible_collections/community/vmware/plugins/modules/__init__.py b/ansible_collections/community/vmware/plugins/modules/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/__init__.py
diff --git a/ansible_collections/community/vmware/plugins/modules/vcenter_domain_user_group_info.py b/ansible_collections/community/vmware/plugins/modules/vcenter_domain_user_group_info.py
new file mode 100644
index 000000000..4200784ec
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vcenter_domain_user_group_info.py
@@ -0,0 +1,190 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, sky-joker
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: vcenter_domain_user_group_info
+short_description: Gather user or group information of a domain
+author:
+ - sky-joker (@sky-joker)
+description:
+ - This module can be used to gather information about user or group of a domain.
+options:
+ domain:
+ description:
+ - The I(domain) to be specified searching.
+ type: str
+ default: vsphere.local
+ search_string:
+ description:
+ - The I(search_string) is a string to be specified searching.
+ - Specify the domain user or group name to be searched.
+ type: str
+ required: true
+ belongs_to_group:
+ description:
+      - If a group exists, the returned list contains only users or groups that directly belong to the specified group.
+ type: str
+ belongs_to_user:
+ description:
+      - If a user exists, the returned list contains only groups that directly contain the specified user.
+ type: str
+ exact_match:
+ description:
+ - If I(exact_match) is C(true), it indicates the I(search_string) passed should match a user or group name exactly.
+ type: bool
+ default: false
+ find_users:
+ description:
+ - If I(find_users) is C(true), domain users will be included in the result.
+ type: bool
+ default: true
+ find_groups:
+ description:
+ - If I(find_groups) is C(true), domain groups will be included in the result.
+ type: bool
+ default: true
+extends_documentation_fragment:
+ - community.vmware.vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: Gather all domain user and group of vsphere.local
+ community.vmware.vcenter_domain_user_group_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ domain: vsphere.local
+ search_string: ''
+ register: gather_all_domain_user_group_result
+
+- name: Gather all domain user and group included the administrator string
+ community.vmware.vcenter_domain_user_group_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ domain: vsphere.local
+ search_string: administrator
+ register: gather_domain_user_group_result
+
+- name: Gather all domain user of vsphere.local
+ community.vmware.vcenter_domain_user_group_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ domain: vsphere.local
+ search_string: ''
+ find_users: true
+ find_groups: false
+ register: gather_all_domain_user_result
+
+- name: Gather administrator user by exact match condition
+ community.vmware.vcenter_domain_user_group_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ domain: vsphere.local
+ search_string: "vsphere.local\\administrator"
+ exact_match: true
+ register: gather_administrator_user_exact_match_result
+'''
+
+RETURN = r'''
+domain_user_groups:
+ description: list of domain user and group information
+ returned: success
+ type: list
+ sample: >-
+ [
+ {
+ "fullName": "Administrator vsphere.local",
+ "group": false,
+ "principal": "Administrator"
+ }
+ ]
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+
+
+class VcenterDomainUserGroupInfo(PyVmomi):
+ def __init__(self, module):
+ super(VcenterDomainUserGroupInfo, self).__init__(module)
+ self.domain = self.params['domain']
+ self.search_string = self.params['search_string']
+ self.belongs_to_group = self.params['belongs_to_group']
+ self.belongs_to_user = self.params['belongs_to_user']
+ self.exact_match = self.params['exact_match']
+ self.find_users = self.params['find_users']
+ self.find_groups = self.params['find_groups']
+
+ def execute(self):
+ user_directory_manager = self.content.userDirectory
+
+ if not self.domain.upper() in user_directory_manager.domainList:
+ self.module.fail_json(msg="domain not found: %s" % self.domain)
+
+ try:
+ user_search_result = user_directory_manager.RetrieveUserGroups(
+ domain=self.domain,
+ searchStr=self.search_string,
+ belongsToGroup=self.belongs_to_group,
+ belongsToUser=self.belongs_to_user,
+ exactMatch=self.exact_match,
+ findUsers=self.find_users,
+ findGroups=self.find_groups
+ )
+ except vim.fault.NotFound as e:
+ self.module.fail_json(msg="%s" % to_native(e.msg))
+ except Exception as e:
+ self.module.fail_json(msg="Couldn't gather domain user or group information: %s" % to_native(e))
+
+ user_search_result_normalization = []
+ if user_search_result:
+ for object in user_search_result:
+ user_search_result_normalization.append({
+ 'fullName': object.fullName,
+ 'principal': object.principal,
+ 'group': object.group
+ })
+
+ self.module.exit_json(changed=False, domain_user_groups=user_search_result_normalization)
+
+
+def main():
+    """Entry point: build the argument spec, run the info gathering."""
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(
+        domain=dict(type='str', default='vsphere.local'),
+        search_string=dict(type='str', required=True),
+        belongs_to_group=dict(type='str', default=None),
+        belongs_to_user=dict(type='str', default=None),
+        exact_match=dict(type='bool', default=False),
+        find_users=dict(type='bool', default=True),
+        find_groups=dict(type='bool', default=True)
+    )
+
+    # Read-only module: check mode is safe because nothing is changed.
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+    vcenter_domain_user_info = VcenterDomainUserGroupInfo(module)
+    vcenter_domain_user_info.execute()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vcenter_extension.py b/ansible_collections/community/vmware/plugins/modules/vcenter_extension.py
new file mode 100644
index 000000000..cca92fcae
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vcenter_extension.py
@@ -0,0 +1,223 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Michael Tipton <mike () ibeta.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vcenter_extension
+short_description: Register/deregister vCenter Extensions
+description:
+ - This module can be used to register/deregister vCenter Extensions.
+author:
+ - Michael Tipton (@castawayegr)
+options:
+ extension_key:
+ description:
+ - The extension key of the extension to install or uninstall.
+ required: true
+ type: str
+ version:
+ description:
+ - The version of the extension you are installing or uninstalling.
+ required: true
+ type: str
+ name:
+ description:
+ - Required for C(state=present). The name of the extension you are installing.
+ type: str
+ company:
+ description:
+ - Required for C(state=present). The name of the company that makes the extension.
+ type: str
+ description:
+ description:
+ - Required for C(state=present). A short description of the extension.
+ type: str
+ email:
+ description:
+ - Required for C(state=present). Administrator email to use for extension.
+ type: str
+ url:
+ description:
+ - Required for C(state=present). Link to server hosting extension zip file to install.
+ type: str
+ ssl_thumbprint:
+ description:
+ - Required for C(state=present). SSL thumbprint of the extension hosting server.
+ type: str
+ server_type:
+ description:
+ - Required for C(state=present). Type of server being used to install the extension (SOAP, REST, HTTP, etc.).
+ default: vsphere-client-serenity
+ type: str
+ client_type:
+ description:
+ - Required for C(state=present). Type of client the extension is (win32, .net, linux, etc.).
+ default: vsphere-client-serenity
+ type: str
+ visible:
+ description:
+ - Show the extension in solution manager inside vCenter.
+ default: true
+ type: bool
+ state:
+ description:
+ - Add or remove vCenter Extension.
+ choices: [absent, present]
+ default: present
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+ - name: Register vCenter Extension
+ community.vmware.vcenter_extension:
+ hostname: "{{ groups['vcsa'][0] }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ site_password }}"
+ extension_key: "{{ extension_key }}"
+ version: "1.0"
+ company: "Acme"
+ name: "Acme Extension"
+ description: "acme management"
+ email: "user@example.com"
+ url: "https://10.0.0.1/ACME-vSphere-web-plugin-1.0.zip"
+ ssl_thumbprint: "{{ ssl_thumbprint }}"
+ state: present
+ delegate_to: localhost
+ register: register_extension
+
+ - name: Deregister vCenter Extension
+ community.vmware.vcenter_extension:
+ hostname: "{{ groups['vcsa'][0] }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ site_password }}"
+ extension_key: "{{ extension_key }}"
+ version: "1.0"
+ state: absent
+ delegate_to: localhost
+ register: deregister_extension
+'''
+
+RETURN = r'''
+result:
+ description: information about performed operation
+ returned: always
+ type: str
+ sample: "'com.acme.Extension' installed."
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+import datetime
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import connect_to_api, vmware_argument_spec
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(
+ extension_key=dict(type='str', required=True, no_log=False),
+ version=dict(type='str', required=True),
+ email=dict(type='str', required=False),
+ description=dict(type='str', required=False),
+ company=dict(type='str', required=False),
+ name=dict(type='str', required=False),
+ url=dict(type='str', required=False),
+ ssl_thumbprint=dict(type='str', required=False),
+ client_type=dict(type='str', default='vsphere-client-serenity', required=False),
+ server_type=dict(type='str', default='vsphere-client-serenity', required=False),
+ visible=dict(type='bool', default='True', required=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=False,
+ required_if=[
+ ['state', 'present', ['email', 'description', 'company', 'name', 'url', 'ssl_thumbprint', 'server_type', 'client_type']]
+ ]
+ )
+
+ state = module.params['state']
+ extension_key = module.params['extension_key']
+ version = module.params['version']
+ email = module.params['email']
+ desc = module.params['description']
+ name = module.params['name']
+ company = module.params['company']
+ client_type = module.params['client_type']
+ server_type = module.params['server_type']
+ url = module.params['url']
+ visible = module.params['visible']
+ thumbprint = module.params['ssl_thumbprint']
+
+ content = connect_to_api(module, False)
+ em = content.extensionManager
+ key_check = em.FindExtension(extension_key)
+ results = dict(changed=False, installed=dict())
+
+ if state == 'present' and key_check:
+ results['changed'] = False
+ results['installed'] = "'%s' is already installed" % (extension_key)
+
+ elif state == 'present' and not key_check:
+ extension = vim.Extension()
+ extension.key = extension_key
+ extension.company = company
+ extension.version = version
+ extension.lastHeartbeatTime = datetime.datetime.now()
+ description = vim.Description()
+ description.label = name
+ description.summary = desc
+ extension.description = description
+ extension.shownInSolutionManager = visible
+
+ client = vim.Extension.ClientInfo()
+ client.company = company
+ client.version = version
+ client.description = description
+ client.type = client_type
+ client.url = url
+ extension.client = [client]
+
+ server = vim.Extension.ServerInfo()
+ server.company = company
+ server.description = description
+ server.type = server_type
+ server.adminEmail = email
+ server.serverThumbprint = thumbprint
+ server.url = url
+ extension.server = [server]
+
+ em.RegisterExtension(extension)
+ results['changed'] = True
+ results['installed'] = "'%s' installed." % (extension_key)
+
+ elif state == 'absent' and key_check:
+ em.UnregisterExtension(extension_key)
+ results['changed'] = True
+ results['installed'] = "'%s' uninstalled." % (extension_key)
+
+ elif state == 'absent' and not key_check:
+ results['changed'] = False
+ results['installed'] = "'%s' is not installed." % (extension_key)
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vcenter_extension_info.py b/ansible_collections/community/vmware/plugins/modules/vcenter_extension_info.py
new file mode 100644
index 000000000..222cfb4ea
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vcenter_extension_info.py
@@ -0,0 +1,107 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vcenter_extension_info
+short_description: Gather info about vCenter extensions
+description:
+- This module can be used to gather information about vCenter extension.
+author:
+- Abhijeet Kasurde (@Akasurde)
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather info about vCenter Extensions
+ community.vmware.vcenter_extension_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ register: ext_info
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+extension_info:
+ description: List of extensions
+ returned: success
+ type: list
+ sample: [
+ {
+ "extension_company": "VMware, Inc.",
+ "extension_key": "com.vmware.vim.ls",
+ "extension_label": "License Services",
+ "extension_last_heartbeat_time": "2018-09-03T09:36:18.003768+00:00",
+ "extension_subject_name": "",
+ "extension_summary": "Provides various license services",
+ "extension_type": "",
+ "extension_version": "5.0"
+ },
+ {
+ "extension_company": "VMware Inc.",
+ "extension_key": "com.vmware.vim.sms",
+ "extension_label": "VMware vCenter Storage Monitoring Service",
+ "extension_last_heartbeat_time": "2018-09-03T09:36:18.005730+00:00",
+ "extension_subject_name": "",
+ "extension_summary": "Storage Monitoring and Reporting",
+ "extension_type": "",
+ "extension_version": "5.5"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+
+
+class VmwareExtManager(PyVmomi):
+    """Collect information about the extensions registered in vCenter."""
+
+    def __init__(self, module):
+        super(VmwareExtManager, self).__init__(module)
+
+    def gather_plugin_info(self):
+        """Build a list of per-extension dicts and exit the module.
+
+        Exits early with an empty list when no extensionManager is exposed
+        (e.g. when connected to a standalone ESXi host).
+        """
+        result = dict(changed=False, extension_info=[])
+        ext_manager = self.content.extensionManager
+        if not ext_manager:
+            self.module.exit_json(**result)
+
+        for ext in ext_manager.extensionList:
+            # Optional fields (type, subjectName) are normalized to '' so the
+            # returned structure always has the same keys.
+            ext_info = dict(
+                extension_label=ext.description.label,
+                extension_summary=ext.description.summary,
+                extension_key=ext.key,
+                extension_company=ext.company,
+                extension_version=ext.version,
+                extension_type=ext.type if ext.type else '',
+                extension_subject_name=ext.subjectName if ext.subjectName else '',
+                extension_last_heartbeat_time=ext.lastHeartbeatTime,
+            )
+            result['extension_info'].append(ext_info)
+
+        self.module.exit_json(**result)
+
+
+def main():
+    """Entry point: read-only gathering of vCenter extension info."""
+    argument_spec = vmware_argument_spec()
+
+    # No module-specific options; check mode is safe for an info module.
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    vcenter_extension_info_mgr = VmwareExtManager(module)
+    vcenter_extension_info_mgr.gather_plugin_info()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vcenter_folder.py b/ansible_collections/community/vmware/plugins/modules/vcenter_folder.py
new file mode 100644
index 000000000..149e3ed33
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vcenter_folder.py
@@ -0,0 +1,365 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vcenter_folder
+short_description: Manage folders on given datacenter
+description:
+- This module can be used to create, delete, move and rename folders in the given datacenter.
+- This module is only supported for vCenter.
+author:
+- Abhijeet Kasurde (@Akasurde)
+- Christian Kotte (@ckotte) <christian.kotte@gmx.de>
+- Jan Meerkamp (@meerkampdvv)
+options:
+ datacenter:
+ description:
+ - Name of the datacenter.
+ required: true
+ aliases: ['datacenter_name']
+ type: str
+ folder_name:
+ description:
+ - Name of folder to be managed.
+ - This is case sensitive parameter.
+ - Folder name should be under 80 characters. This is a VMware restriction.
+ required: true
+ type: str
+ parent_folder:
+ description:
+ - Name of the parent folder under which new folder needs to be created.
+ - This is case sensitive parameter.
+ - "If user wants to create a folder under '/DC0/vm/vm_folder', this value will be 'vm_folder'."
+ - "If user wants to create a folder under '/DC0/vm/folder1/folder2', this value will be 'folder1/folder2'."
+ required: false
+ type: str
+ folder_type:
+ description:
+ - This is type of folder.
+ - "If set to C(vm), then 'VM and Template Folder' is created under datacenter."
+ - "If set to C(host), then 'Host and Cluster Folder' is created under datacenter."
+ - "If set to C(datastore), then 'Storage Folder' is created under datacenter."
+ - "If set to C(network), then 'Network Folder' is created under datacenter."
+ - This parameter is required, if C(state) is set to C(present) and parent_folder is absent.
+ - This option is ignored, if C(parent_folder) is set.
+ default: vm
+ type: str
+ required: false
+ choices: [ datastore, host, network, vm ]
+ state:
+ description:
+ - State of folder.
+ - If set to C(present) without parent folder parameter, then folder with C(folder_type) is created.
+    - If set to C(present) with parent folder parameter, then folder is created under the parent folder. C(folder_type) is ignored.
+ - If set to C(absent), then folder is unregistered and destroyed.
+ default: present
+ choices: [ present, absent ]
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Create a VM folder on given datacenter
+ community.vmware.vcenter_folder:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter_name: datacenter_name
+ folder_name: sample_vm_folder
+ folder_type: vm
+ state: present
+ register: vm_folder_creation_result
+ delegate_to: localhost
+
+- name: Create a datastore folder on given datacenter
+ community.vmware.vcenter_folder:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter_name: datacenter_name
+ folder_name: sample_datastore_folder
+ folder_type: datastore
+ state: present
+ register: datastore_folder_creation_result
+ delegate_to: localhost
+
+- name: Create a sub folder under VM folder on given datacenter
+ community.vmware.vcenter_folder:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter_name: datacenter_name
+ folder_name: sample_sub_folder
+ parent_folder: vm_folder
+ state: present
+ register: sub_folder_creation_result
+ delegate_to: localhost
+
+- name: Delete a VM folder on given datacenter
+ community.vmware.vcenter_folder:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter_name: datacenter_name
+ folder_name: sample_vm_folder
+ folder_type: vm
+ state: absent
+ register: vm_folder_deletion_result
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+result:
+ description: The detail about the new folder
+ returned: On success
+ type: complex
+ contains:
+ path:
+ description: the full path of the new folder
+ type: str
+ msg:
+ description: string stating about result
+ type: str
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi, find_datacenter_by_name, wait_for_task, get_all_objs
+from ansible.module_utils._text import to_native
+
+
+class VmwareFolderManager(PyVmomi):
+    def __init__(self, module):
+        """Resolve the datacenter and cache its four type-root folders.
+
+        Fails the module immediately when the datacenter cannot be found.
+        """
+        super(VmwareFolderManager, self).__init__(module)
+        datacenter_name = self.params.get('datacenter', None)
+        self.datacenter_obj = find_datacenter_by_name(self.content, datacenter_name=datacenter_name)
+        if self.datacenter_obj is None:
+            self.module.fail_json(msg="Failed to find datacenter %s" % datacenter_name)
+
+        # Map the 'folder_type' option to the datacenter's root folder of
+        # that type; used when creating a top-level folder.
+        self.datacenter_folder_type = {
+            'vm': self.datacenter_obj.vmFolder,
+            'host': self.datacenter_obj.hostFolder,
+            'datastore': self.datacenter_obj.datastoreFolder,
+            'network': self.datacenter_obj.networkFolder,
+        }
+
+ def ensure(self):
+ """
+ Manage internal state management
+ """
+ state = self.module.params.get('state')
+ folder_type = self.module.params.get('folder_type')
+ folder_name = self.module.params.get('folder_name')
+ parent_folder = self.module.params.get('parent_folder', None)
+ results = {'changed': False, 'result': {}}
+ if state == 'present':
+ # Check if the folder already exists
+ p_folder_obj = None
+ if parent_folder:
+ if "/" in parent_folder:
+ parent_folder_parts = parent_folder.strip('/').split('/')
+ p_folder_obj = None
+ for part in parent_folder_parts:
+ part_folder_obj = self.get_folder(folder_name=part,
+ folder_type=folder_type,
+ parent_folder=p_folder_obj)
+ if not part_folder_obj:
+ self.module.fail_json(msg="Could not find folder %s" % part)
+ p_folder_obj = part_folder_obj
+ child_folder_obj = self.get_folder(folder_name=folder_name,
+ folder_type=folder_type,
+ parent_folder=p_folder_obj)
+ if child_folder_obj:
+ results['result'] = "Folder %s already exists under" \
+ " parent folder %s" % (folder_name, parent_folder)
+ self.module.exit_json(**results)
+ else:
+ p_folder_obj = self.get_folder(folder_name=parent_folder,
+ folder_type=folder_type)
+
+ if not p_folder_obj:
+ self.module.fail_json(msg="Parent folder %s does not exist" % parent_folder)
+
+ # Check if folder exists under parent folder
+ child_folder_obj = self.get_folder(folder_name=folder_name,
+ folder_type=folder_type,
+ parent_folder=p_folder_obj)
+ if child_folder_obj:
+ results['result']['path'] = self.get_folder_path(child_folder_obj)
+ results['result'] = "Folder %s already exists under" \
+ " parent folder %s" % (folder_name, parent_folder)
+ self.module.exit_json(**results)
+ else:
+ folder_obj = self.get_folder(folder_name=folder_name,
+ folder_type=folder_type,
+ recurse=True)
+
+ if folder_obj:
+ results['result']['path'] = self.get_folder_path(folder_obj)
+ results['result']['msg'] = "Folder %s already exists" % folder_name
+ self.module.exit_json(**results)
+
+ # Create a new folder
+ try:
+ if parent_folder and p_folder_obj:
+ if self.module.check_mode:
+ results['msg'] = "Folder '%s' of type '%s' under '%s' will be created." % \
+ (folder_name, folder_type, parent_folder)
+ else:
+ new_folder = p_folder_obj.CreateFolder(folder_name)
+ results['result']['path'] = self.get_folder_path(new_folder)
+ results['result']['msg'] = "Folder '%s' of type '%s' under '%s' created" \
+ " successfully." % (folder_name, folder_type, parent_folder)
+ results['changed'] = True
+ elif not parent_folder and not p_folder_obj:
+ if self.module.check_mode:
+ results['msg'] = "Folder '%s' of type '%s' will be created." % (folder_name, folder_type)
+ else:
+ new_folder = self.datacenter_folder_type[folder_type].CreateFolder(folder_name)
+ results['result']['msg'] = "Folder '%s' of type '%s' created successfully." % (folder_name, folder_type)
+ results['result']['path'] = self.get_folder_path(new_folder)
+ results['changed'] = True
+ except vim.fault.DuplicateName as duplicate_name:
+ # To be consistent with the other vmware modules, We decided to accept this error
+ # and the playbook should simply carry on with other tasks.
+ # User will have to take care of this exception
+ # https://github.com/ansible/ansible/issues/35388#issuecomment-362283078
+ results['changed'] = False
+ results['msg'] = "Failed to create folder as another object has same name" \
+ " in the same target folder : %s" % to_native(duplicate_name.msg)
+ except vim.fault.InvalidName as invalid_name:
+ self.module.fail_json(msg="Failed to create folder as folder name is not a valid "
+ "entity name : %s" % to_native(invalid_name.msg))
+ except Exception as general_exc:
+ self.module.fail_json(msg="Failed to create folder due to generic"
+ " exception : %s " % to_native(general_exc))
+ self.module.exit_json(**results)
+ elif state == 'absent':
+ # Check if the folder already exists
+ p_folder_obj = None
+ if parent_folder:
+ if "/" in parent_folder:
+ parent_folder_parts = parent_folder.strip('/').split('/')
+ p_folder_obj = None
+ for part in parent_folder_parts:
+ part_folder_obj = self.get_folder(folder_name=part,
+ folder_type=folder_type,
+ parent_folder=p_folder_obj)
+ if not part_folder_obj:
+ self.module.fail_json(msg="Could not find folder %s" % part)
+ p_folder_obj = part_folder_obj
+ folder_obj = self.get_folder(folder_name=folder_name,
+ folder_type=folder_type,
+ parent_folder=p_folder_obj)
+ else:
+ p_folder_obj = self.get_folder(folder_name=parent_folder,
+ folder_type=folder_type)
+
+ if not p_folder_obj:
+ self.module.fail_json(msg="Parent folder %s does not exist" % parent_folder)
+
+ # Check if folder exists under parent folder
+ folder_obj = self.get_folder(folder_name=folder_name,
+ folder_type=folder_type,
+ parent_folder=p_folder_obj)
+ else:
+ folder_obj = self.get_folder(folder_name=folder_name,
+ folder_type=folder_type,
+ recurse=True)
+ if folder_obj:
+ try:
+ if parent_folder:
+ if self.module.check_mode:
+ results['changed'] = True
+ results['msg'] = "Folder '%s' of type '%s' under '%s' will be removed." % \
+ (folder_name, folder_type, parent_folder)
+ else:
+ if folder_type == 'vm':
+ task = folder_obj.UnregisterAndDestroy()
+ else:
+ task = folder_obj.Destroy()
+ results['changed'], results['msg'] = wait_for_task(task=task)
+ else:
+ if self.module.check_mode:
+ results['changed'] = True
+ results['msg'] = "Folder '%s' of type '%s' will be removed." % (folder_name, folder_type)
+ else:
+ if folder_type == 'vm':
+ task = folder_obj.UnregisterAndDestroy()
+ else:
+ task = folder_obj.Destroy()
+ results['changed'], results['msg'] = wait_for_task(task=task)
+ except vim.fault.ConcurrentAccess as concurrent_access:
+ self.module.fail_json(msg="Failed to remove folder as another client"
+ " modified folder before this operation : %s" % to_native(concurrent_access.msg))
+ except vim.fault.InvalidState as invalid_state:
+ self.module.fail_json(msg="Failed to remove folder as folder is in"
+ " invalid state : %s" % to_native(invalid_state.msg))
+ except Exception as gen_exec:
+ self.module.fail_json(msg="Failed to remove folder due to generic"
+ " exception %s " % to_native(gen_exec))
+ self.module.exit_json(**results)
+
+ def get_folder(self, folder_name, folder_type, parent_folder=None, recurse=False):
+ """
+ Get managed object of folder by name
+ Returns: Managed object of folder by name
+
+ """
+ parent_folder = parent_folder or self.datacenter_folder_type[folder_type]
+
+ folder_objs = get_all_objs(self.content, [vim.Folder], parent_folder, recurse=recurse)
+ for folder in folder_objs:
+ if folder.name == folder_name:
+ return folder
+
+ return None
+
+
+def main():
+    """Entry point for the vcenter_folder module: parse arguments, validate
+    the target is a vCenter server, and delegate to VmwareFolderManager.ensure()."""
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(
+        datacenter=dict(type='str', required=True, aliases=['datacenter_name']),
+        folder_name=dict(type='str', required=True),
+        parent_folder=dict(type='str', required=False),
+        state=dict(type='str',
+                   choices=['present', 'absent'],
+                   default='present'),
+        folder_type=dict(type='str',
+                         default='vm',
+                         choices=['datastore', 'host', 'network', 'vm'],
+                         required=False),
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    # Reject over-long folder names up front.
+    # NOTE(review): the check allows at most 79 characters while the message
+    # says 80 -- confirm the intended vSphere limit.
+    if len(module.params.get('folder_name')) > 79:
+        module.fail_json(msg="Failed to manage folder as folder_name can only contain 80 characters.")
+
+    vcenter_folder_mgr = VmwareFolderManager(module)
+    # Folder management is a vCenter-only feature; bare ESXi hosts are rejected.
+    if not vcenter_folder_mgr.is_vcenter():
+        module.fail_json(msg="Module vcenter_folder is meant for vCenter, hostname %s "
+                             "is not vCenter server." % module.params.get('hostname'))
+    vcenter_folder_mgr.ensure()
diff --git a/ansible_collections/community/vmware/plugins/modules/vcenter_license.py b/ansible_collections/community/vmware/plugins/modules/vcenter_license.py
new file mode 100644
index 000000000..6836d03e0
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vcenter_license.py
@@ -0,0 +1,272 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: vcenter_license
+short_description: Manage VMware vCenter license keys
+description:
+- Add and delete vCenter, ESXi server license keys.
+author:
+- Dag Wieers (@dagwieers)
+options:
+ labels:
+ description:
+ - The optional labels of the license key to manage in vSphere vCenter.
+ - This is dictionary with key/value pair.
+ default: {
+ 'source': 'ansible'
+ }
+ type: dict
+ license:
+ description:
+ - The license key to manage in vSphere vCenter.
+ required: true
+ type: str
+ state:
+ description:
+ - Whether to add (C(present)) or remove (C(absent)) the license key.
+ choices: [absent, present]
+ default: present
+ type: str
+ esxi_hostname:
+ description:
+ - The hostname of the ESXi server to which the specified license will be assigned.
+ - This parameter is optional.
+ type: str
+ datacenter:
+ description:
+ - The datacenter name to use for the operation.
+ type: str
+ cluster_name:
+ description:
+ - Name of the cluster to apply vSAN license.
+ type: str
+notes:
+- This module will also auto-assign the current vCenter to the license key
+  if the product matches the license key, and vCenter is currently assigned
+ an evaluation license only.
+- The evaluation license (00000-00000-00000-00000-00000) is not listed
+ when unused.
+- If C(esxi_hostname) is specified, then will assign the C(license) key to
+ the ESXi host.
+- If C(esxi_hostname) is not specified, then will just register the C(license) key to
+ vCenter inventory without assigning it to an ESXi host.
+extends_documentation_fragment:
+- community.vmware.vmware.vcenter_documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Add a new vCenter license
+ community.vmware.vcenter_license:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ license: f600d-21ae3-5592b-249e0-cc341
+ state: present
+ delegate_to: localhost
+
+- name: Remove an (unused) vCenter license
+ community.vmware.vcenter_license:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ license: f600d-21ae3-5592b-249e0-cc341
+ state: absent
+ delegate_to: localhost
+
+- name: Add ESXi license and assign to the ESXi host
+ community.vmware.vcenter_license:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ license: f600d-21ae3-5592b-249e0-dd502
+ state: present
+ delegate_to: localhost
+
+- name: Add vSAN license and assign to the given cluster
+ community.vmware.vcenter_license:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter: '{{ datacenter_name }}'
+ cluster_name: '{{ cluster_name }}'
+ license: f600d-21ae3-5592b-249e0-dd502
+ state: present
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+licenses:
+ description: list of license keys after module executed
+ returned: always
+ type: list
+ sample:
+ - f600d-21ae3-5592b-249e0-cc341
+ - 143cc-0e942-b2955-3ea12-d006f
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, find_hostsystem_by_name
+
+
+class VcenterLicenseMgr(PyVmomi):
+ def __init__(self, module):
+ super(VcenterLicenseMgr, self).__init__(module)
+
+ def find_key(self, licenses, license):
+ for item in licenses:
+ if item.licenseKey == license:
+ return item
+ return None
+
+ def list_keys(self, licenses):
+ keys = []
+ for item in licenses:
+ # Filter out evaluation license key
+ if item.used is None:
+ continue
+ keys.append(item.licenseKey)
+ return keys
+
+
+def main():
+    """Entry point: add/remove a vCenter license key, and optionally assign
+    it to a cluster, an ESXi host, or the vCenter instance itself."""
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(dict(
+        labels=dict(type='dict', default=dict(source='ansible')),
+        license=dict(type='str', required=True),
+        state=dict(type='str', default='present', choices=['absent', 'present']),
+        esxi_hostname=dict(type='str'),
+        datacenter=dict(type='str'),
+        cluster_name=dict(type='str'),
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    license = module.params['license']
+    state = module.params['state']
+
+    # Convert the labels dict into vim.KeyValue pairs for AddLicense.
+    # FIXME: This does not seem to work on vCenter v6.0
+    labels = []
+    for k in module.params['labels']:
+        kv = vim.KeyValue()
+        kv.key = k
+        kv.value = module.params['labels'][k]
+        labels.append(kv)
+
+    result = dict(
+        changed=False,
+        diff=dict(),
+    )
+
+    pyv = VcenterLicenseMgr(module)
+    # License management is vCenter-only; reject direct ESXi connections.
+    if not pyv.is_vcenter():
+        module.fail_json(msg="vcenter_license is meant for vCenter, hostname %s "
+                             "is not vCenter server." % module.params.get('hostname'))
+
+    lm = pyv.content.licenseManager
+
+    result['licenses'] = pyv.list_keys(lm.licenses)
+    if module._diff:
+        result['diff']['before'] = '\n'.join(result['licenses']) + '\n'
+
+    if state == 'present':
+        if license not in result['licenses']:
+            result['changed'] = True
+            if module.check_mode:
+                result['licenses'].append(license)
+            else:
+                lm.AddLicense(license, labels)
+
+        key = pyv.find_key(lm.licenses, license)
+        # entityId is the managed object the license gets assigned to
+        # (cluster, ESXi host, or the vCenter instance); None = register only.
+        entityId = None
+        if key is not None:
+            lam = lm.licenseAssignmentManager
+            assigned_license = None
+            datacenter = module.params['datacenter']
+            datacenter_obj = None
+            if datacenter:
+                datacenter_obj = pyv.find_datacenter_by_name(datacenter)
+                if not datacenter_obj:
+                    module.fail_json(msg="Unable to find the datacenter %(datacenter)s" % module.params)
+
+            cluster = module.params['cluster_name']
+            # if cluster_name parameter is provided then search the cluster object in vcenter
+            if cluster:
+                cluster_obj = pyv.find_cluster_by_name(cluster_name=cluster, datacenter_name=datacenter_obj)
+                if not cluster_obj:
+                    msg = "Unable to find the cluster %(cluster_name)s"
+                    if datacenter:
+                        msg += " in datacenter %(datacenter)s"
+                    module.fail_json(msg=msg % module.params)
+                entityId = cluster_obj._moId
+            # if esxi_hostname parameter is provided then search the esxi object in vcenter
+            elif module.params['esxi_hostname']:
+                esxi_host = find_hostsystem_by_name(pyv.content, module.params['esxi_hostname'])
+                if esxi_host is None:
+                    module.fail_json(msg='Cannot find the specified ESXi host "%s".' % module.params['esxi_hostname'])
+                entityId = esxi_host._moId
+                # e.g., key.editionKey is "esx.enterprisePlus.cpuPackage", not sure all keys are in this format
+                if 'esx' not in key.editionKey:
+                    module.warn('License key "%s" edition "%s" is not suitable for ESXi server' % (license, key.editionKey))
+            # backward compatibility - check if it is a vCenter license key
+            elif pyv.content.about.name in key.name or 'vCenter Server' in key.name:
+                entityId = pyv.content.about.instanceUuid
+
+            # if we have found a cluster, an esxi or a vCenter object we try to assign the licence
+            if entityId:
+                try:
+                    assigned_license = lam.QueryAssignedLicenses(entityId=entityId)
+                except Exception as e:
+                    module.fail_json(msg='Could not query vCenter "%s" assigned license info due to %s.' % (entityId, to_native(e)))
+
+                # Only (re)assign when nothing is assigned yet or another key is.
+                if not assigned_license or (len(assigned_license) != 0 and assigned_license[0].assignedLicense.licenseKey != license):
+                    try:
+                        lam.UpdateAssignedLicense(entity=entityId, licenseKey=license)
+                    except Exception:
+                        module.fail_json(msg='Could not assign "%s" (%s) to vCenter.' % (license, key.name))
+                    result['changed'] = True
+                result['licenses'] = pyv.list_keys(lm.licenses)
+        else:
+            module.fail_json(msg='License "%s" is not existing or can not be added' % license)
+        if module._diff:
+            result['diff']['after'] = '\n'.join(result['licenses']) + '\n'
+
+    elif state == 'absent' and license in result['licenses']:
+
+        # Check if key is in use
+        key = pyv.find_key(lm.licenses, license)
+        if key.used > 0:
+            module.fail_json(msg='Cannot remove key "%s", still in use %s time(s).' % (license, key.used))
+
+        result['changed'] = True
+        if module.check_mode:
+            result['licenses'].remove(license)
+        else:
+            lm.RemoveLicense(license)
+            result['licenses'] = pyv.list_keys(lm.licenses)
+        if module._diff:
+            result['diff']['after'] = '\n'.join(result['licenses']) + '\n'
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vcenter_standard_key_provider.py b/ansible_collections/community/vmware/plugins/modules/vcenter_standard_key_provider.py
new file mode 100644
index 000000000..f931ebf38
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vcenter_standard_key_provider.py
@@ -0,0 +1,690 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2021, Ansible Project
+# Copyright: (c) 2021, VMware, Inc. All Rights Reserved
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vcenter_standard_key_provider
+short_description: Add, reconfigure or remove Standard Key Provider on vCenter server
+description: >
+ This module is used for adding, reconfiguring or removing Standard Key Provider on vCenter server.
+ Refer to VMware docs for more information: L(Standard Key Provider,
+ https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.security.doc/GUID-6DB1E745-9624-43EA-847C-DD2F767CB94B.html)
+author:
+ - Diane Wang (@Tomorrow9) <dianew@vmware.com>
+options:
+ name:
+ description: Name of the Key Provider to be added, reconfigured or removed from vCenter.
+ type: str
+ required: true
+ mark_default:
+ description:
+ - Set specified Key Provider with name C(name) as the default Key Provider.
+    - If the newly added Key Provider is the only key provider in vCenter, then it will be marked as default after adding.
+ type: bool
+ default: false
+ state:
+ description:
+ - If set to C(absent), the named Key Provider will be removed from vCenter.
+ - If set to C(present), the named existing Key Provider will be reconfigured or new Key Provider will be added.
+ type: str
+ choices:
+ - present
+ - absent
+ default: present
+ kms_info:
+ description:
+ - The information of an external key server (KMS).
+ - C(kms_name), C(kms_ip) are required when adding a Standard Key Provider.
+ - If C(kms_port) is not specified, the default port 5696 will be used.
+ - C(kms_ip), C(kms_port) can be reconfigured for an existing KMS with name C(kms_name).
+ type: list
+ default: []
+ elements: dict
+ suboptions:
+ kms_name:
+ description: Name of the KMS to be configured.
+ type: str
+ kms_ip:
+ description: IP address of the external KMS.
+ type: str
+ kms_port:
+ description: Port of the external KMS.
+ type: int
+ remove_kms:
+ description: Remove the configured KMS with name C(kms_name) from the KMIP cluster.
+ type: bool
+ proxy_server:
+ description: Address of the proxy server to connect to KMS.
+ type: str
+ proxy_port:
+ description: Port of the proxy server.
+ type: int
+ kms_username:
+ description: Username to authenticate to the KMS.
+ type: str
+ kms_password:
+ description: Password to authenticate to the KMS.
+ type: str
+ make_kms_trust_vc:
+ description:
+ - After adding the Standard Key Provider to the vCenter Server, you can establish a trusted connection, the
+ exact process depends on the certificates that the key provider accepts, and on your company policy.
+ - Three methods implemented here,
+ (1) upload client certificate and private key through C(upload_client_cert) and C(upload_client_key) parameters,
+ (2) generate, update, download vCenter self signed certificate through C(download_self_signed_cert) parameter,
+ (3) download generated Certificate Signing Request(CSR) through C(download_client_csr) parameter, send it to
+ KMS then upload the KMS signed CSR through C(upload_kms_signed_client_csr) parameter.
+ - This is not set to be mandatory, if not set, please go to vCenter to setup trust connection with KMS manually.
+ type: dict
+ suboptions:
+ upload_client_cert:
+ description:
+ - The absolute file path of client certificate.
+ - Request a certificate and private key from the KMS vendor. The files are X509 files in PEM format.
+ - The certificate might be already trusted by the KMS server.
+ type: path
+ upload_client_key:
+ description: The absolute file path of client private key to be uploaded together with C(upload_client_cert).
+ type: path
+ download_self_signed_cert:
+ description: The absolute path on local machine for keeping vCenter generated self signed client cert.
+ type: path
+ download_client_csr:
+ description:
+ - The absolute path on local machine for keeping vCenter generated CSR.
+ - Then upload the KMS signed CSR using C(upload_kms_signed_client_csr) to vCenter.
+ type: path
+ upload_kms_signed_client_csr:
+ description: The absolute file path of KMS signed CSR downloaded from C(download_client_csr).
+ type: path
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: Add a new Standard Key Provider with client certificate and private key
+ community.vmware.vcenter_standard_key_provider:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: 'test_standard_kp'
+ state: 'present'
+ mark_default: true
+ kms_info:
+ - kms_name: test_kms_1
+ kms_ip: 192.168.1.10
+ make_kms_trust_vc:
+ upload_client_cert: "/tmp/test_cert.pem"
+ upload_client_key: "/tmp/test_cert_key.pem"
+ register: add_skp_result
+
+- name: Remove the KMS from the key provider cluster
+ community.vmware.vcenter_standard_key_provider:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: 'test_standard_kp'
+ state: 'present'
+ kms_info:
+ - kms_name: test_kms_1
+ remove_kms: true
+ register: remove_kms_result
+
+- name: Remove the Standard Key Provider
+ community.vmware.vcenter_standard_key_provider:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: 'test_standard_kp'
+ state: 'absent'
+ register: remove_kp_result
+'''
+
+RETURN = r'''
+key_provider_clusters:
+ description: the Key Provider cluster info
+ returned: always
+ type: list
+ sample:
+ [
+ {
+ "has_backup": null,
+ "key_id": null,
+ "key_provide_id": "test_standard",
+ "management_type": null,
+ "servers": [
+ {
+ "address": "192.168.1.10",
+ "name": "test_kms",
+ "port": 5696,
+ "protocol": "",
+ "proxy": "",
+ "proxy_port": null,
+ "user_name": ""
+ }
+ ],
+ "tpm_required": null,
+ "use_as_default": true
+ }
+ ]
+'''
+
+HAS_PYVMOMI = False
+try:
+ from pyVmomi import vim
+
+ HAS_PYVMOMI = True
+except ImportError:
+ pass
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+
+
+class PyVmomiHelper(PyVmomi):
+    def __init__(self, module):
+        """Cache the vCenter CryptoManager handle for later API calls."""
+        super(PyVmomiHelper, self).__init__(module)
+        self.crypto_mgr = self.content.cryptoManager
+        # vim.encryption.KeyProviderId of the provider being managed;
+        # populated later once the provider name is resolved.
+        self.key_provider_id = None
+
+    def get_key_provider_clusters(self):
+        """
+        List all key provider clusters known to this vCenter server.
+
+        Uses CryptoManager.ListKmsClusters on vSphere 7.0+ (which can include
+        the KMS server details) and falls back to the older ListKmipServers
+        API on earlier versions.  Any API failure aborts the module.
+
+        Returns: the cluster list as returned by the API (may be empty).
+        """
+        key_provider_clusters = None
+        try:
+            if self.vcenter_version_at_least(version=(7, 0, 0)):
+                key_provider_clusters = self.crypto_mgr.ListKmsClusters(includeKmsServers=True)
+            else:
+                key_provider_clusters = self.crypto_mgr.ListKmipServers()
+        except Exception as e:
+            self.module.fail_json(msg="Failed to get key provider clusters info with exception: %s" % to_native(e))
+
+        return key_provider_clusters
+
+ @staticmethod
+ def get_key_provider_by_name(key_provider_clusters, name):
+ key_provider_cluster = None
+ if not name or not key_provider_clusters:
+ return key_provider_cluster
+ for kp_cluster in key_provider_clusters:
+ if kp_cluster.clusterId.id == name:
+ key_provider_cluster = kp_cluster
+
+ return key_provider_cluster
+
+ @staticmethod
+ def gather_key_provider_cluster_info(key_provider_clusters):
+ key_provider_cluster_facts = []
+ if not key_provider_clusters:
+ return key_provider_cluster_facts
+ for kp_item in key_provider_clusters:
+ kp_info = dict(
+ key_provide_id=kp_item.clusterId.id,
+ use_as_default=kp_item.useAsDefault,
+ management_type=kp_item.managementType,
+ has_backup=kp_item.hasBackup,
+ tpm_required=kp_item.tpmRequired,
+ key_id=kp_item.keyId
+ )
+ kmip_servers = []
+ if hasattr(kp_item, 'servers') and len(kp_item.servers) != 0:
+ for kmip_item in kp_item.servers:
+ kmip_info = dict(
+ name=kmip_item.name,
+ address=kmip_item.address,
+ port=kmip_item.port,
+ protocol=kmip_item.protocol,
+ proxy=kmip_item.proxyAddress,
+ proxy_port=kmip_item.proxyPort,
+ user_name=kmip_item.userName
+ )
+ kmip_servers.append(kmip_info)
+ kp_info.update(servers=kmip_servers)
+ key_provider_cluster_facts.append(kp_info)
+
+ return key_provider_cluster_facts
+
+    def set_default_key_provider(self):
+        """
+        Mark self.key_provider_id as the default key provider on vCenter.
+
+        Requires self.key_provider_id to be set; fails the module on error.
+        """
+        # Since vSphere API 6.5
+        try:
+            self.crypto_mgr.MarkDefault(self.key_provider_id)
+        except Exception as e:
+            self.module.fail_json(msg="Failed to mark default key provider to '%s' with exception: %s"
+                                      % (self.key_provider_id.id, to_native(e)))
+
+ @staticmethod
+ def create_key_provider_id(key_provider_name):
+ key_provider_id = None
+ if key_provider_name:
+ key_provider_id = vim.encryption.KeyProviderId()
+ key_provider_id.id = key_provider_name
+
+ return key_provider_id
+
+ @staticmethod
+ def create_kmip_server_info(kms_info, proxy_user_info):
+ kmip_server_info = None
+ if kms_info:
+ kmip_server_info = vim.encryption.KmipServerInfo()
+ kmip_server_info.name = kms_info.get('kms_name')
+ kmip_server_info.address = kms_info.get('kms_ip')
+ if kms_info.get('kms_port') is None:
+ kmip_server_info.port = 5696
+ else:
+ kmip_server_info.port = kms_info.get('kms_port')
+ if proxy_user_info:
+ if proxy_user_info.get('proxy_server'):
+ kmip_server_info.proxyAddress = proxy_user_info['proxy_server']
+ if proxy_user_info.get('proxy_port'):
+ kmip_server_info.proxyPort = proxy_user_info['proxy_port']
+ if proxy_user_info.get('kms_username'):
+ kmip_server_info.userName = proxy_user_info['kms_username']
+
+ return kmip_server_info
+
+ @staticmethod
+ def create_kmip_server_spec(key_provider_id, kms_server_info, kms_password=None):
+ kmip_server_spec = None
+ if key_provider_id and kms_server_info:
+ kmip_server_spec = vim.encryption.KmipServerSpec()
+ kmip_server_spec.clusterId = key_provider_id
+ kmip_server_spec.info = kms_server_info
+ if kms_password:
+ kmip_server_spec.password = kms_password
+
+ return kmip_server_spec
+
+    def setup_standard_kp(self, kp_name, kms_info_list, proxy_user_config_dict):
+        """
+        Register a new Standard Key Provider with its KMS servers.
+
+        For each KMS in kms_info_list: register the KMIP server under the new
+        provider cluster, retrieve the KMS server certificate and upload it so
+        vCenter trusts that server.  Any API failure aborts the module.
+
+        Args:
+            kp_name: Name of the key provider cluster to create.
+            kms_info_list: list of dicts with kms_name/kms_ip/kms_port.
+            proxy_user_config_dict: proxy/username/password settings shared by
+                all KMS servers.
+
+        Returns: the vim.encryption.KeyProviderId of the new provider.
+        """
+        kp_id = self.create_key_provider_id(kp_name)
+        for kms_info in kms_info_list:
+            server_cert = None
+            kms_server = self.create_kmip_server_info(kms_info, proxy_user_config_dict)
+            kms_spec = self.create_kmip_server_spec(kp_id, kms_server, proxy_user_config_dict.get('kms_password'))
+            try:
+                self.crypto_mgr.RegisterKmipServer(server=kms_spec)
+            except Exception as e:
+                self.module.fail_json(msg="Failed to add Standard Key Provider '%s' with exception: %s"
+                                          % (kp_name, to_native(e)))
+            # Fetch the KMS server's certificate so it can be added to the
+            # vCenter trust store below.
+            try:
+                server_cert = self.crypto_mgr.RetrieveKmipServerCert(keyProvider=kp_id, server=kms_server).certificate
+            except Exception as e:
+                self.module.fail_json(msg="Failed to retrieve KMS server certificate with exception: %s" % to_native(e))
+            if not server_cert:
+                self.module.fail_json(msg="Got empty KMS server certificate: '%s'" % server_cert)
+            try:
+                self.crypto_mgr.UploadKmipServerCert(cluster=kp_id, certificate=server_cert)
+            except Exception as e:
+                self.module.fail_json(msg="Failed to upload KMS server certificate for key provider '%s' with"
+                                          " exception: %s" % (kp_name, to_native(e)))
+
+        return kp_id
+
+    def add_kmip_to_standard_kp(self, kms_info, proxy_user_config_dict):
+        """
+        Register one additional KMIP (KMS) server in the existing provider
+        cluster identified by self.key_provider_id.  Fails the module on error.
+        """
+        kmip_server_info = self.create_kmip_server_info(kms_info, proxy_user_config_dict)
+        kmip_server_spec = self.create_kmip_server_spec(self.key_provider_id, kmip_server_info,
+                                                        proxy_user_config_dict.get('kms_password'))
+        try:
+            self.crypto_mgr.RegisterKmipServer(server=kmip_server_spec)
+        except Exception as e:
+            self.module.fail_json(msg="Failed to add the KMIP server to Key Provider cluster with exception: %s"
+                                      % to_native(e))
+
+    def change_kmip_in_standard_kp(self, existing_kmip_info, kms_info, proxy_user_config_dict):
+        """
+        Reconfigure one existing KMIP server entry in place.
+
+        Mutates *existing_kmip_info* (address/port from kms_info; proxy and
+        user name from proxy_user_config_dict) and pushes an UpdateKmipServer
+        call only when something actually differs.
+
+        Args:
+            existing_kmip_info: the server info object to update in place.
+            kms_info: dict with optional kms_ip / kms_port; may be None.
+            proxy_user_config_dict: optional proxy/username/password settings.
+
+        Returns: True when an update was sent to vCenter, else False.
+        """
+        changed = False
+        if kms_info:
+            if kms_info.get('kms_ip') and existing_kmip_info.address != kms_info['kms_ip']:
+                existing_kmip_info.address = kms_info['kms_ip']
+                changed = True
+            if kms_info.get('kms_port') and existing_kmip_info.port != kms_info['kms_port']:
+                existing_kmip_info.port = kms_info['kms_port']
+                changed = True
+        if proxy_user_config_dict:
+            if proxy_user_config_dict.get('proxy_server') and \
+                    existing_kmip_info.proxyAddress != proxy_user_config_dict['proxy_server']:
+                existing_kmip_info.proxyAddress = proxy_user_config_dict['proxy_server']
+                changed = True
+            if proxy_user_config_dict.get('proxy_port') and \
+                    existing_kmip_info.proxyPort != proxy_user_config_dict['proxy_port']:
+                existing_kmip_info.proxyPort = proxy_user_config_dict['proxy_port']
+                changed = True
+            if proxy_user_config_dict.get('kms_username') and \
+                    existing_kmip_info.userName != proxy_user_config_dict['kms_username']:
+                existing_kmip_info.userName = proxy_user_config_dict['kms_username']
+                changed = True
+        if changed:
+            # NOTE(review): assumes proxy_user_config_dict is a dict (not None)
+            # whenever an update is needed -- .get would fail on None; confirm
+            # callers always pass a dict.
+            kmip_server_spec = self.create_kmip_server_spec(self.key_provider_id, existing_kmip_info,
+                                                            proxy_user_config_dict.get('kms_password'))
+            try:
+                # Since vSphere API 6.5
+                self.crypto_mgr.UpdateKmipServer(server=kmip_server_spec)
+            except Exception as e:
+                self.module.fail_json(msg="Failed to update KMIP server info with exception: %s" % to_native(e))
+
+        return changed
+
+    def reconfig_kmip_standard_kp(self, kmip_cluster_servers, kms_info_list, proxy_user_config_dict):
+        """
+        Reconcile the configured KMS servers of an existing key provider.
+
+        For every entry in kms_info_list: remove it (remove_kms), update it in
+        place when a server of that name exists, or register it as a new KMIP
+        server.  Remaining servers then get proxy/user settings refreshed.
+
+        Returns: True when any server was added, removed or updated.
+        """
+        changed = False
+        # kms server reconfigure
+        if len(kms_info_list) != 0:
+            for kms_info in kms_info_list:
+                existing_kmip = None
+                for kmip_server in kmip_cluster_servers:
+                    if kmip_server.name == kms_info.get('kms_name'):
+                        existing_kmip = kmip_server
+                # reconfigure existing kms server
+                if existing_kmip is not None:
+                    if kms_info.get('remove_kms'):
+                        self.remove_kms_server(self.key_provider_id, kms_info.get('kms_name'))
+                        kms_changed = True
+                    else:
+                        kms_changed = self.change_kmip_in_standard_kp(existing_kmip, kms_info, proxy_user_config_dict)
+                # no kms server with specified name
+                else:
+                    if kms_info.get('remove_kms'):
+                        self.module.fail_json(msg="Not find named KMS server to remove in the key provider cluster '%s'"
+                                                  % self.key_provider_id.id)
+                    self.add_kmip_to_standard_kp(kms_info, proxy_user_config_dict)
+                    kms_changed = True
+                if kms_changed:
+                    changed = True
+        # no kms specified in kms_info, then only update proxy or user info
+        # NOTE(review): this loop also runs when kms_info_list is non-empty,
+        # which the comment above does not suggest -- confirm the intended
+        # nesting against upstream.
+        for kmip_server in kmip_cluster_servers:
+            kms_changed = self.change_kmip_in_standard_kp(kmip_server, kms_info=None,
+                                                          proxy_user_config_dict=proxy_user_config_dict)
+            if kms_changed:
+                changed = True
+
+        return changed
+
+ def update_self_signed_client_cert(self, dest_path):
+ if not os.path.exists(dest_path):
+ try:
+ os.makedirs(dest_path)
+ except OSError as e:
+ self.module.fail_json(msg="Specified destination path '%s' not exist, but failed to create it with"
+ " exception: %s" % (dest_path, to_native(e)))
+ client_cert_file_path = os.path.join(dest_path, self.key_provider_id.id + '_self_signed_cert.pem')
+ client_cert = self.crypto_mgr.RetrieveSelfSignedClientCert(self.key_provider_id)
+ if not client_cert:
+ try:
+ client_cert = self.crypto_mgr.GenerateSelfSignedClientCert(self.key_provider_id)
+ except Exception as e:
+ self.module.fail_json(msg="Generate self signed client certificate failed with exception: %s"
+ % to_native(e))
+ if not client_cert:
+ self.module.fail_json(msg="Generated self signed client certificate is empty '%s'" % client_cert)
+ try:
+ self.crypto_mgr.UpdateSelfSignedClientCert(self.key_provider_id, client_cert)
+ except Exception as e:
+ self.module.fail_json(msg="Update self signed client cert failed with exception: %s" % to_native(e))
+ client_cert_file = open(client_cert_file_path, 'w')
+ client_cert_file.write(client_cert)
+ client_cert_file.close()
+
+ return client_cert_file_path
+
+ def download_client_csr_file(self, dest_path):
+ if not os.path.exists(dest_path):
+ try:
+ os.makedirs(dest_path)
+ except OSError as e:
+ self.module.fail_json(msg="Specified destination path '%s' not exist, but failed to create it with"
+ " exception: %s" % (dest_path, to_native(e)))
+ client_csr_file_path = os.path.join(dest_path, self.key_provider_id.id + '_client_csr.pem')
+ client_csr = self.crypto_mgr.RetrieveClientCsr(self.key_provider_id)
+ if not client_csr:
+ try:
+ client_csr = self.crypto_mgr.GenerateClientCsr(self.key_provider_id)
+ except Exception as e:
+ self.module.fail_json(msg="Generate client CSR failed with exception: %s" % to_native(e))
+ if not client_csr:
+ self.module.fail_json(msg="Generated client CSR is empty '%s'" % client_csr)
+ else:
+ client_csr_file = open(client_csr_file_path, 'w')
+ client_csr_file.write(client_csr)
+ client_csr_file.close()
+
+ return client_csr_file_path
+
+ def upload_kms_signed_csr(self, kms_signed_csr):
+ kms_signed_csr_file = open(kms_signed_csr)
+ kms_signed_csr_content = kms_signed_csr_file.read()
+ kms_signed_csr_file.close()
+ try:
+ self.crypto_mgr.UpdateKmsSignedCsrClientCert(self.key_provider_id, kms_signed_csr_content)
+ except Exception as e:
+ self.module.fail_json(msg="Update KMS signed client CSR cert failed with exception: '%s'" % to_native(e))
+
+ def upload_client_cert_key(self, client_cert, client_key):
+ client_cert_file = open(client_cert)
+ private_key_file = open(client_key)
+ client_cert_content = client_cert_file.read()
+ private_key_content = private_key_file.read()
+ client_cert_file.close()
+ private_key_file.close()
+ try:
+ self.crypto_mgr.UploadClientCert(cluster=self.key_provider_id, certificate=client_cert_content,
+ privateKey=private_key_content)
+ except Exception as e:
+ self.module.fail_json(msg="Failed to upload client certificate and private key for key provider '%s'"
+ " with exception: %s" % (self.key_provider_id.id, to_native(e)))
+
+ def download_upload_cert_for_trust(self, kms_trust_vc_config):
+ changed = False
+ cert_info = ''
+ client_cert = kms_trust_vc_config.get('upload_client_cert')
+ client_key = kms_trust_vc_config.get('upload_client_key')
+ kms_signed_csr = kms_trust_vc_config.get('upload_kms_signed_client_csr')
+ self_signed_cert_path = kms_trust_vc_config.get('download_self_signed_cert')
+ client_csr_path = kms_trust_vc_config.get('download_client_csr')
+
+ if client_cert and client_key:
+ if not os.path.exists(client_cert) or not os.path.exists(client_key):
+ self.module.fail_json(msg="Configured 'upload_client_cert' file: '%s', or 'upload_client_key' file:"
+ " '%s' does not exist." % (client_cert, client_key))
+ self.upload_client_cert_key(client_cert, client_key)
+ cert_info = "Client cert file '%s', key file '%s' uploaded for key provider '%s'" \
+ % (client_cert, client_key, self.key_provider_id.id)
+ changed = True
+ elif kms_signed_csr:
+ if not os.path.exists(kms_signed_csr):
+ self.module.fail_json(msg="Configured 'upload_kms_signed_client_csr' file: '%s' does not exist."
+ % kms_signed_csr)
+ self.upload_kms_signed_csr(kms_signed_csr)
+ cert_info = "KMS signed client CSR '%s' uploaded for key provider '%s'" % (kms_signed_csr,
+ self.key_provider_id.id)
+ changed = True
+ elif self_signed_cert_path:
+ cert_file_path = self.update_self_signed_client_cert(self_signed_cert_path)
+ cert_info = "Client self signed certificate file '%s' for key provider '%s' updated and downloaded" \
+ % (cert_file_path, self.key_provider_id.id)
+ changed = True
+ elif client_csr_path:
+ cert_file_path = self.download_client_csr_file(client_csr_path)
+ cert_info = "Client certificate signing request file '%s' for key provider '%s' downloaded" \
+ % (cert_file_path, self.key_provider_id.id)
+
+ return changed, cert_info
+
+ def remove_kms_server(self, key_provider_id, kms_server):
+ # Since vSphere API 6.5
+ try:
+ self.crypto_mgr.RemoveKmipServer(clusterId=key_provider_id, serverName=kms_server)
+ except Exception as e:
+ self.module.fail_json(msg="Failed to remove KMIP server '%s' from key provider '%s' with exception: %s"
+ % (kms_server, key_provider_id.id, to_native(e)))
+
+ def remove_kms_cluster(self, kp_cluster):
+ for kms in kp_cluster.servers:
+ self.remove_kms_server(kp_cluster.clusterId, kms.name)
+
+ def get_key_provider_type(self, kmip_cluster_info):
+ key_provider_type = ''
+ if kmip_cluster_info is None:
+ return key_provider_type
+ # Native Key Provider is supported from vSphere 7.0.2
+ if not self.vcenter_version_at_least(version=(7, 0, 2)):
+ key_provider_type = 'standard'
+ else:
+ if kmip_cluster_info.managementType == 'vCenter':
+ key_provider_type = 'standard'
+ elif kmip_cluster_info.managementType == 'nativeProvider':
+ key_provider_type = 'native'
+ else:
+ key_provider_type = kmip_cluster_info.managementType
+
+ return key_provider_type
+
+ def key_provider_operation(self):
+ results = {'failed': False, 'changed': False}
+ kp_name = self.params['name']
+ if not kp_name:
+ self.module.fail_json(msg="Please set a valid name of key provider via 'name' parameter, now it's '%s',"
+ % kp_name)
+ key_provider_clusters = self.get_key_provider_clusters()
+ # Find if there is existing Key Provider with the specified name
+ existing_kp_cluster = self.get_key_provider_by_name(key_provider_clusters, kp_name)
+ existing_kp_type = self.get_key_provider_type(existing_kp_cluster)
+ if existing_kp_cluster is not None:
+ if existing_kp_type and existing_kp_type == 'native':
+ self.module.fail_json(msg="Native Key Provider with name '%s' already exist, please change to another"
+ " name for Standard Key Provider operation using this module." % kp_name)
+ self.key_provider_id = existing_kp_cluster.clusterId
+
+ # Add a new Key Provider or reconfigure the existing Key Provider
+ if self.params['state'] == 'present':
+ is_default_kp = False
+ proxy_user_config = dict()
+ proxy_user_config.update(
+ proxy_server=self.params.get('proxy_server'),
+ proxy_port=self.params.get('proxy_port'),
+ kms_username=self.params.get('kms_username'),
+ kms_password=self.params.get('kms_password')
+ )
+ if existing_kp_cluster is not None:
+ is_default_kp = existing_kp_cluster.useAsDefault
+ # For existing Standard Key Provider, KMS servers can be reconfigured
+ if self.module.check_mode:
+ results['desired_operation'] = "reconfig standard key provider"
+ results['target_key_provider'] = kp_name
+ self.module.exit_json(**results)
+ else:
+ results['operation'] = "reconfig standard key provider"
+ results['changed'] = self.reconfig_kmip_standard_kp(existing_kp_cluster.servers,
+ self.params['kms_info'], proxy_user_config)
+ # Named Key Provider not exist
+ else:
+ # Add a Standard Key Provider, KMS name, IP are required
+ if len(self.params['kms_info']) == 0:
+ self.module.fail_json(msg="Please set 'kms_info' when add new standard key provider")
+ for configured_kms_info in self.params['kms_info']:
+ if configured_kms_info.get('remove_kms'):
+ self.module.fail_json(msg="Specified key provider '%s' not exist, so no KMS server to be"
+ " removed." % kp_name)
+ if self.module.check_mode:
+ results['desired_operation'] = "add standard key provider"
+ self.module.exit_json(**results)
+ else:
+ results['operation'] = "add standard key provider"
+ new_key_provider_id = self.setup_standard_kp(kp_name, self.params['kms_info'], proxy_user_config)
+ if new_key_provider_id:
+ self.key_provider_id = new_key_provider_id
+ # If this new added key provider is the only key provider, then mark it default
+ if len(key_provider_clusters) == 0:
+ self.params['mark_default'] = True
+ results['changed'] = True
+
+ if self.key_provider_id and self.params['mark_default'] and not is_default_kp:
+ self.set_default_key_provider()
+ results['changed'] = True
+ if self.key_provider_id and self.params.get('make_kms_trust_vc'):
+ results['changed'], cert_info = self.download_upload_cert_for_trust(self.params['make_kms_trust_vc'])
+ results['msg'] = cert_info
+ # Remove Key Provider
+ else:
+ if self.module.check_mode:
+ results['desired_operation'] = "remove standard key provider"
+ else:
+ results['operation'] = "remove standard key provider"
+ # Named Key Provider not found
+ if existing_kp_cluster is None:
+ output_msg = "Key Provider with name '%s' is not found." % kp_name
+ if self.module.check_mode:
+ results['msg'] = output_msg
+ self.module.exit_json(**results)
+ else:
+ self.module.fail_json(msg=output_msg)
+ else:
+ if self.module.check_mode:
+ results['target_key_provider'] = kp_name
+ self.module.exit_json(**results)
+ else:
+ self.remove_kms_cluster(existing_kp_cluster)
+ results['changed'] = True
+
+ if results['changed']:
+ key_provider_clusters = self.get_key_provider_clusters()
+ results['key_provider_clusters'] = self.gather_key_provider_cluster_info(key_provider_clusters)
+ self.module.exit_json(**results)
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ name=dict(type='str', required=True),
+ kms_info=dict(
+ type='list',
+ default=[],
+ elements='dict',
+ options=dict(
+ kms_name=dict(type='str'),
+ kms_ip=dict(type='str'),
+ kms_port=dict(type='int'),
+ remove_kms=dict(type='bool')
+ )
+ ),
+ proxy_server=dict(type='str'),
+ proxy_port=dict(type='int'),
+ kms_username=dict(type='str'),
+ kms_password=dict(type='str', no_log=True),
+ make_kms_trust_vc=dict(
+ type='dict',
+ options=dict(
+ upload_client_cert=dict(type='path'),
+ upload_client_key=dict(type='path'),
+ download_self_signed_cert=dict(type='path'),
+ download_client_csr=dict(type='path'),
+ upload_kms_signed_client_csr=dict(type='path')
+ )
+ ),
+ mark_default=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['present', 'absent'])
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ config_key_provider = PyVmomiHelper(module)
+ if not config_key_provider.is_vcenter():
+ module.fail_json(msg="hostname '%s' is not set to the vCenter server, please connect to vCenter for key"
+ " provider operations." % module.params.get('hostname'))
+ if not config_key_provider.vcenter_version_at_least(version=(6, 5, 0)):
+ module.fail_json(msg="vCenter server '%s' version is not >= 6.5.0, key provider is supported from vSphere 6.5."
+ % module.params.get('hostname'))
+ try:
+ config_key_provider.key_provider_operation()
+ except Exception as e:
+ module.fail_json(msg="Failed to configure key provider on vCenter with exception : %s" % to_native(e))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_about_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_about_info.py
new file mode 100644
index 000000000..2c92ec926
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_about_info.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_about_info
+short_description: Provides information about the VMware server to which the user is connecting
+description:
+- This module can be used to gather information about the VMware server to which the user is trying to connect.
+author:
+- Abhijeet Kasurde (@Akasurde)
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Provide information about vCenter
+ community.vmware.vmware_about_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ delegate_to: localhost
+ register: vcenter_about_info
+
+- name: Provide information about a standalone ESXi server
+ community.vmware.vmware_about_info:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ delegate_to: localhost
+ register: esxi_about_info
+'''
+
+RETURN = r'''
+about_info:
+ description:
+ - dict about VMware server
+ returned: success
+ type: str
+ sample:
+ {
+ "api_type": "VirtualCenter",
+ "api_version": "6.5",
+ "build": "5973321",
+ "instance_uuid": "dbed6e0c-bd88-4ef6-b594-21283e1c677f",
+ "license_product_name": "VMware VirtualCenter Server",
+ "license_product_version": "6.0",
+ "locale_build": "000",
+ "locale_version": "INTL",
+ "os_type": "darwin-amd64",
+ "product_full_name": "VMware vCenter Server 6.5.0 build-5973321",
+ "product_line_id": "vpx",
+ "product_name": "VMware vCenter Server (govmomi simulator)",
+ "vendor": "VMware, Inc.",
+ "version": "6.5.0"
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+
+
+class VmwareAboutManager(PyVmomi):
+ def __init__(self, module):
+ super(VmwareAboutManager, self).__init__(module)
+
+ def gather_about_info(self):
+
+ if not self.content:
+ self.module.exit_json(changed=False, about_info=dict())
+
+ about = self.content.about
+
+ self.module.exit_json(
+ changed=False,
+ about_info=dict(
+ product_name=about.name,
+ product_full_name=about.fullName,
+ vendor=about.vendor,
+ version=about.version,
+ build=about.build,
+ locale_version=about.localeVersion,
+ locale_build=about.localeBuild,
+ os_type=about.osType,
+ product_line_id=about.productLineId,
+ api_type=about.apiType,
+ api_version=about.apiVersion,
+ instance_uuid=about.instanceUuid,
+ license_product_name=about.licenseProductName,
+ license_product_version=about.licenseProductVersion,
+ )
+ )
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ vmware_about_info_mgr = VmwareAboutManager(module)
+ vmware_about_info_mgr.gather_about_info()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_category.py b/ansible_collections/community/vmware/plugins/modules/vmware_category.py
new file mode 100644
index 000000000..fa5371359
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_category.py
@@ -0,0 +1,378 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_category
+short_description: Manage VMware categories
+description:
+- This module can be used to create / delete / update VMware categories.
+- The tag feature was introduced in vSphere version 6, so this module is not supported in earlier versions of vSphere.
+- All variables and VMware object names are case sensitive.
+author:
+- Abhijeet Kasurde (@Akasurde)
+requirements:
+- vSphere Automation SDK
+options:
+ category_name:
+ description:
+ - The name of category to manage.
+ required: true
+ type: str
+ category_description:
+ description:
+ - The category description.
+ - This is required only if C(state) is set to C(present).
+ - This parameter is ignored, when C(state) is set to C(absent).
+ default: ''
+ type: str
+ category_cardinality:
+ description:
+ - The category cardinality.
+ - This parameter is ignored, when updating existing category.
+ choices: ['multiple', 'single']
+ default: 'multiple'
+ type: str
+ new_category_name:
+ description:
+ - The new name for an existing category.
+ - This value is used while updating an existing category.
+ type: str
+ state:
+ description:
+ - The state of category.
+    - If set to C(present) and the category does not exist, then the category is created.
+    - If set to C(present) and the category exists, then the category is updated.
+    - If set to C(absent) and the category exists, then the category is deleted.
+    - If set to C(absent) and the category does not exist, no action is taken.
+ - Process of updating category only allows name, description change.
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ type: str
+ associable_object_types:
+ description:
+ - List of object types that can be associated with the given category.
+ choices:
+ - All objects
+ - Cluster
+ - Content Library
+ - Datacenter
+ - Datastore
+ - Datastore Cluster
+ - Distributed Port Group
+ - Distributed Switch
+ - Folder
+ - Host
+ - Library item
+ - Network
+ - Host Network
+ - Opaque Network
+ - Resource Pool
+ - vApp
+ - Virtual Machine
+ type: list
+ elements: str
+extends_documentation_fragment:
+- community.vmware.vmware_rest_client.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Create a category
+ community.vmware.vmware_category:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ category_name: Sample_Cat_0001
+ category_description: Sample Description
+ category_cardinality: 'multiple'
+ state: present
+
+- name: Rename category
+ community.vmware.vmware_category:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ category_name: Sample_Category_0001
+ new_category_name: Sample_Category_0002
+ state: present
+
+- name: Update category description
+ community.vmware.vmware_category:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ category_name: Sample_Category_0001
+ category_description: Some fancy description
+ state: present
+
+- name: Delete category
+ community.vmware.vmware_category:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ category_name: Sample_Category_0002
+ state: absent
+
+- name: Create category with 2 associable object types
+ community.vmware.vmware_category:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ category_name: 'Sample_Category_0003'
+ category_description: 'sample description'
+ associable_object_types:
+ - Datastore
+ - Cluster
+ state: present
+'''
+
+RETURN = r'''
+category_results:
+ description: dictionary of category metadata
+ returned: on success
+ type: dict
+ sample: {
+ "category_id": "urn:vmomi:InventoryServiceCategory:d7120bda-9fa5-4f92-9d71-aa1acff2e5a8:GLOBAL",
+ "msg": "Category NewCat_0001 updated."
+ }
+'''
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.version import LooseVersion
+from ansible_collections.community.vmware.plugins.module_utils.vmware import connect_to_api
+from ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient
+
+try:
+ from pyVmomi.VmomiSupport import XMLNS_VMODL_BASE
+except ImportError:
+ XMLNS_VMODL_BASE = "urn:vim25"
+
+try:
+ from com.vmware.cis.tagging_client import CategoryModel
+ from com.vmware.vapi.std.errors_client import Error
+except ImportError:
+ pass
+
+
+class VmwareCategory(VmwareRestClient):
+ def __init__(self, module):
+ super(VmwareCategory, self).__init__(module)
+ self.category_service = self.api_client.tagging.Category
+ self.global_categories = dict()
+ self.category_name = self.params.get('category_name')
+ self.get_all_categories()
+ self.content = connect_to_api(self.module, return_si=False)
+
+ def ensure_state(self):
+ """Manage internal states of categories. """
+ desired_state = self.params.get('state')
+ states = {
+ 'present': {
+ 'present': self.state_update_category,
+ 'absent': self.state_create_category,
+ },
+ 'absent': {
+ 'present': self.state_delete_category,
+ 'absent': self.state_unchanged,
+ }
+ }
+ states[desired_state][self.check_category_status()]()
+
+ def state_create_category(self):
+ """Create category."""
+ category_spec = self.category_service.CreateSpec()
+ category_spec.name = self.category_name
+ category_spec.description = self.params.get('category_description')
+
+ if self.params.get('category_cardinality') == 'single':
+ category_spec.cardinality = CategoryModel.Cardinality.SINGLE
+ else:
+ category_spec.cardinality = CategoryModel.Cardinality.MULTIPLE
+
+ associable_object_types = self.params.get('associable_object_types')
+
+ def append_namespace(object_name):
+ return '%s:%s' % (XMLNS_VMODL_BASE, object_name)
+
+ associable_data = {
+ # With Namespace
+ 'cluster': append_namespace('ClusterComputeResource'),
+ 'datastore': append_namespace('Datastore'),
+ 'datastore cluster': append_namespace('StoragePod'),
+ 'folder': append_namespace('Folder'),
+ 'host': append_namespace('HostSystem'),
+ 'library item': append_namespace('com.vmware.content.library.Item'),
+
+ # Without Namespace
+ 'datacenter': 'Datacenter',
+ 'distributed port group': 'DistributedVirtualPortgroup',
+ 'distributed switch': ['VmwareDistributedVirtualSwitch', 'DistributedVirtualSwitch'],
+ 'content library': 'com.vmware.content.Library',
+ 'resource pool': 'ResourcePool',
+ 'vapp': 'VirtualApp',
+ 'virtual machine': 'VirtualMachine',
+ 'network': ['Network', 'HostNetwork', 'OpaqueNetwork'],
+ 'host network': 'HostNetwork',
+ 'opaque network': 'OpaqueNetwork',
+ }
+ obj_types_set = []
+ if associable_object_types:
+ for obj_type in associable_object_types:
+ lower_obj_type = obj_type.lower()
+ if lower_obj_type == 'all objects':
+ if LooseVersion(self.content.about.version) < LooseVersion('7'):
+ break
+
+ for category in list(associable_data.values()):
+ if isinstance(category, list):
+ obj_types_set.extend(category)
+ else:
+ obj_types_set.append(category)
+ break
+ if lower_obj_type in associable_data:
+ value = associable_data.get(lower_obj_type)
+ if isinstance(value, list):
+ obj_types_set.extend(value)
+ else:
+ obj_types_set.append(value)
+ else:
+ obj_types_set.append(obj_type)
+
+ category_spec.associable_types = set(obj_types_set)
+
+ category_id = ''
+ try:
+ category_id = self.category_service.create(category_spec)
+ except Error as error:
+ self.module.fail_json(msg="%s" % self.get_error_message(error))
+
+ msg = "No category created"
+ changed = False
+ if category_id:
+ changed = True
+ msg = "Category '%s' created." % category_spec.name
+
+ self.module.exit_json(changed=changed,
+ category_results=dict(msg=msg, category_id=category_id))
+
+ def state_unchanged(self):
+ """Return unchanged state."""
+ self.module.exit_json(changed=False)
+
+ def state_update_category(self):
+ """Update category."""
+ category_id = self.global_categories[self.category_name]['category_id']
+ changed = False
+ results = dict(msg="Category %s is unchanged." % self.category_name,
+ category_id=category_id)
+
+ category_update_spec = self.category_service.UpdateSpec()
+ change_list = []
+ old_cat_desc = self.global_categories[self.category_name]['category_description']
+ new_cat_desc = self.params.get('category_description')
+ if new_cat_desc and new_cat_desc != old_cat_desc:
+ category_update_spec.description = new_cat_desc
+ results['msg'] = 'Category %s updated.' % self.category_name
+ change_list.append(True)
+
+ new_cat_name = self.params.get('new_category_name')
+ if new_cat_name in self.global_categories:
+ self.module.fail_json(msg="Unable to rename %s as %s already"
+ " exists in configuration." % (self.category_name, new_cat_name))
+ old_cat_name = self.global_categories[self.category_name]['category_name']
+
+ if new_cat_name and new_cat_name != old_cat_name:
+ category_update_spec.name = new_cat_name
+ results['msg'] = 'Category %s updated.' % self.category_name
+ change_list.append(True)
+
+ if any(change_list):
+ try:
+ self.category_service.update(category_id, category_update_spec)
+ changed = True
+ except Error as error:
+ self.module.fail_json(msg="%s" % self.get_error_message(error))
+
+ self.module.exit_json(changed=changed,
+ category_results=results)
+
+ def state_delete_category(self):
+ """Delete category."""
+ category_id = self.global_categories[self.category_name]['category_id']
+ try:
+ self.category_service.delete(category_id=category_id)
+ except Error as error:
+ self.module.fail_json(msg="%s" % self.get_error_message(error))
+ self.module.exit_json(changed=True,
+ category_results=dict(msg="Category '%s' deleted." % self.category_name,
+ category_id=category_id))
+
+ def check_category_status(self):
+ """
+ Check if category exists or not
+ Returns: 'present' if category found, else 'absent'
+
+ """
+ if self.category_name in self.global_categories:
+ return 'present'
+ return 'absent'
+
+ def get_all_categories(self):
+ """Retrieve all category information."""
+ try:
+
+ for category in self.category_service.list():
+ category_obj = self.category_service.get(category)
+ self.global_categories[category_obj.name] = dict(
+ category_description=category_obj.description,
+ category_used_by=category_obj.used_by,
+ category_cardinality=str(category_obj.cardinality),
+ category_associable_types=category_obj.associable_types,
+ category_id=category_obj.id,
+ category_name=category_obj.name,
+ )
+ except Error as error:
+ self.module.fail_json(msg=self.get_error_message(error))
+ except Exception as exc_err:
+ self.module.fail_json(msg=to_native(exc_err))
+
+
+def main():
+ argument_spec = VmwareRestClient.vmware_client_argument_spec()
+ argument_spec.update(
+ category_name=dict(type='str', required=True),
+ category_description=dict(type='str', default='', required=False),
+ category_cardinality=dict(type='str', choices=["multiple", "single"], default="multiple"),
+ new_category_name=dict(type='str'),
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ associable_object_types=dict(
+ type='list',
+ choices=[
+ 'All objects', 'Cluster', 'Content Library', 'Datacenter',
+ 'Datastore', 'Datastore Cluster', 'Distributed Port Group', 'Distributed Switch',
+ 'Folder', 'Host', 'Library item', 'Network',
+ 'Host Network', 'Opaque Network', 'Resource Pool', 'vApp',
+ 'Virtual Machine',
+ ],
+ elements='str',
+ ),
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ vmware_category = VmwareCategory(module)
+ vmware_category.ensure_state()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_category_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_category_info.py
new file mode 100644
index 000000000..10fdb162f
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_category_info.py
@@ -0,0 +1,120 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_category_info
+short_description: Gather info about VMware tag categories
+description:
+- This module can be used to gather information about VMware tag categories.
+- Tag feature is introduced in vSphere 6 version, so this module is not supported in earlier versions of vSphere.
+- All variables and VMware object names are case sensitive.
+author:
+- Abhijeet Kasurde (@Akasurde)
+requirements:
+- vSphere Automation SDK
+extends_documentation_fragment:
+- community.vmware.vmware_rest_client.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather info about tag categories
+ community.vmware.vmware_category_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ delegate_to: localhost
+ register: all_tag_category_info
+
+- name: Gather category id from given tag category
+ community.vmware.vmware_category_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ delegate_to: localhost
+ register: tag_category_results
+
+- set_fact:
+ category_id: "{{ item.category_id }}"
+ loop: "{{ tag_category_results.tag_category_info|json_query(query) }}"
+ vars:
+ query: "[?category_name==`Category0001`]"
+- debug: var=category_id
+
+'''
+
+RETURN = r'''
+tag_category_info:
+ description: metadata of tag categories
+ returned: always
+ type: list
+ sample: [
+ {
+ "category_associable_types": [],
+ "category_cardinality": "MULTIPLE",
+ "category_description": "awesome description",
+ "category_id": "urn:vmomi:InventoryServiceCategory:e785088d-6981-4b1c-9fb8-1100c3e1f742:GLOBAL",
+ "category_name": "Category0001",
+ "category_used_by": []
+ },
+ {
+ "category_associable_types": [
+ "VirtualMachine"
+ ],
+ "category_cardinality": "SINGLE",
+ "category_description": "another awesome description",
+ "category_id": "urn:vmomi:InventoryServiceCategory:ae5b7c6c-e622-4671-9b96-76e93adb70f2:GLOBAL",
+ "category_name": "template_tag",
+ "category_used_by": []
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient
+
+
+class VmwareCategoryInfoManager(VmwareRestClient):
+ def __init__(self, module):
+ super(VmwareCategoryInfoManager, self).__init__(module)
+ self.category_service = self.api_client.tagging.Category
+
+ def get_all_tag_categories(self):
+ """Retrieve all tag category information."""
+ global_tag_categories = []
+ for category in self.category_service.list():
+ category_obj = self.category_service.get(category)
+ global_tag_categories.append(
+ dict(
+ category_description=category_obj.description,
+ category_used_by=category_obj.used_by,
+ category_cardinality=str(category_obj.cardinality),
+ category_associable_types=category_obj.associable_types,
+ category_id=category_obj.id,
+ category_name=category_obj.name,
+ )
+ )
+
+ self.module.exit_json(changed=False, tag_category_info=global_tag_categories)
+
+
+def main():
+ argument_spec = VmwareRestClient.vmware_client_argument_spec()
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ vmware_category_info = VmwareCategoryInfoManager(module)
+ vmware_category_info.get_all_tag_categories()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_cfg_backup.py b/ansible_collections/community/vmware/plugins/modules/vmware_cfg_backup.py
new file mode 100644
index 000000000..1a3a78813
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_cfg_backup.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, IBM Corp
+# Author(s): Andreas Nafpliotis <nafpliot@de.ibm.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_cfg_backup
+short_description: Backup / Restore / Reset ESXi host configuration
+description:
+ - This module can be used to perform various operations related to backup, restore and reset of ESXi host configuration.
+author:
+ - Andreas Nafpliotis (@nafpliot-ibm)
+notes:
+ - Works only for ESXi hosts
+ - For configuration load or reset, the host will be switched automatically to maintenance mode.
+options:
+ esxi_hostname:
+ description:
+ - Name of ESXi server. This is required only if authentication against a vCenter is done.
+ required: false
+ type: str
+ dest:
+ description:
+ - The destination where the ESXi configuration bundle will be saved. The I(dest) can be a folder or a file.
+ - If I(dest) is a folder, the backup file will be saved in the folder with the default filename generated from the ESXi server.
+ - If I(dest) is a file, the backup file will be saved with that filename. The file extension will always be .tgz.
+ type: path
+ src:
+ description:
+ - The file containing the ESXi configuration that will be restored.
+ type: path
+ state:
+ description:
+ - If C(saved), the .tgz backup bundle will be saved in I(dest).
+ - If C(absent), the host configuration will be reset to default values.
+ - If C(loaded), the backup file in I(src) will be loaded to the ESXi host rewriting the hosts settings.
+ choices: [saved, absent, loaded]
+ type: str
+ required: true
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Save the ESXi configuration locally by authenticating directly against the ESXi host
+ community.vmware.vmware_cfg_backup:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ state: saved
+ dest: /tmp/
+ delegate_to: localhost
+
+- name: Save the ESXi configuration locally by authenticating against the vCenter and selecting the ESXi host
+ community.vmware.vmware_cfg_backup:
+ hostname: '{{ vcenter_hostname }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ state: saved
+ dest: /tmp/
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+dest_file:
+ description: The full path of where the file holding the ESXi configurations was stored
+ returned: changed
+ type: str
+ sample: /tmp/configBundle-esxi.host.domain.tgz
+'''
+
+import os
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, get_all_objs, wait_for_task, PyVmomi
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.module_utils._text import to_native
+
+
+class VMwareConfigurationBackup(PyVmomi):
+ def __init__(self, module):
+ super(VMwareConfigurationBackup, self).__init__(module)
+ self.state = self.module.params['state']
+ self.dest = self.module.params['dest']
+ self.src = self.module.params['src']
+ self.hostname = self.module.params['hostname']
+ self.username = self.module.params['username']
+ self.password = self.module.params['password']
+ self.validate_certs = self.module.params['validate_certs']
+ self.esxi_hostname = self.module.params.get('esxi_hostname', None)
+ self.host = self.find_host_system()
+
+ # discard vim returned hostname if endpoint is a standalone ESXi host
+ self.cfg_hurl = self.hostname if (self.content.about.apiType == "HostAgent") else self.host.name
+
+ def find_host_system(self):
+ if self.esxi_hostname:
+ host_system_obj = self.find_hostsystem_by_name(host_name=self.esxi_hostname)
+ if host_system_obj:
+ return host_system_obj
+ else:
+ self.module.fail_json(msg="Failed to find ESXi %s" % self.esxi_hostname)
+
+ host_system = get_all_objs(self.content, [vim.HostSystem])
+ return list(host_system)[0]
+
+ def process_state(self):
+ if self.state == 'saved':
+ self.save_configuration()
+
+ if self.state == 'absent':
+ self.reset_configuration()
+
+ if self.state == 'loaded':
+ self.load_configuration()
+
+ def load_configuration(self):
+ if not os.path.isfile(self.src):
+ self.module.fail_json(msg="Source file {0} does not exist".format(self.src))
+
+ url = self.host.configManager.firmwareSystem.QueryFirmwareConfigUploadURL()
+ url = url.replace('*', self.cfg_hurl)
+ if self.module.params["port"] == 443:
+ url = url.replace("http:", "https:")
+ # find manually the url if there is a redirect because urllib2 -per RFC- doesn't do automatic redirects for PUT requests
+ try:
+ open_url(url=url, method='HEAD', validate_certs=self.validate_certs)
+ except HTTPError as e:
+ url = e.geturl()
+
+ try:
+ with open(self.src, 'rb') as file:
+ data = file.read()
+ open_url(
+ url=url, data=data, method='PUT', validate_certs=self.validate_certs,
+ url_username=self.username, url_password=self.password, force_basic_auth=True)
+ except Exception as e:
+ self.module.fail_json(msg=to_native(e))
+
+ if not self.host.runtime.inMaintenanceMode:
+ self.enter_maintenance()
+ try:
+ self.host.configManager.firmwareSystem.RestoreFirmwareConfiguration(force=True)
+ self.module.exit_json(changed=True)
+ except Exception as e:
+ self.exit_maintenance()
+ self.module.fail_json(msg=to_native(e))
+
+ def reset_configuration(self):
+ if not self.host.runtime.inMaintenanceMode:
+ self.enter_maintenance()
+ try:
+ self.host.configManager.firmwareSystem.ResetFirmwareToFactoryDefaults()
+ self.module.exit_json(changed=True)
+ except Exception as e:
+ self.exit_maintenance()
+ self.module.fail_json(msg=to_native(e))
+
+ def save_configuration(self):
+ url = self.host.configManager.firmwareSystem.BackupFirmwareConfiguration()
+ url = url.replace('*', self.cfg_hurl)
+ if self.module.params["port"] == 443:
+ url = url.replace("http:", "https:")
+ if os.path.isdir(self.dest):
+ filename = url.rsplit('/', 1)[1]
+ self.dest = os.path.join(self.dest, filename)
+ else:
+ filename, file_extension = os.path.splitext(self.dest)
+ if file_extension != ".tgz":
+ self.dest = filename + ".tgz"
+ try:
+ request = open_url(url=url, validate_certs=self.validate_certs)
+ with open(self.dest, "wb") as file:
+ file.write(request.read())
+ self.module.exit_json(changed=True, dest_file=self.dest)
+ except IOError as e:
+ error_msg = "Failed to save %s " % url
+ error_msg += "to %s. Ensure that the dest path exists and is writable. " % self.dest
+ error_msg += "Details: %s" % to_native(e)
+ self.module.fail_json(msg=error_msg)
+ except Exception as e:
+ self.module.fail_json(msg=to_native(e))
+
+ def enter_maintenance(self):
+ try:
+ task = self.host.EnterMaintenanceMode_Task(timeout=15)
+ success, result = wait_for_task(task)
+ except Exception as e:
+ self.module.fail_json(msg="Failed to enter maintenance mode."
+ " Ensure that there are no powered on machines on the host. %s" % to_native(e))
+
+ def exit_maintenance(self):
+ try:
+ task = self.host.ExitMaintenanceMode_Task(timeout=15)
+ success, result = wait_for_task(task)
+ except Exception as generic_exc:
+ self.module.fail_json(msg="Failed to exit maintenance mode due to %s" % to_native(generic_exc))
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(dest=dict(required=False, type='path'),
+ esxi_hostname=dict(required=False, type='str'),
+ src=dict(required=False, type='path'),
+ state=dict(required=True, choices=['saved', 'absent', 'loaded'], type='str')))
+ required_if = [('state', 'saved', ['dest']),
+ ('state', 'loaded', ['src'])]
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ required_if=required_if,
+ supports_check_mode=False)
+
+ vmware_cfg_backup = VMwareConfigurationBackup(module)
+ vmware_cfg_backup.process_state()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_cluster.py b/ansible_collections/community/vmware/plugins/modules/vmware_cluster.py
new file mode 100644
index 000000000..6007b2f08
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_cluster.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_cluster
+short_description: Manage VMware vSphere clusters
+description:
+ - Adds or removes VMware vSphere clusters.
+ - To manage DRS, HA and VSAN related configurations, use the new modules vmware_cluster_drs, vmware_cluster_ha and vmware_cluster_vsan.
+ - All values and VMware object names are case sensitive.
+author:
+- Joseph Callen (@jcpowermac)
+- Abhijeet Kasurde (@Akasurde)
+options:
+ cluster_name:
+ description:
+ - The name of the cluster to be managed.
+ type: str
+ required: true
+ datacenter:
+ description:
+ - The name of the datacenter.
+ type: str
+ required: true
+ aliases: [ datacenter_name ]
+ state:
+ description:
+ - Create C(present) or remove C(absent) a VMware vSphere cluster.
+ choices: [ absent, present ]
+ default: present
+ type: str
+seealso:
+- module: community.vmware.vmware_cluster_drs
+- module: community.vmware.vmware_cluster_ha
+- module: community.vmware.vmware_cluster_vsan
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Create Cluster
+ community.vmware.vmware_cluster:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter_name: datacenter
+ cluster_name: cluster
+ delegate_to: localhost
+
+- name: Delete Cluster
+ community.vmware.vmware_cluster:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter_name: datacenter
+ cluster_name: cluster
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = r'''#
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi,
+ find_datacenter_by_name,
+ vmware_argument_spec,
+ wait_for_task)
+from ansible.module_utils._text import to_native
+
+
+class VMwareCluster(PyVmomi):
+ def __init__(self, module):
+ super(VMwareCluster, self).__init__(module)
+ self.cluster_name = module.params['cluster_name']
+ self.datacenter_name = module.params['datacenter']
+ self.desired_state = module.params['state']
+ self.datacenter = None
+ self.cluster = None
+
+ def process_state(self):
+ """
+ Manage internal states of cluster
+ """
+ cluster_states = {
+ 'absent': {
+ 'present': self.state_destroy_cluster,
+ 'absent': self.state_exit_unchanged,
+ },
+ 'present': {
+ 'present': self.state_exit_unchanged,
+ 'absent': self.state_create_cluster,
+ }
+ }
+ current_state = self.check_cluster_configuration()
+ # Based on the desired_state and the current_state call
+ # the appropriate method from the dictionary
+ cluster_states[self.desired_state][current_state]()
+
+ def state_create_cluster(self):
+ """
+ Create cluster with given configuration
+ """
+ try:
+ cluster_config_spec = vim.cluster.ConfigSpecEx()
+ if not self.module.check_mode:
+ self.datacenter.hostFolder.CreateClusterEx(self.cluster_name, cluster_config_spec)
+ self.module.exit_json(changed=True)
+ except vmodl.fault.InvalidArgument as invalid_args:
+ self.module.fail_json(msg="Cluster configuration specification"
+ " parameter is invalid : %s" % to_native(invalid_args.msg))
+ except vim.fault.InvalidName as invalid_name:
+ self.module.fail_json(msg="'%s' is an invalid name for a"
+ " cluster : %s" % (self.cluster_name,
+ to_native(invalid_name.msg)))
+ except vmodl.fault.NotSupported as not_supported:
+ # This should never happen
+ self.module.fail_json(msg="Trying to create a cluster on an incorrect"
+ " folder object : %s" % to_native(not_supported.msg))
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=to_native(runtime_fault.msg))
+ except vmodl.MethodFault as method_fault:
+ # This should never happen either
+ self.module.fail_json(msg=to_native(method_fault.msg))
+ except Exception as generic_exc:
+ self.module.fail_json(msg="Failed to create cluster"
+ " due to generic exception %s" % to_native(generic_exc))
+
+ def state_destroy_cluster(self):
+ """
+ Destroy cluster
+ """
+ changed, result = True, None
+
+ try:
+ if not self.module.check_mode:
+ task = self.cluster.Destroy_Task()
+ changed, result = wait_for_task(task)
+ self.module.exit_json(changed=changed, result=result)
+ except vim.fault.VimFault as vim_fault:
+ self.module.fail_json(msg=to_native(vim_fault.msg))
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=to_native(runtime_fault.msg))
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=to_native(method_fault.msg))
+ except Exception as generic_exc:
+ self.module.fail_json(msg="Failed to destroy cluster"
+ " due to generic exception %s" % to_native(generic_exc))
+
+ def state_exit_unchanged(self):
+ """
+ Exit without any change
+ """
+ self.module.exit_json(changed=False)
+
+ def check_cluster_configuration(self):
+ """
+ Check cluster configuration
+        Returns: 'present' if cluster exists, else 'absent'
+
+ """
+ try:
+ self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name)
+ if self.datacenter is None:
+ self.module.fail_json(msg="Datacenter %s does not exist." % self.datacenter_name)
+ self.cluster = self.find_cluster_by_name(cluster_name=self.cluster_name, datacenter_name=self.datacenter)
+
+ if self.cluster is None:
+ return 'absent'
+
+ return 'present'
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=to_native(runtime_fault.msg))
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=to_native(method_fault.msg))
+ except Exception as generic_exc:
+ self.module.fail_json(msg="Failed to check configuration"
+ " due to generic exception %s" % to_native(generic_exc))
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(
+ cluster_name=dict(type='str', required=True),
+ datacenter=dict(type='str', required=True, aliases=['datacenter_name']),
+ state=dict(type='str',
+ default='present',
+ choices=['absent', 'present']),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ vmware_cluster = VMwareCluster(module)
+ vmware_cluster.process_state()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_cluster_dpm.py b/ansible_collections/community/vmware/plugins/modules/vmware_cluster_dpm.py
new file mode 100644
index 000000000..5ba62b2f9
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_cluster_dpm.py
@@ -0,0 +1,186 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2022, Swisscom (Schweiz) AG
+# Author(s): Olivia Luetolf <olivia.luetolf@swisscom.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_cluster_dpm
+short_description: Manage Distributed Power Management (DPM) on VMware vSphere clusters
+description:
+ - Manages DPM on VMware vSphere clusters.
+ - All values and VMware object names are case sensitive.
+author:
+- Olivia Luetolf (@olilu)
+options:
+ cluster_name:
+ description:
+ - The name of the cluster to be managed.
+ type: str
+ required: true
+ datacenter:
+ description:
+ - The name of the datacenter.
+ type: str
+ required: true
+ aliases: [ datacenter_name ]
+ enable_dpm:
+ description:
+ - Whether to enable DPM.
+ type: bool
+ default: false
+ default_dpm_behaviour:
+ description:
+      - Whether DPM should be automated or manual.
+ type: str
+ default: automated
+ choices: [ automated, manual ]
+ host_power_action_rate:
+ description:
+      - Specify the host power action rate.
+      - 1 is the lowest and 5 is the highest.
+ type: int
+ default: 3
+ choices: [1, 2, 3, 4, 5]
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Enable DPM
+ community.vmware.vmware_cluster_dpm:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter_name: datacenter
+ cluster_name: cluster
+ enable_dpm: true
+ default_dpm_behaviour: automated
+ host_power_action_rate: 2
+ delegate_to: localhost
+
+'''
+
+RETURN = r'''
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi,
+ TaskError,
+ find_datacenter_by_name,
+ vmware_argument_spec,
+ wait_for_task
+)
+from ansible.module_utils._text import to_native
+
+
+class VMwareCluster(PyVmomi):
+ def __init__(self, module):
+ super(VMwareCluster, self).__init__(module)
+ self.cluster_name = module.params['cluster_name']
+ self.datacenter_name = module.params['datacenter']
+ self.enable_dpm = module.params['enable_dpm']
+ self.default_dpm_behaviour = module.params['default_dpm_behaviour']
+ self.host_power_action_rate = [5, 4, 3, 2, 1][module.params['host_power_action_rate'] - 1]
+ self.datacenter = None
+ self.cluster = None
+
+ self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name)
+ if self.datacenter is None:
+ self.module.fail_json(msg="Datacenter %s does not exist." % self.datacenter_name)
+
+ self.cluster = self.find_cluster_by_name(cluster_name=self.cluster_name, datacenter_name=self.datacenter)
+ if self.cluster is None:
+ self.module.fail_json(msg="Cluster %s does not exist." % self.cluster_name)
+
+ def check_dpm_config_diff(self):
+ """
+        Check DPM configuration diff
+        Returns: (changes, change_message) tuple; changes is True if there is a diff, else False
+
+ """
+ dpm_config = self.cluster.configurationEx.dpmConfigInfo
+ change_message = None
+ changes = False
+
+ if dpm_config.enabled != self.enable_dpm:
+ change_message = 'DPM enabled status changes'
+ changes = True
+ return changes, change_message
+ elif self.enable_dpm:
+ if dpm_config.hostPowerActionRate != self.host_power_action_rate or dpm_config.defaultDpmBehavior != self.default_dpm_behaviour:
+ change_message = 'DPM Host Power Action Rate and/or default DPM behaviour change.'
+ changes = True
+ return changes, change_message
+
+ return changes, change_message
+
+ def configure_dpm(self):
+ """
+        Manage DPM configuration
+
+ """
+ changed, result = self.check_dpm_config_diff()
+
+ if changed:
+ if not self.module.check_mode:
+ cluster_config_spec = vim.cluster.ConfigSpecEx()
+ cluster_config_spec.dpmConfig = vim.cluster.DpmConfigInfo()
+ cluster_config_spec.dpmConfig.enabled = self.enable_dpm
+ cluster_config_spec.dpmConfig.defaultDpmBehavior = self.default_dpm_behaviour
+ cluster_config_spec.dpmConfig.hostPowerActionRate = self.host_power_action_rate
+
+ try:
+ task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True)
+ changed = wait_for_task(task)[0]
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=to_native(runtime_fault.msg))
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=to_native(method_fault.msg))
+ except TaskError as task_e:
+ self.module.fail_json(msg=to_native(task_e))
+ except Exception as generic_exc:
+ self.module.fail_json(msg="Failed to update cluster"
+ " due to generic exception %s" % to_native(generic_exc))
+ else:
+ changed = True
+
+ self.module.exit_json(changed=changed, result=result)
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(
+ cluster_name=dict(type='str', required=True),
+ datacenter=dict(type='str', required=True, aliases=['datacenter_name']),
+ enable_dpm=dict(type='bool', default=False),
+ default_dpm_behaviour=dict(type='str', choices=['automated', 'manual'], default='automated'),
+ host_power_action_rate=dict(type='int', choices=[1, 2, 3, 4, 5], default=3)
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ vmware_cluster_dpm = VMwareCluster(module)
+ vmware_cluster_dpm.configure_dpm()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_cluster_drs.py b/ansible_collections/community/vmware/plugins/modules/vmware_cluster_drs.py
new file mode 100644
index 000000000..8954ff188
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_cluster_drs.py
@@ -0,0 +1,242 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_cluster_drs
+short_description: Manage Distributed Resource Scheduler (DRS) on VMware vSphere clusters
+description:
+ - Manages DRS on VMware vSphere clusters.
+ - All values and VMware object names are case sensitive.
+author:
+- Joseph Callen (@jcpowermac)
+- Abhijeet Kasurde (@Akasurde)
+options:
+ cluster_name:
+ description:
+ - The name of the cluster to be managed.
+ type: str
+ required: true
+ datacenter:
+ description:
+ - The name of the datacenter.
+ type: str
+ required: true
+ aliases: [ datacenter_name ]
+ enable:
+ description:
+ - Whether to enable DRS.
+ type: bool
+ default: true
+ drs_enable_vm_behavior_overrides:
+ description:
+ - Whether DRS Behavior overrides for individual virtual machines are enabled.
+ - If set to C(true), overrides C(drs_default_vm_behavior).
+ type: bool
+ default: true
+ drs_default_vm_behavior:
+ description:
+ - Specifies the cluster-wide default DRS behavior for virtual machines.
+ - If set to C(partiallyAutomated), vCenter generates recommendations for virtual machine migration and
+ for the placement with a host, then automatically implements placement recommendations at power on.
+ - If set to C(manual), then vCenter generates recommendations for virtual machine migration and
+ for the placement with a host, but does not implement the recommendations automatically.
+ - If set to C(fullyAutomated), then vCenter automates both the migration of virtual machines
+ and their placement with a host at power on.
+ type: str
+ default: fullyAutomated
+ choices: [ fullyAutomated, manual, partiallyAutomated ]
+ drs_vmotion_rate:
+ description:
+ - Threshold for generated ClusterRecommendations ranging from 1 (lowest) to 5 (highest).
+ type: int
+ default: 3
+ choices: [ 1, 2, 3, 4, 5 ]
+ advanced_settings:
+ description:
+ - A dictionary of advanced DRS settings.
+ default: {}
+ type: dict
+ predictive_drs:
+ version_added: '3.3.0'
+ description:
+ - In addition to real-time metrics, DRS will respond to forecasted metrics provided by vRealize Operations Manager.
+ - You must also configure Predictive DRS in a version of vRealize Operations that supports this feature.
+ type: bool
+ default: false
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: Enable DRS
+ community.vmware.vmware_cluster_drs:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter_name: datacenter
+ cluster_name: cluster
+ enable: true
+ delegate_to: localhost
+- name: Enable DRS and distribute a more even number of virtual machines across hosts for availability
+ community.vmware.vmware_cluster_drs:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter_name: datacenter
+ cluster_name: cluster
+ enable: true
+ advanced_settings:
+ 'TryBalanceVmsPerHost': '1'
+ delegate_to: localhost
+- name: Enable DRS and set default VM behavior to partially automated
+ community.vmware.vmware_cluster_drs:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter_name: DC0
+ cluster_name: "{{ cluster_name }}"
+ enable: true
+ drs_default_vm_behavior: partiallyAutomated
+ delegate_to: localhost
+'''
+
+RETURN = r'''#
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi,
+ TaskError,
+ find_datacenter_by_name,
+ vmware_argument_spec,
+ wait_for_task,
+ option_diff,
+)
+from ansible.module_utils._text import to_native
+
+
+class VMwareCluster(PyVmomi):
+ def __init__(self, module):
+ super(VMwareCluster, self).__init__(module)
+ self.cluster_name = module.params['cluster_name']
+ self.datacenter_name = module.params['datacenter']
+ self.enable_drs = module.params['enable']
+ self.datacenter = None
+ self.cluster = None
+ self.drs_vmotion_rate = [5, 4, 3, 2, 1][self.params.get('drs_vmotion_rate') - 1]
+
+ self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name)
+ if self.datacenter is None:
+ self.module.fail_json(msg="Datacenter %s does not exist." % self.datacenter_name)
+
+ self.cluster = self.find_cluster_by_name(cluster_name=self.cluster_name, datacenter_name=self.datacenter)
+ if self.cluster is None:
+ self.module.fail_json(msg="Cluster %s does not exist." % self.cluster_name)
+
+ self.advanced_settings = self.params.get('advanced_settings')
+ if self.advanced_settings:
+ self.changed_advanced_settings = option_diff(self.advanced_settings, self.cluster.configurationEx.drsConfig.option)
+ else:
+ self.changed_advanced_settings = None
+
+ def check_drs_config_diff(self):
+ """
+ Check DRS configuration diff
+ Returns: True if there is diff, else False
+ """
+ drs_config = self.cluster.configurationEx.drsConfig
+
+ if drs_config.enabled != self.enable_drs or \
+ drs_config.enableVmBehaviorOverrides != self.params.get('drs_enable_vm_behavior_overrides') or \
+ drs_config.defaultVmBehavior != self.params.get('drs_default_vm_behavior') or \
+ drs_config.vmotionRate != self.drs_vmotion_rate or \
+ self.cluster.configurationEx.proactiveDrsConfig.enabled != self.params.get('predictive_drs'):
+ return True
+
+ if self.changed_advanced_settings:
+ return True
+
+ return False
+
+ def configure_drs(self):
+ """
+ Manage DRS configuration
+ """
+ changed, result = False, None
+
+ if self.check_drs_config_diff():
+ if not self.module.check_mode:
+ cluster_config_spec = vim.cluster.ConfigSpecEx()
+ cluster_config_spec.drsConfig = vim.cluster.DrsConfigInfo()
+ cluster_config_spec.proactiveDrsConfig = vim.cluster.ProactiveDrsConfigInfo()
+ cluster_config_spec.drsConfig.enabled = self.enable_drs
+ cluster_config_spec.drsConfig.enableVmBehaviorOverrides = self.params.get('drs_enable_vm_behavior_overrides')
+ cluster_config_spec.drsConfig.defaultVmBehavior = self.params.get('drs_default_vm_behavior')
+ cluster_config_spec.drsConfig.vmotionRate = self.drs_vmotion_rate
+ cluster_config_spec.proactiveDrsConfig.enabled = self.params.get('predictive_drs')
+
+ if self.changed_advanced_settings:
+ cluster_config_spec.drsConfig.option = self.changed_advanced_settings
+
+ try:
+ task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True)
+ changed, result = wait_for_task(task)
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=to_native(runtime_fault.msg))
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=to_native(method_fault.msg))
+ except TaskError as task_e:
+ self.module.fail_json(msg=to_native(task_e))
+ except Exception as generic_exc:
+ self.module.fail_json(msg="Failed to update cluster"
+ " due to generic exception %s" % to_native(generic_exc))
+ else:
+ changed = True
+
+ self.module.exit_json(changed=changed, result=result)
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(
+ cluster_name=dict(type='str', required=True),
+ datacenter=dict(type='str', required=True, aliases=['datacenter_name']),
+ # DRS
+ enable=dict(type='bool', default=True),
+ drs_enable_vm_behavior_overrides=dict(type='bool', default=True),
+ drs_default_vm_behavior=dict(type='str',
+ choices=['fullyAutomated', 'manual', 'partiallyAutomated'],
+ default='fullyAutomated'),
+ drs_vmotion_rate=dict(type='int',
+ choices=[1, 2, 3, 4, 5],
+ default=3),
+ advanced_settings=dict(type='dict', default=dict(), required=False),
+ predictive_drs=dict(type='bool', default=False),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ vmware_cluster_drs = VMwareCluster(module)
+ vmware_cluster_drs.configure_drs()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_cluster_drs_recommendations.py b/ansible_collections/community/vmware/plugins/modules/vmware_cluster_drs_recommendations.py
new file mode 100644
index 000000000..2939aa784
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_cluster_drs_recommendations.py
@@ -0,0 +1,119 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2023, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_cluster_drs_recommendations
+version_added: '3.7.0'
+short_description: Apply DRS Recommendations
+description:
+ - Apply DRS Recommendations for Cluster.
+author:
+- Nina Loser (@Nina2244)
+options:
+ cluster_name:
+ description:
+ - The name of the cluster to be managed.
+ type: str
+ required: true
+ datacenter:
+ description:
+ - The name of the datacenter.
+ type: str
+ required: true
+ aliases: [ datacenter_name ]
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Apply DRS Recommendations for Cluster
+  community.vmware.vmware_cluster_drs_recommendations:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter_name: datacenter
+ cluster_name: cluster
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+result:
+ description:
+ - list of the recommendations
+    - Which server moved from which host to which host.
+ returned: always
+ type: list
+ sample: ["server1 move from host1 to host2.", "server2 move from host1 to host2."]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi,
+ find_datacenter_by_name,
+ vmware_argument_spec,
+ wait_for_task)
+
+
+class VMwareCluster(PyVmomi):
+ def __init__(self, module):
+ super(VMwareCluster, self).__init__(module)
+ self.cluster_name = module.params['cluster_name']
+ self.datacenter_name = module.params['datacenter']
+ self.datacenter = None
+ self.cluster = None
+
+ self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name)
+ if self.datacenter is None:
+ self.module.fail_json(msg="Datacenter %s does not exist." % self.datacenter_name)
+
+ self.cluster = self.find_cluster_by_name(cluster_name=self.cluster_name, datacenter_name=self.datacenter)
+ if self.cluster is None:
+ self.module.fail_json(msg="Cluster %s does not exist." % self.cluster_name)
+
+ def recommendations(self):
+ results = []
+ changed = False
+ self.cluster.RefreshRecommendation()
+ if len(self.cluster.recommendation) == 0:
+ self.module.exit_json(changed=changed, result="No recommendations.")
+ else:
+ for index, recommendation in enumerate(self.cluster.recommendation):
+ results.append("%s move from %s to %s." % (recommendation.action[0].target.name,
+ recommendation.action[0].drsMigration.source.name,
+ recommendation.action[0].drsMigration.destination.name))
+ if not self.module.check_mode:
+ task = self.cluster.ApplyRecommendation(recommendation.key)
+ changed = True
+ if index == len(self.cluster.recommendation) - 1 and hasattr(task, 'info'):
+ wait_for_task(task)
+ self.module.exit_json(changed=changed, result=results)
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(
+ cluster_name=dict(type='str', required=True),
+ datacenter=dict(type='str', required=True, aliases=['datacenter_name']),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ vmware_cluster = VMwareCluster(module)
+ vmware_cluster.recommendations()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_cluster_ha.py b/ansible_collections/community/vmware/plugins/modules/vmware_cluster_ha.py
new file mode 100644
index 000000000..d9896caa2
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_cluster_ha.py
@@ -0,0 +1,535 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_cluster_ha
+short_description: Manage High Availability (HA) on VMware vSphere clusters
+description:
+ - Manages HA configuration on VMware vSphere clusters.
+ - All values and VMware object names are case sensitive.
+author:
+- Joseph Callen (@jcpowermac)
+- Abhijeet Kasurde (@Akasurde)
+options:
+ cluster_name:
+ description:
+ - The name of the cluster to be managed.
+ type: str
+ required: true
+ datacenter:
+ description:
+ - The name of the datacenter.
+ type: str
+ required: true
+ aliases: [ datacenter_name ]
+ enable:
+ description:
+ - Whether to enable HA.
+ type: bool
+ default: true
+ ha_host_monitoring:
+ description:
+ - Whether HA restarts virtual machines after a host fails.
+ - If set to C(enabled), HA restarts virtual machines after a host fails.
+ - If set to C(disabled), HA does not restart virtual machines after a host fails.
+ - If C(enable) is set to C(false), then this value is ignored.
+ type: str
+ choices: [ 'enabled', 'disabled' ]
+ default: 'enabled'
+ ha_vm_monitoring:
+ description:
+ - State of virtual machine health monitoring service.
+      - If set to C(vmAndAppMonitoring), HA responds to both virtual machine and application heartbeat failures.
+ - If set to C(vmMonitoringDisabled), virtual machine health monitoring is disabled.
+      - If set to C(vmMonitoringOnly), HA responds to virtual machine heartbeat failure only.
+ - If C(enable) is set to C(false), then this value is ignored.
+ type: str
+ choices: ['vmAndAppMonitoring', 'vmMonitoringOnly', 'vmMonitoringDisabled']
+ default: 'vmMonitoringDisabled'
+ host_isolation_response:
+ description:
+      - Indicates whether or not VMs should be powered off if a host determines that it is isolated from the rest of the compute resource.
+ - If set to C(none), do not power off VMs in the event of a host network isolation.
+ - If set to C(powerOff), power off VMs in the event of a host network isolation.
+ - If set to C(shutdown), shut down VMs guest operating system in the event of a host network isolation.
+ type: str
+ choices: ['none', 'powerOff', 'shutdown']
+ default: 'none'
+ slot_based_admission_control:
+ description:
+ - Configure slot based admission control policy.
+ - C(slot_based_admission_control), C(reservation_based_admission_control) and C(failover_host_admission_control) are mutually exclusive.
+ suboptions:
+ failover_level:
+ description:
+ - Number of host failures that should be tolerated.
+ type: int
+ required: true
+ type: dict
+ reservation_based_admission_control:
+ description:
+ - Configure reservation based admission control policy.
+ - C(slot_based_admission_control), C(reservation_based_admission_control) and C(failover_host_admission_control) are mutually exclusive.
+ suboptions:
+ failover_level:
+ description:
+ - Number of host failures that should be tolerated.
+ type: int
+ required: true
+ auto_compute_percentages:
+ description:
+ - By default, C(failover_level) is used to calculate C(cpu_failover_resources_percent) and C(memory_failover_resources_percent).
+          To override the percentage values, set this field to false.
+ type: bool
+ default: true
+ cpu_failover_resources_percent:
+ description:
+ - Percentage of CPU resources in the cluster to reserve for failover.
+ Ignored if C(auto_compute_percentages) is not set to false.
+ type: int
+ default: 50
+ memory_failover_resources_percent:
+ description:
+ - Percentage of memory resources in the cluster to reserve for failover.
+ Ignored if C(auto_compute_percentages) is not set to false.
+ type: int
+ default: 50
+ type: dict
+ failover_host_admission_control:
+ description:
+ - Configure dedicated failover hosts.
+ - C(slot_based_admission_control), C(reservation_based_admission_control) and C(failover_host_admission_control) are mutually exclusive.
+ suboptions:
+ failover_hosts:
+ description:
+ - List of dedicated failover hosts.
+ type: list
+ required: true
+ elements: str
+ type: dict
+ ha_vm_failure_interval:
+ description:
+ - The number of seconds after which virtual machine is declared as failed
+ if no heartbeat has been received.
+ - This setting is only valid if C(ha_vm_monitoring) is set to, either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
+ - Unit is seconds.
+ type: int
+ default: 30
+ ha_vm_min_up_time:
+ description:
+ - The number of seconds for the virtual machine's heartbeats to stabilize after
+ the virtual machine has been powered on.
+ - Valid only when I(ha_vm_monitoring) is set to either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
+ - Unit is seconds.
+ type: int
+ default: 120
+ ha_vm_max_failures:
+ description:
+ - Maximum number of failures and automated resets allowed during the time
+ that C(ha_vm_max_failure_window) specifies.
+ - Valid only when I(ha_vm_monitoring) is set to either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
+ type: int
+ default: 3
+ ha_vm_max_failure_window:
+ description:
+ - The number of seconds for the window during which up to C(ha_vm_max_failures) resets
+ can occur before automated responses stop.
+ - Valid only when I(ha_vm_monitoring) is set to either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
+ - Unit is seconds.
+ - Default specifies no failure window.
+ type: int
+ default: -1
+ ha_restart_priority:
+ description:
+ - Priority HA gives to a virtual machine if sufficient capacity is not available
+ to power on all failed virtual machines.
+ - Valid only if I(ha_vm_monitoring) is set to either C(vmAndAppMonitoring) or C(vmMonitoringOnly).
+ - If set to C(disabled), then HA is disabled for this virtual machine.
+ - If set to C(high), then virtual machine with this priority have a higher chance of powering on after a failure,
+ when there is insufficient capacity on hosts to meet all virtual machine needs.
+ - If set to C(medium), then virtual machine with this priority have an intermediate chance of powering on after a failure,
+ when there is insufficient capacity on hosts to meet all virtual machine needs.
+ - If set to C(low), then virtual machine with this priority have a lower chance of powering on after a failure,
+ when there is insufficient capacity on hosts to meet all virtual machine needs.
+ type: str
+ default: 'medium'
+ choices: [ 'disabled', 'high', 'low', 'medium' ]
+ advanced_settings:
+ description:
+ - A dictionary of advanced HA settings.
+ default: {}
+ type: dict
+ apd_response:
+ description:
+ - VM storage protection setting for storage failures categorized as All Paths Down (APD).
+ type: str
+ default: 'warning'
+ choices: [ 'disabled', 'warning', 'restartConservative', 'restartAggressive' ]
+ apd_delay:
+ description:
+ - The response recovery delay time in sec for storage failures categorized as All Paths Down (APD).
+ - Only set if C(apd_response) is C(restartConservative) or C(restartAggressive).
+ type: int
+ default: 180
+ version_added: '2.9.0'
+ apd_reaction:
+ description:
+ - VM response recovery reaction for storage failures categorized as All Paths Down (APD).
+ - Only set if C(apd_response) is C(restartConservative) or C(restartAggressive).
+ type: str
+ default: 'reset'
+ choices: [ 'reset', 'none' ]
+ version_added: '2.9.0'
+ pdl_response:
+ description:
+      - VM storage protection setting for storage failures categorized as Permanent Device Loss (PDL).
+ type: str
+ default: 'warning'
+ choices: [ 'disabled', 'warning', 'restartAggressive' ]
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Enable HA without admission control
+ community.vmware.vmware_cluster_ha:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter_name: datacenter
+ cluster_name: cluster
+ enable: true
+ delegate_to: localhost
+
+- name: Enable HA and VM monitoring without admission control
+ community.vmware.vmware_cluster_ha:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter_name: DC0
+ cluster_name: "{{ cluster_name }}"
+ enable: true
+ ha_vm_monitoring: vmMonitoringOnly
+ delegate_to: localhost
+
+- name: Enable HA with admission control reserving 50% of resources for HA
+ community.vmware.vmware_cluster_ha:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter_name: datacenter
+ cluster_name: cluster
+ enable: true
+ reservation_based_admission_control:
+ auto_compute_percentages: false
+ failover_level: 1
+ cpu_failover_resources_percent: 50
+ memory_failover_resources_percent: 50
+ delegate_to: localhost
+'''
+
+RETURN = r'''#
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi,
+ TaskError,
+ find_datacenter_by_name,
+ vmware_argument_spec,
+ wait_for_task,
+ option_diff,
+)
+from ansible.module_utils._text import to_native
+
+
+class VMwareCluster(PyVmomi):
+ def __init__(self, module):
+ super(VMwareCluster, self).__init__(module)
+ self.cluster_name = module.params['cluster_name']
+ self.datacenter_name = module.params['datacenter']
+ self.enable_ha = module.params['enable']
+ self.datacenter = None
+ self.cluster = None
+ self.host_isolation_response = getattr(vim.cluster.DasVmSettings.IsolationResponse, self.params.get('host_isolation_response'))
+
+ if self.enable_ha and (
+ self.params.get("slot_based_admission_control")
+ or self.params.get("reservation_based_admission_control")
+ or self.params.get("failover_host_admission_control")
+ ):
+ self.ha_admission_control = True
+ else:
+ self.ha_admission_control = False
+
+ self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name)
+ if self.datacenter is None:
+ self.module.fail_json(msg="Datacenter %s does not exist." % self.datacenter_name)
+
+ self.cluster = self.find_cluster_by_name(cluster_name=self.cluster_name, datacenter_name=self.datacenter)
+ if self.cluster is None:
+ self.module.fail_json(msg="Cluster %s does not exist." % self.cluster_name)
+
+ self.advanced_settings = self.params.get('advanced_settings')
+ if self.advanced_settings:
+ self.changed_advanced_settings = option_diff(self.advanced_settings, self.cluster.configurationEx.dasConfig.option, False)
+ else:
+ self.changed_advanced_settings = None
+
+ def get_failover_hosts(self):
+ """
+ Get failover hosts for failover_host_admission_control policy
+ Returns: List of ESXi hosts sorted by name
+
+ """
+ policy = self.params.get('failover_host_admission_control')
+ hosts = []
+ all_hosts = dict((h.name, h) for h in self.get_all_hosts_by_cluster(self.cluster_name))
+ for host in policy.get('failover_hosts'):
+ if host in all_hosts:
+ hosts.append(all_hosts.get(host))
+ else:
+ self.module.fail_json(msg="Host %s is not a member of cluster %s." % (host, self.cluster_name))
+ hosts.sort(key=lambda h: h.name)
+ return hosts
+
+ def check_ha_config_diff(self):
+ """
+ Check HA configuration diff
+ Returns: True if there is diff, else False
+
+ """
+ das_config = self.cluster.configurationEx.dasConfig
+ if das_config.enabled != self.enable_ha:
+ return True
+
+ if self.enable_ha and (
+ das_config.vmMonitoring != self.params.get("ha_vm_monitoring")
+ or das_config.hostMonitoring != self.params.get("ha_host_monitoring")
+ or das_config.admissionControlEnabled != self.ha_admission_control
+ or das_config.defaultVmSettings.restartPriority
+ != self.params.get("ha_restart_priority")
+ or das_config.defaultVmSettings.isolationResponse
+ != self.host_isolation_response
+ or das_config.defaultVmSettings.vmToolsMonitoringSettings.vmMonitoring
+ != self.params.get("ha_vm_monitoring")
+ or das_config.defaultVmSettings.vmToolsMonitoringSettings.failureInterval
+ != self.params.get("ha_vm_failure_interval")
+ or das_config.defaultVmSettings.vmToolsMonitoringSettings.minUpTime
+ != self.params.get("ha_vm_min_up_time")
+ or das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailures
+ != self.params.get("ha_vm_max_failures")
+ or das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailureWindow
+ != self.params.get("ha_vm_max_failure_window")
+ or das_config.defaultVmSettings.vmComponentProtectionSettings.vmStorageProtectionForAPD
+ != self.params.get("apd_response")
+ or das_config.defaultVmSettings.vmComponentProtectionSettings.vmStorageProtectionForPDL
+ != self.params.get("pdl_response")
+ ):
+ return True
+
+ if self.ha_admission_control:
+ if self.params.get('slot_based_admission_control'):
+ policy = self.params.get('slot_based_admission_control')
+ if not isinstance(das_config.admissionControlPolicy, vim.cluster.FailoverLevelAdmissionControlPolicy) or \
+ das_config.admissionControlPolicy.failoverLevel != policy.get('failover_level'):
+ return True
+ elif self.params.get('reservation_based_admission_control'):
+ policy = self.params.get('reservation_based_admission_control')
+ auto_compute_percentages = policy.get('auto_compute_percentages')
+ if not isinstance(das_config.admissionControlPolicy, vim.cluster.FailoverResourcesAdmissionControlPolicy) or \
+ das_config.admissionControlPolicy.autoComputePercentages != auto_compute_percentages or \
+ das_config.admissionControlPolicy.failoverLevel != policy.get('failover_level'):
+ return True
+ if not auto_compute_percentages:
+ if das_config.admissionControlPolicy.cpuFailoverResourcesPercent != policy.get('cpu_failover_resources_percent') or \
+ das_config.admissionControlPolicy.memoryFailoverResourcesPercent != policy.get('memory_failover_resources_percent'):
+ return True
+ elif self.params.get('failover_host_admission_control'):
+ policy = self.params.get('failover_host_admission_control')
+ if not isinstance(das_config.admissionControlPolicy, vim.cluster.FailoverHostAdmissionControlPolicy):
+ return True
+ das_config.admissionControlPolicy.failoverHosts.sort(key=lambda h: h.name)
+ if das_config.admissionControlPolicy.failoverHosts != self.get_failover_hosts():
+ return True
+
+ if self.params.get('apd_response') != 'disabled' and self.params.get('apd_response') != 'warning':
+ if das_config.defaultVmSettings.vmComponentProtectionSettings.vmTerminateDelayForAPDSec != self.params.get('apd_delay'):
+ return True
+ if das_config.defaultVmSettings.vmComponentProtectionSettings.vmReactionOnAPDCleared != self.params.get('apd_reaction'):
+ return True
+
+ if self.changed_advanced_settings:
+ return True
+
+ return False
+
+ def configure_ha(self):
+ """
+ Manage HA Configuration
+
+ """
+ changed, result = False, None
+
+ if self.check_ha_config_diff():
+ if not self.module.check_mode:
+ cluster_config_spec = vim.cluster.ConfigSpecEx()
+ cluster_config_spec.dasConfig = vim.cluster.DasConfigInfo()
+ cluster_config_spec.dasConfig.enabled = self.enable_ha
+
+ if self.enable_ha:
+ vm_tool_spec = vim.cluster.VmToolsMonitoringSettings()
+ vm_tool_spec.enabled = True
+ vm_tool_spec.vmMonitoring = self.params.get('ha_vm_monitoring')
+ vm_tool_spec.failureInterval = self.params.get('ha_vm_failure_interval')
+ vm_tool_spec.minUpTime = self.params.get('ha_vm_min_up_time')
+ vm_tool_spec.maxFailures = self.params.get('ha_vm_max_failures')
+ vm_tool_spec.maxFailureWindow = self.params.get('ha_vm_max_failure_window')
+
+ das_vm_config = vim.cluster.DasVmSettings()
+ das_vm_config.restartPriority = self.params.get('ha_restart_priority')
+ das_vm_config.isolationResponse = self.host_isolation_response
+ das_vm_config.vmToolsMonitoringSettings = vm_tool_spec
+
+ das_vm_config.vmComponentProtectionSettings = vim.cluster.VmComponentProtectionSettings()
+ das_vm_config.vmComponentProtectionSettings.vmStorageProtectionForAPD = self.params.get('apd_response')
+ if self.params.get('apd_response') != 'disabled' and self.params.get('apd_response') != 'warning':
+ das_vm_config.vmComponentProtectionSettings.vmTerminateDelayForAPDSec = self.params.get('apd_delay')
+ das_vm_config.vmComponentProtectionSettings.vmReactionOnAPDCleared = self.params.get('apd_reaction')
+ das_vm_config.vmComponentProtectionSettings.vmStorageProtectionForPDL = self.params.get('pdl_response')
+ if (self.params['apd_response'] != "disabled" or self.params['pdl_response'] != "disabled"):
+ cluster_config_spec.dasConfig.vmComponentProtecting = 'enabled'
+ else:
+ cluster_config_spec.dasConfig.vmComponentProtecting = 'disabled'
+
+ cluster_config_spec.dasConfig.defaultVmSettings = das_vm_config
+
+ cluster_config_spec.dasConfig.admissionControlEnabled = self.ha_admission_control
+
+ if self.ha_admission_control:
+ if self.params.get('slot_based_admission_control'):
+ cluster_config_spec.dasConfig.admissionControlPolicy = vim.cluster.FailoverLevelAdmissionControlPolicy()
+ policy = self.params.get('slot_based_admission_control')
+ cluster_config_spec.dasConfig.admissionControlPolicy.failoverLevel = policy.get('failover_level')
+ elif self.params.get('reservation_based_admission_control'):
+ cluster_config_spec.dasConfig.admissionControlPolicy = vim.cluster.FailoverResourcesAdmissionControlPolicy()
+ policy = self.params.get('reservation_based_admission_control')
+ auto_compute_percentages = policy.get('auto_compute_percentages')
+ cluster_config_spec.dasConfig.admissionControlPolicy.autoComputePercentages = auto_compute_percentages
+ cluster_config_spec.dasConfig.admissionControlPolicy.failoverLevel = policy.get('failover_level')
+ if not auto_compute_percentages:
+ cluster_config_spec.dasConfig.admissionControlPolicy.cpuFailoverResourcesPercent = \
+ policy.get('cpu_failover_resources_percent')
+ cluster_config_spec.dasConfig.admissionControlPolicy.memoryFailoverResourcesPercent = \
+ policy.get('memory_failover_resources_percent')
+ elif self.params.get('failover_host_admission_control'):
+ cluster_config_spec.dasConfig.admissionControlPolicy = vim.cluster.FailoverHostAdmissionControlPolicy()
+ policy = self.params.get('failover_host_admission_control')
+ cluster_config_spec.dasConfig.admissionControlPolicy.failoverHosts = self.get_failover_hosts()
+
+ cluster_config_spec.dasConfig.hostMonitoring = self.params.get('ha_host_monitoring')
+ cluster_config_spec.dasConfig.vmMonitoring = self.params.get('ha_vm_monitoring')
+
+ if self.changed_advanced_settings:
+ cluster_config_spec.dasConfig.option = self.changed_advanced_settings
+
+ try:
+ task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True)
+ changed, result = wait_for_task(task)
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=to_native(runtime_fault.msg))
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=to_native(method_fault.msg))
+ except TaskError as task_e:
+ self.module.fail_json(msg=to_native(task_e))
+ except Exception as generic_exc:
+ self.module.fail_json(msg="Failed to update cluster"
+ " due to generic exception %s" % to_native(generic_exc))
+ else:
+ changed = True
+
+ self.module.exit_json(changed=changed, result=result)
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(
+ cluster_name=dict(type='str', required=True),
+ datacenter=dict(type='str', required=True, aliases=['datacenter_name']),
+ # HA
+ enable=dict(type='bool', default=True),
+ ha_host_monitoring=dict(type='str',
+ default='enabled',
+ choices=['enabled', 'disabled']),
+ host_isolation_response=dict(type='str',
+ default='none',
+ choices=['none', 'powerOff', 'shutdown']),
+ advanced_settings=dict(type='dict', default=dict(), required=False),
+ # HA VM Monitoring related parameters
+ ha_vm_monitoring=dict(type='str',
+ choices=['vmAndAppMonitoring', 'vmMonitoringOnly', 'vmMonitoringDisabled'],
+ default='vmMonitoringDisabled'),
+ ha_vm_failure_interval=dict(type='int', default=30),
+ ha_vm_min_up_time=dict(type='int', default=120),
+ ha_vm_max_failures=dict(type='int', default=3),
+ ha_vm_max_failure_window=dict(type='int', default=-1),
+
+ ha_restart_priority=dict(type='str',
+ choices=['high', 'low', 'medium', 'disabled'],
+ default='medium'),
+ # HA Admission Control related parameters
+ slot_based_admission_control=dict(type='dict', options=dict(
+ failover_level=dict(type='int', required=True),
+ )),
+ reservation_based_admission_control=dict(type='dict', options=dict(
+ auto_compute_percentages=dict(type='bool', default=True),
+ failover_level=dict(type='int', required=True),
+ cpu_failover_resources_percent=dict(type='int', default=50),
+ memory_failover_resources_percent=dict(type='int', default=50),
+ )),
+ failover_host_admission_control=dict(type='dict', options=dict(
+ failover_hosts=dict(type='list', elements='str', required=True),
+ )),
+ apd_response=dict(type='str',
+ choices=['disabled', 'warning', 'restartConservative', 'restartAggressive'],
+ default='warning'),
+ apd_delay=dict(type='int', default=180),
+ apd_reaction=dict(type='str',
+ choices=['reset', 'none'],
+ default='reset'),
+ pdl_response=dict(type='str',
+ choices=['disabled', 'warning', 'restartAggressive'],
+ default='warning'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['slot_based_admission_control', 'reservation_based_admission_control', 'failover_host_admission_control']
+ ]
+ )
+
+ vmware_cluster_ha = VMwareCluster(module)
+ vmware_cluster_ha.configure_ha()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_cluster_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_cluster_info.py
new file mode 100644
index 000000000..b3eded647
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_cluster_info.py
@@ -0,0 +1,344 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_cluster_info
+short_description: Gather info about clusters available in given vCenter
+description:
+ - This module can be used to gather information about clusters in VMWare infrastructure.
+ - All values and VMware object names are case sensitive.
+author:
+ - Abhijeet Kasurde (@Akasurde)
+ - Christian Neugum (@digifuchsi)
+options:
+ datacenter:
+ description:
+ - Datacenter to search for cluster/s.
+ - This parameter is required, if C(cluster_name) is not supplied.
+ required: false
+ type: str
+ cluster_name:
+ description:
+ - Name of the cluster.
+ - If set, information of this cluster will be returned.
+ - This parameter is required, if C(datacenter) is not supplied.
+ required: false
+ type: str
+ show_tag:
+ description:
+ - Tags related to cluster are shown if set to C(true).
+ default: false
+ type: bool
+ schema:
+ description:
+ - Specify the output schema desired.
+ - The 'summary' output schema is the legacy output from the module.
+ - The 'vsphere' output schema is the vSphere API class definition which requires pyvmomi>6.7.1.
+ choices: ['summary', 'vsphere']
+ default: 'summary'
+ type: str
+ properties:
+ description:
+ - Specify the properties to retrieve.
+ - 'Example:'
+ - ' properties: ['
+ - ' "name",'
+ - ' "configuration.dasConfig.enabled",'
+ - ' "summary.totalCpu"'
+ - ' ]'
+ - Only valid when C(schema) is C(vsphere).
+ type: list
+ elements: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather cluster info from given datacenter
+ community.vmware.vmware_cluster_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter: ha-datacenter
+ delegate_to: localhost
+ register: cluster_info
+
+- name: Gather info from datacenter about specific cluster
+ community.vmware.vmware_cluster_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: DC0_C0
+ delegate_to: localhost
+ register: cluster_info
+
+- name: Gather info from datacenter about specific cluster with tags
+ community.vmware.vmware_cluster_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: DC0_C0
+ show_tag: true
+ delegate_to: localhost
+ register: cluster_info
+
+- name: Gather some info from a cluster using the vSphere API output schema
+ vmware_cluster_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: DC0_C0
+ schema: vsphere
+ properties:
+ - name
+ - configuration.dasConfig.enabled
+ - summary.totalCpu
+ delegate_to: localhost
+ register: cluster_info
+'''
+
+RETURN = r'''
+clusters:
+ description:
+ - metadata about the available clusters
+ - datacenter added in the return values from version 1.6.0
+ returned: always
+ type: dict
+ sample: {
+ "DC0_C0": {
+ "datacenter": "DC0",
+ "moid": "domain-c9",
+ "drs_default_vm_behavior": null,
+ "drs_enable_vm_behavior_overrides": null,
+ "drs_vmotion_rate": null,
+ "enable_ha": null,
+ "enabled_drs": true,
+ "enabled_vsan": false,
+ "ha_admission_control_enabled": null,
+ "ha_failover_level": null,
+ "ha_host_monitoring": null,
+ "ha_restart_priority": null,
+ "ha_vm_failure_interval": null,
+ "ha_vm_max_failure_window": null,
+ "ha_vm_max_failures": null,
+ "ha_vm_min_up_time": null,
+ "ha_vm_monitoring": null,
+ "ha_vm_tools_monitoring": null,
+ "vsan_auto_claim_storage": false,
+ "hosts": [
+ {
+ "name": "esxi01.vsphere.local",
+ "folder": "/DC0/host/DC0_C0",
+ },
+ {
+ "name": "esxi02.vsphere.local",
+ "folder": "/DC0/host/DC0_C0",
+ },
+ {
+ "name": "esxi03.vsphere.local",
+ "folder": "/DC0/host/DC0_C0",
+ },
+ {
+ "name": "esxi04.vsphere.local",
+ "folder": "/DC0/host/DC0_C0",
+ },
+ ],
+ "resource_summary": {
+ "cpuCapacityMHz": 4224,
+ "cpuUsedMHz": 87,
+ "memCapacityMB": 6139,
+ "memUsedMB": 1254,
+ "pMemAvailableMB": 0,
+ "pMemCapacityMB": 0,
+ "storageCapacityMB": 33280,
+ "storageUsedMB": 19953
+ },
+ "tags": [
+ {
+ "category_id": "urn:vmomi:InventoryServiceCategory:9fbf83de-7903-442e-8004-70fd3940297c:GLOBAL",
+ "category_name": "sample_cluster_cat_0001",
+ "description": "",
+ "id": "urn:vmomi:InventoryServiceTag:93d680db-b3a6-4834-85ad-3e9516e8fee8:GLOBAL",
+ "name": "sample_cluster_tag_0001"
+ }
+ ],
+ },
+ }
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import unquote
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, find_datacenter_by_name, find_cluster_by_name,\
+ get_parent_datacenter
+from ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient
+
+
+class VmwreClusterInfoManager(PyVmomi):
+ def __init__(self, module):
+ super(VmwreClusterInfoManager, self).__init__(module)
+ datacenter = self.params.get('datacenter')
+ cluster_name = self.params.get('cluster_name')
+ self.schema = self.params.get('schema')
+ self.properties = self.params.get('properties')
+ self.cluster_objs = []
+ if datacenter:
+ datacenter_obj = find_datacenter_by_name(self.content, datacenter_name=datacenter)
+ if datacenter_obj is None:
+ self.module.fail_json(msg="Failed to find datacenter '%s'" % datacenter)
+ self.cluster_objs = self.get_all_cluster_objs(parent=datacenter_obj)
+ elif cluster_name:
+ cluster_obj = find_cluster_by_name(self.content, cluster_name=cluster_name)
+ if cluster_obj is None:
+ self.module.fail_json(msg="Failed to find cluster '%s'" % cluster_name)
+
+ self.cluster_objs = [cluster_obj]
+
+ def get_all_cluster_objs(self, parent):
+ """
+ Get all cluster managed objects from given parent object
+ Args:
+ parent: Managed objected of datacenter or host folder
+
+ Returns: List of host managed objects
+
+ """
+ cluster_objs = []
+ if isinstance(parent, vim.Datacenter):
+ folder = parent.hostFolder
+ else:
+ folder = parent
+
+ for child in folder.childEntity:
+ if isinstance(child, vim.Folder):
+ cluster_objs = cluster_objs + self.get_all_cluster_objs(child)
+ if isinstance(child, vim.ClusterComputeResource):
+ cluster_objs.append(child)
+ return cluster_objs
+
+ def gather_cluster_info(self):
+ """
+ Gather information about cluster
+ """
+ results = dict(changed=False, clusters=dict())
+
+ if self.schema == 'summary':
+ for cluster in self.cluster_objs:
+ # Default values
+ ha_failover_level = None
+ ha_restart_priority = None
+ ha_vm_tools_monitoring = None
+ ha_vm_min_up_time = None
+ ha_vm_max_failures = None
+ ha_vm_max_failure_window = None
+ ha_vm_failure_interval = None
+ enabled_vsan = False
+ vsan_auto_claim_storage = False
+ hosts = []
+
+ # Hosts
+ for host in cluster.host:
+ hosts.append({
+ 'name': host.name,
+ 'folder': self.get_vm_path(self.content, host),
+ })
+
+ # HA
+ das_config = cluster.configurationEx.dasConfig
+ if das_config.admissionControlPolicy:
+ ha_failover_level = das_config.admissionControlPolicy.failoverLevel
+ if das_config.defaultVmSettings:
+ ha_restart_priority = das_config.defaultVmSettings.restartPriority
+ ha_vm_tools_monitoring = das_config.defaultVmSettings.vmToolsMonitoringSettings.vmMonitoring
+ ha_vm_min_up_time = das_config.defaultVmSettings.vmToolsMonitoringSettings.minUpTime
+ ha_vm_max_failures = das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailures
+ ha_vm_max_failure_window = das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailureWindow
+ ha_vm_failure_interval = das_config.defaultVmSettings.vmToolsMonitoringSettings.failureInterval
+
+ # DRS
+ drs_config = cluster.configurationEx.drsConfig
+
+ # VSAN
+ if hasattr(cluster.configurationEx, 'vsanConfigInfo'):
+ vsan_config = cluster.configurationEx.vsanConfigInfo
+ enabled_vsan = vsan_config.enabled
+ vsan_auto_claim_storage = vsan_config.defaultConfig.autoClaimStorage
+
+ tag_info = []
+ if self.params.get('show_tag'):
+ vmware_client = VmwareRestClient(self.module)
+ tag_info = vmware_client.get_tags_for_cluster(cluster_mid=cluster._moId)
+
+ resource_summary = self.to_json(cluster.GetResourceUsage())
+ if '_vimtype' in resource_summary:
+ del resource_summary['_vimtype']
+
+ results['clusters'][unquote(cluster.name)] = dict(
+ hosts=hosts,
+ enable_ha=das_config.enabled,
+ ha_failover_level=ha_failover_level,
+ ha_vm_monitoring=das_config.vmMonitoring,
+ ha_host_monitoring=das_config.hostMonitoring,
+ ha_admission_control_enabled=das_config.admissionControlEnabled,
+ ha_restart_priority=ha_restart_priority,
+ ha_vm_tools_monitoring=ha_vm_tools_monitoring,
+ ha_vm_min_up_time=ha_vm_min_up_time,
+ ha_vm_max_failures=ha_vm_max_failures,
+ ha_vm_max_failure_window=ha_vm_max_failure_window,
+ ha_vm_failure_interval=ha_vm_failure_interval,
+ enabled_drs=drs_config.enabled,
+ drs_enable_vm_behavior_overrides=drs_config.enableVmBehaviorOverrides,
+ drs_default_vm_behavior=drs_config.defaultVmBehavior,
+ drs_vmotion_rate=drs_config.vmotionRate,
+ enabled_vsan=enabled_vsan,
+ vsan_auto_claim_storage=vsan_auto_claim_storage,
+ tags=tag_info,
+ resource_summary=resource_summary,
+ moid=cluster._moId,
+ datacenter=get_parent_datacenter(cluster).name
+ )
+ else:
+ for cluster in self.cluster_objs:
+ results['clusters'][unquote(cluster.name)] = self.to_json(cluster, self.properties)
+
+ self.module.exit_json(**results)
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ datacenter=dict(type='str'),
+ cluster_name=dict(type='str'),
+ show_tag=dict(type='bool', default=False),
+ schema=dict(type='str', choices=['summary', 'vsphere'], default='summary'),
+ properties=dict(type='list', elements='str')
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=[
+ ['cluster_name', 'datacenter'],
+ ],
+ supports_check_mode=True,
+ )
+
+ pyv = VmwreClusterInfoManager(module)
+ pyv.gather_cluster_info()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_cluster_vcls.py b/ansible_collections/community/vmware/plugins/modules/vmware_cluster_vcls.py
new file mode 100644
index 000000000..f553adcdb
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_cluster_vcls.py
@@ -0,0 +1,204 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_cluster_vcls
+short_description: Override the default vCLS (vSphere Cluster Services) VM disk placement for this cluster.
+description:
+ - Override the default vCLS VM disk placement for this cluster.
+  - Some datastores cannot be selected for vCLS 'Allowed' as they are blocked by solutions such as SRM or by vSAN maintenance mode, where vCLS cannot be configured.
+ - All values and VMware object names are case sensitive.
+author:
+- Joseph Callen (@jcpowermac)
+- Nina Loser (@Nina2244)
+options:
+ cluster_name:
+ description:
+ - The name of the cluster to be managed.
+ type: str
+ required: true
+ datacenter:
+ description:
+ - The name of the datacenter.
+ type: str
+ required: true
+ aliases: [ datacenter_name ]
+ allowed_datastores:
+ description:
+ - List of the allowed Datastores.
+      - Datastores currently allowed but not contained in this list will be removed.
+ type: list
+ elements: str
+ required: true
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Set Allowed vCLS Datastores
+ community.vmware.vmware_cluster_vcls:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter_name: datacenter
+ cluster_name: cluster
+ allowed_datastores:
+ - ds1
+ - ds2
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+result:
+ description: information about performed operation
+ returned: always
+ type: str
+ sample: {
+ "result": null,
+ "Added_AllowedDatastores": [ds2],
+ "Removed_AllowedDatastores": [ds3]
+ }
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi,
+ TaskError,
+ find_datacenter_by_name,
+ find_datastore_by_name,
+ vmware_argument_spec,
+ wait_for_task,
+)
+from ansible.module_utils._text import to_native
+
+
+class VMwareCluster(PyVmomi):
+ def __init__(self, module):
+ super(VMwareCluster, self).__init__(module)
+ self.cluster_name = module.params['cluster_name']
+ self.datacenter_name = module.params['datacenter']
+ self.datacenter = None
+ self.cluster = None
+
+ self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name)
+ if self.datacenter is None:
+ self.module.fail_json(msg="Datacenter %s does not exist." % self.datacenter_name)
+
+ self.cluster = self.find_cluster_by_name(cluster_name=self.cluster_name, datacenter_name=self.datacenter)
+ if self.cluster is None:
+ self.module.fail_json(msg="Cluster %s does not exist." % self.cluster_name)
+
+ self.allowedDatastores_names = module.params['allowed_datastores']
+
+ def check_vCLS_config_diff(self):
+ """
+ Check vCLS configuration diff
+        Returns: (changed, datastores to add, datastores to remove); changed is True if the current configuration differs from the requested one, else False
+
+ """
+ if hasattr(self.cluster.configurationEx, 'systemVMsConfig'):
+ vCLS_config = self.cluster.configurationEx.systemVMsConfig
+ else:
+ return False, self.allowedDatastores_names, None
+ changed = False
+
+        # Get the list of currently allowed datastore names
+ currentAllowedDatastores = []
+ for ds in vCLS_config.allowedDatastores:
+ currentAllowedDatastores.append(ds.name)
+
+ # Get the to add and to remove allowed and not allowed Datastores
+ toAddAllowedDatastores = list(set(self.allowedDatastores_names) - set(currentAllowedDatastores))
+ toRemoveAllowedDatastores = list(set(currentAllowedDatastores) - set(self.allowedDatastores_names))
+
+ if len(toAddAllowedDatastores) != 0 or len(toRemoveAllowedDatastores) != 0:
+ changed = True
+
+ return changed, toAddAllowedDatastores, toRemoveAllowedDatastores
+
+ def configure_vCLS(self):
+ """
+ Manage DRS configuration
+
+ """
+ result = None
+ changed, toAddAllowedDatastores, toRemoveAllowedDatastores = self.check_vCLS_config_diff()
+
+ if changed:
+ if not self.module.check_mode:
+ cluster_config_spec = vim.cluster.ConfigSpecEx()
+ cluster_config_spec.systemVMsConfig = vim.cluster.SystemVMsConfigSpec()
+
+ cluster_config_spec.systemVMsConfig.allowedDatastores = []
+
+ # Build the Spec
+ for ds_name in toAddAllowedDatastores:
+ specSystemVMsConfigAllowedDatastore = vim.cluster.DatastoreUpdateSpec()
+ specSystemVMsConfigAllowedDatastore.datastore = find_datastore_by_name(self.content, ds_name, self.datacenter)
+ specSystemVMsConfigAllowedDatastore.operation = 'add'
+ cluster_config_spec.systemVMsConfig.allowedDatastores.append(specSystemVMsConfigAllowedDatastore)
+
+ for ds_name in toRemoveAllowedDatastores:
+ specSystemVMsConfigAllowedDatastore = vim.cluster.DatastoreUpdateSpec()
+ specSystemVMsConfigAllowedDatastore.removeKey = find_datastore_by_name(self.content, ds_name, self.datacenter)
+ specSystemVMsConfigAllowedDatastore.operation = 'remove'
+ cluster_config_spec.systemVMsConfig.allowedDatastores.append(specSystemVMsConfigAllowedDatastore)
+
+ try:
+ task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True)
+ changed, result = wait_for_task(task)
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=to_native(runtime_fault.msg))
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=to_native(method_fault.msg))
+ except TaskError as task_e:
+ self.module.fail_json(msg=to_native(task_e))
+ except Exception as generic_exc:
+ self.module.fail_json(msg="Failed to update cluster"
+ " due to generic exception %s" % to_native(generic_exc))
+ else:
+ changed = True
+
+ results = dict(changed=changed)
+ results['result'] = result
+ results['Added_AllowedDatastores'] = toAddAllowedDatastores
+ results['Removed_AllowedDatastores'] = toRemoveAllowedDatastores
+ self.module.exit_json(**results)
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(
+ cluster_name=dict(type='str', required=True),
+ datacenter=dict(type='str', required=True, aliases=['datacenter_name']),
+ # vCLS
+ allowed_datastores=dict(type='list', elements='str', required=True)
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ vmware_cluster_vCLS = VMwareCluster(module)
+ vmware_cluster_vCLS.configure_vCLS()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_cluster_vsan.py b/ansible_collections/community/vmware/plugins/modules/vmware_cluster_vsan.py
new file mode 100644
index 000000000..a78aa1575
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_cluster_vsan.py
@@ -0,0 +1,282 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_cluster_vsan
+short_description: Manages virtual storage area network (vSAN) configuration on VMware vSphere clusters
+description:
+ - Manages vSAN on VMware vSphere clusters.
+ - All values and VMware object names are case sensitive.
+author:
+- Joseph Callen (@jcpowermac)
+- Abhijeet Kasurde (@Akasurde)
+- Mario Lenz (@mariolenz)
+requirements:
+ - vSAN Management SDK, which needs to be downloaded from VMware and installed manually.
+options:
+ cluster_name:
+ description:
+ - The name of the cluster to be managed.
+ type: str
+ required: true
+ datacenter:
+ description:
+ - The name of the datacenter.
+ type: str
+ required: true
+ aliases: [ datacenter_name ]
+ enable:
+ description:
+ - Whether to enable vSAN.
+ type: bool
+ default: true
+ vsan_auto_claim_storage:
+ description:
+ - Whether the VSAN service is configured to automatically claim local storage
+ on VSAN-enabled hosts in the cluster.
+ type: bool
+ default: false
+ advanced_options:
+ description:
+ - Advanced VSAN Options.
+ suboptions:
+ automatic_rebalance:
+ description:
+ - If enabled, vSAN automatically rebalances (moves the data among disks) when a capacity disk fullness hits proactive rebalance threshold.
+ type: bool
+ disable_site_read_locality:
+ description:
+ - For vSAN stretched clusters, reads to vSAN objects occur on the site the VM resides on.
+ - Setting to C(true) will force reads across all mirrors.
+ type: bool
+ large_cluster_support:
+ description:
+ - Allow > 32 VSAN hosts per cluster; if this is changed on an existing vSAN cluster, all hosts are required to reboot to apply this change.
+ type: bool
+ object_repair_timer:
+ description:
+ - Delay time in minutes for VSAN to wait for the absent component to come back before starting to repair it.
+ type: int
+ thin_swap:
+ description:
+ - When C(enabled), swap objects would not reserve 100% space of their size on vSAN datastore.
+ type: bool
+ type: dict
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Enable vSAN
+ community.vmware.vmware_cluster_vsan:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter_name: datacenter
+ cluster_name: cluster
+ enable: true
+ delegate_to: localhost
+
+- name: Enable vSAN and automatic rebalancing
+ community.vmware.vmware_cluster_vsan:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter_name: datacenter
+ cluster_name: cluster
+ enable: true
+ advanced_options:
+ automatic_rebalance: true
+ delegate_to: localhost
+
+- name: Enable vSAN and claim storage automatically
+ community.vmware.vmware_cluster_vsan:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter_name: DC0
+ cluster_name: "{{ cluster_name }}"
+ enable: true
+ vsan_auto_claim_storage: true
+ delegate_to: localhost
+'''
+
+RETURN = r'''#
+'''
+
+import traceback
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+VSANPYTHONSDK_IMP_ERR = None
+try:
+ import vsanapiutils
+ HAS_VSANPYTHONSDK = True
+except ImportError:
+ VSANPYTHONSDK_IMP_ERR = traceback.format_exc()
+ HAS_VSANPYTHONSDK = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi,
+ TaskError,
+ find_datacenter_by_name,
+ vmware_argument_spec,
+ wait_for_task)
+from ansible.module_utils._text import to_native
+
+
+class VMwareCluster(PyVmomi):
+ def __init__(self, module):
+ super(VMwareCluster, self).__init__(module)
+ self.cluster_name = module.params['cluster_name']
+ self.datacenter_name = module.params['datacenter']
+ self.enable_vsan = module.params['enable']
+ self.datacenter = None
+ self.cluster = None
+ self.advanced_options = None
+
+ self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name)
+ if self.datacenter is None:
+ self.module.fail_json(msg="Datacenter %s does not exist." % self.datacenter_name)
+
+ self.cluster = self.find_cluster_by_name(cluster_name=self.cluster_name, datacenter_name=self.datacenter)
+ if self.cluster is None:
+ self.module.fail_json(msg="Cluster %s does not exist." % self.cluster_name)
+
+ if module.params['advanced_options'] is not None:
+ self.advanced_options = module.params['advanced_options']
+
+ client_stub = self.si._GetStub()
+ ssl_context = client_stub.schemeArgs.get('context')
+ apiVersion = vsanapiutils.GetLatestVmodlVersion(module.params['hostname'])
+ vcMos = vsanapiutils.GetVsanVcMos(client_stub, context=ssl_context, version=apiVersion)
+ self.vsanClusterConfigSystem = vcMos['vsan-cluster-config-system']
+
+ def check_vsan_config_diff(self):
+ """
+ Check VSAN configuration diff
+ Returns: True if there is diff, else False
+
+ """
+ vsan_config = self.cluster.configurationEx.vsanConfigInfo
+
+ if vsan_config.enabled != self.enable_vsan or \
+ vsan_config.defaultConfig.autoClaimStorage != self.params.get('vsan_auto_claim_storage'):
+ return True
+
+ if self.advanced_options is not None:
+ vsan_config_info = self.vsanClusterConfigSystem.GetConfigInfoEx(self.cluster).extendedConfig
+ if self.advanced_options['automatic_rebalance'] is not None and \
+ self.advanced_options['automatic_rebalance'] != vsan_config_info.proactiveRebalanceInfo.enabled:
+ return True
+ if self.advanced_options['disable_site_read_locality'] is not None and \
+ self.advanced_options['disable_site_read_locality'] != vsan_config_info.disableSiteReadLocality:
+ return True
+ if self.advanced_options['large_cluster_support'] is not None and \
+ self.advanced_options['large_cluster_support'] != vsan_config_info.largeScaleClusterSupport:
+ return True
+ if self.advanced_options['object_repair_timer'] is not None and \
+ self.advanced_options['object_repair_timer'] != vsan_config_info.objectRepairTimer:
+ return True
+ if self.advanced_options['thin_swap'] is not None and \
+ self.advanced_options['thin_swap'] != vsan_config_info.enableCustomizedSwapObject:
+ return True
+
+ return False
+
+ def configure_vsan(self):
+ """
+ Manage VSAN configuration
+
+ """
+ changed, result = False, None
+
+ if self.check_vsan_config_diff():
+ if not self.module.check_mode:
+ vSanSpec = vim.vsan.ReconfigSpec(
+ modify=True,
+ )
+ vSanSpec.vsanClusterConfig = vim.vsan.cluster.ConfigInfo(
+ enabled=self.enable_vsan
+ )
+ vSanSpec.vsanClusterConfig.defaultConfig = vim.vsan.cluster.ConfigInfo.HostDefaultInfo(
+ autoClaimStorage=self.params.get('vsan_auto_claim_storage')
+ )
+ if self.advanced_options is not None:
+ vSanSpec.extendedConfig = vim.vsan.VsanExtendedConfig()
+ if self.advanced_options['automatic_rebalance'] is not None:
+ vSanSpec.extendedConfig.proactiveRebalanceInfo = vim.vsan.ProactiveRebalanceInfo(
+ enabled=self.advanced_options['automatic_rebalance']
+ )
+ if self.advanced_options['disable_site_read_locality'] is not None:
+ vSanSpec.extendedConfig.disableSiteReadLocality = self.advanced_options['disable_site_read_locality']
+ if self.advanced_options['large_cluster_support'] is not None:
+ vSanSpec.extendedConfig.largeScaleClusterSupport = self.advanced_options['large_cluster_support']
+ if self.advanced_options['object_repair_timer'] is not None:
+ vSanSpec.extendedConfig.objectRepairTimer = self.advanced_options['object_repair_timer']
+ if self.advanced_options['thin_swap'] is not None:
+ vSanSpec.extendedConfig.enableCustomizedSwapObject = self.advanced_options['thin_swap']
+ try:
+ task = self.vsanClusterConfigSystem.VsanClusterReconfig(self.cluster, vSanSpec)
+ changed, result = wait_for_task(vim.Task(task._moId, self.si._stub))
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=to_native(runtime_fault.msg))
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=to_native(method_fault.msg))
+ except TaskError as task_e:
+ self.module.fail_json(msg=to_native(task_e))
+ except Exception as generic_exc:
+ self.module.fail_json(msg="Failed to update cluster"
+ " due to generic exception %s" % to_native(generic_exc))
+ else:
+ changed = True
+
+ self.module.exit_json(changed=changed, result=result)
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(
+ cluster_name=dict(type='str', required=True),
+ datacenter=dict(type='str', required=True, aliases=['datacenter_name']),
+ # VSAN
+ enable=dict(type='bool', default=True),
+ vsan_auto_claim_storage=dict(type='bool', default=False),
+ advanced_options=dict(type='dict', options=dict(
+ automatic_rebalance=dict(type='bool', required=False),
+ disable_site_read_locality=dict(type='bool', required=False),
+ large_cluster_support=dict(type='bool', required=False),
+ object_repair_timer=dict(type='int', required=False),
+ thin_swap=dict(type='bool', required=False),
+ )),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ if not HAS_VSANPYTHONSDK:
+ module.fail_json(msg=missing_required_lib('vSAN Management SDK for Python'), exception=VSANPYTHONSDK_IMP_ERR)
+
+ vmware_cluster_vsan = VMwareCluster(module)
+ vmware_cluster_vsan.configure_vsan()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_content_deploy_ovf_template.py b/ansible_collections/community/vmware/plugins/modules/vmware_content_deploy_ovf_template.py
new file mode 100644
index 000000000..c74151904
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_content_deploy_ovf_template.py
@@ -0,0 +1,441 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Lev Goncharov <lev@goncharov.xyz>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_content_deploy_ovf_template
+short_description: Deploy Virtual Machine from ovf template stored in content library.
+description:
+- Module to deploy virtual machine from ovf template in content library.
+- All variables and VMware object names are case sensitive.
+author:
+- Lev Goncharov (@ultral)
+requirements:
+- vSphere Automation SDK
+options:
+ log_level:
+ description:
+ - The level of logging desired in this module.
+ type: str
+ required: false
+ default: 'normal'
+ choices: [ 'debug', 'info', 'normal' ]
+ template:
+ description:
+ - The name of OVF template from which VM to be deployed.
+ type: str
+ required: true
+ aliases: ['ovf', 'ovf_template', 'template_src']
+ library:
+ description:
+ - The name of the content library from where the template resides.
+ type: str
+ required: false
+ aliases: ['content_library', 'content_library_src']
+ name:
+ description:
+ - The name of the VM to be deployed.
+ type: str
+ required: true
+ aliases: ['vm_name']
+ datacenter:
+ description:
+ - Name of the datacenter, where VM to be deployed.
+ type: str
+ required: true
+ datastore:
+ description:
+ - Name of the datastore to store deployed VM and disk.
+ type: str
+ required: false
+ datastore_cluster:
+ description:
+ - Name of the datastore cluster housing a datastore to store deployed VM and disk.
+ - If datastore is not specified, the recommended datastore from this cluster will be used.
+ type: str
+ required: false
+ folder:
+ description:
+ - Name of the folder in datacenter in which to place deployed VM.
+ type: str
+ default: 'vm'
+ host:
+ description:
+ - Name of the ESX Host in datacenter in which to place deployed VM. The host has to be a member of the cluster that contains the resource pool.
+ type: str
+ required: false
+ resource_pool:
+ description:
+ - Name of the resourcepool in datacenter in which to place deployed VM.
+ type: str
+ required: false
+ cluster:
+ description:
+ - Name of the cluster in datacenter in which to place deployed VM.
+ type: str
+ required: false
+ storage_provisioning:
+ description:
+ - Default storage provisioning type to use for all sections of type vmw:StorageSection in the OVF descriptor.
+ type: str
+ default: 'thin'
+ choices: [ thin, thick, eagerZeroedThick, eagerzeroedthick ]
+extends_documentation_fragment: community.vmware.vmware_rest_client.documentation
+'''
+
+EXAMPLES = r'''
+- name: Deploy Virtual Machine from OVF template in content library
+ community.vmware.vmware_content_deploy_ovf_template:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ ovf_template: rhel_test_template
+ datastore: Shared_NFS_Volume
+ folder: vm
+ datacenter: Sample_DC_1
+ name: Sample_VM
+ resource_pool: test_rp
+ delegate_to: localhost
+
+- name: Deploy Virtual Machine from OVF template in content library with eagerZeroedThick storage
+ vmware_content_deploy_ovf_template:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ ovf_template: rhel_test_template
+ datastore: Shared_NFS_Volume
+ folder: vm
+ datacenter: Sample_DC_1
+ name: Sample_VM
+ resource_pool: test_rp
+ storage_provisioning: eagerZeroedThick
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+vm_deploy_info:
+ description: Virtual machine deployment message and vm_id
+ returned: on success
+ type: dict
+ sample: {
+ "msg": "Deployed Virtual Machine 'Sample_VM'.",
+ "vm_id": "vm-1009"
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi
+
+HAS_VAUTOMATION = False
+try:
+ from com.vmware.vcenter.ovf_client import LibraryItem
+ from com.vmware.vapi.std.errors_client import Error
+ HAS_VAUTOMATION = True
+except ImportError:
+ pass
+
+
+class VmwareContentDeployOvfTemplate(VmwareRestClient):
+ def __init__(self, module):
+ """Constructor."""
+ super(VmwareContentDeployOvfTemplate, self).__init__(module)
+
+ # Initialize member variables
+ self.module = module
+ self._pyv = PyVmomi(module=module)
+ self._template_service = self.api_client.vcenter.vm_template.LibraryItems
+ self._datacenter_id = None
+ self._datastore_id = None
+ self._library_item_id = None
+ self._folder_id = None
+ self._host_id = None
+ self._cluster_id = None
+ self._resourcepool_id = None
+ self.result = {}
+
+ # Turn on debug if not specified, but ANSIBLE_DEBUG is set
+ if self.module._debug:
+ self.warn('Enable debug output because ANSIBLE_DEBUG was set.')
+ self.params['log_level'] = 'debug'
+ self.log_level = self.params['log_level']
+ if self.log_level == 'debug':
+ # Turn on debugging
+ self.result['debug'] = {}
+
+ # Get parameters
+ self.template = self.params.get('template')
+ self.library = self.params.get('library')
+ self.vm_name = self.params.get('name')
+ self.datacenter = self.params.get('datacenter')
+ self.datastore = self.params.get('datastore')
+ self.datastore_cluster = self.params.get('datastore_cluster')
+ self.folder = self.params.get('folder')
+ self.resourcepool = self.params.get('resource_pool')
+ self.cluster = self.params.get('cluster')
+ self.host = self.params.get('host')
+ self.storage_provisioning = self.params['storage_provisioning']
+ if self.storage_provisioning == 'eagerzeroedthick':
+ self.storage_provisioning = 'eagerZeroedThick'
+
+ vm = self._pyv.get_vm()
+ if vm:
+ self.result['vm_deploy_info'] = dict(
+ msg="Virtual Machine '%s' already Exists." % self.vm_name,
+ vm_id=vm._moId,
+ )
+ self._fail(msg="Virtual Machine deployment failed")
+
+ def deploy_vm_from_ovf_template(self):
+ # Find the datacenter by the given datacenter name
+ self._datacenter_id = self.get_datacenter_by_name(self.datacenter)
+ if not self._datacenter_id:
+ self._fail(msg="Failed to find the datacenter %s" % self.datacenter)
+
+ # Find the datastore by the given datastore name
+ if self.datastore:
+ self._datastore_id = self.get_datastore_by_name(self.datacenter, self.datastore)
+ if not self._datastore_id:
+ self._fail(msg="Failed to find the datastore %s" % self.datastore)
+
+ # Find the datastore by the given datastore cluster name
+ if self.datastore_cluster and not self._datastore_id:
+ dsc = self._pyv.find_datastore_cluster_by_name(self.datastore_cluster)
+ if dsc:
+ self.datastore = self._pyv.get_recommended_datastore(dsc)
+ self._datastore_id = self.get_datastore_by_name(self.datacenter, self.datastore)
+ else:
+ self._fail(msg="Failed to find the datastore cluster %s" % self.datastore_cluster)
+
+ if not self._datastore_id:
+ self._fail(msg="Failed to find the datastore using either datastore or datastore cluster")
+
+ # Find the LibraryItem (Template) by the given LibraryItem name
+ if self.library:
+ self._library_item_id = self.get_library_item_from_content_library_name(
+ self.template, self.library
+ )
+ if not self._library_item_id:
+ self._fail(msg="Failed to find the library Item %s in content library %s" % (self.template, self.library))
+ else:
+ self._library_item_id = self.get_library_item_by_name(self.template)
+ if not self._library_item_id:
+ self._fail(msg="Failed to find the library Item %s" % self.template)
+
+ # Find the folder by the given FQPN folder name
+ # The FQPN is I(datacenter)/I(folder type)/folder name/... for
+ # example Lab/vm/someparent/myfolder is a vm folder in the Lab datacenter.
+ folder_obj = self._pyv.find_folder_by_fqpn(self.folder, self.datacenter, folder_type='vm')
+ if folder_obj:
+ self._folder_id = folder_obj._moId
+ if not self._folder_id:
+ self._fail(msg="Failed to find the folder %s" % self.folder)
+
+ # Find the Host by the given name
+ if self.host:
+ self._host_id = self.get_host_by_name(self.datacenter, self.host)
+ if not self._host_id:
+ self._fail(msg="Failed to find the Host %s" % self.host)
+
+ # Find the Cluster by the given Cluster name
+ if self.cluster:
+ self._cluster_id = self.get_cluster_by_name(self.datacenter, self.cluster)
+ if not self._cluster_id:
+ self._fail(msg="Failed to find the Cluster %s" % self.cluster)
+ cluster_obj = self.api_client.vcenter.Cluster.get(self._cluster_id)
+ self._resourcepool_id = cluster_obj.resource_pool
+
+ # Find the resourcepool by the given resourcepool name
+ if self.resourcepool:
+ self._resourcepool_id = self.get_resource_pool_by_name(self.datacenter, self.resourcepool, self.cluster, self.host)
+ if not self._resourcepool_id:
+ self._fail(msg="Failed to find the resource_pool %s" % self.resourcepool)
+
+ if not self._resourcepool_id:
+ self._fail(msg="Failed to find a resource pool either by name or cluster")
+
+ deployment_target = LibraryItem.DeploymentTarget(
+ resource_pool_id=self._resourcepool_id,
+ folder_id=self._folder_id
+ )
+
+ self.ovf_summary = self.api_client.vcenter.ovf.LibraryItem.filter(
+ ovf_library_item_id=self._library_item_id,
+ target=deployment_target
+ )
+
+ self.deploy_spec = LibraryItem.ResourcePoolDeploymentSpec(
+ name=self.vm_name,
+ annotation=self.ovf_summary.annotation,
+ accept_all_eula=True,
+ network_mappings=None,
+ storage_mappings=None,
+ storage_provisioning=self.storage_provisioning,
+ storage_profile_id=None,
+ locale=None,
+ flags=None,
+ additional_parameters=None,
+ default_datastore_id=self._datastore_id
+ )
+
+ response = {
+ 'succeeded': False
+ }
+ try:
+ response = self.api_client.vcenter.ovf.LibraryItem.deploy(self._library_item_id, deployment_target, self.deploy_spec)
+ except Error as error:
+ self._fail(msg="%s" % self.get_error_message(error))
+ except Exception as err:
+ self._fail(msg="%s" % to_native(err))
+
+ if not response.succeeded:
+ self.result['vm_deploy_info'] = dict(
+ msg="Virtual Machine deployment failed",
+ vm_id=''
+ )
+ self._fail(msg="Virtual Machine deployment failed")
+ self.result['changed'] = True
+ self.result['vm_deploy_info'] = dict(
+ msg="Deployed Virtual Machine '%s'." % self.vm_name,
+ vm_id=response.resource_id.id,
+ )
+ self._exit()
+
+ #
+ # Wrap AnsibleModule methods
+ #
+
+ def _mod_debug(self):
+ if self.log_level == 'debug':
+ self.result['debug'].update(
+ dict(
+ datacenter_id=self._datacenter_id,
+ datastore_id=self._datastore_id,
+ library_item_id=self._library_item_id,
+ folder_id=self._folder_id,
+ host_id=self._host_id,
+ cluster_id=self._cluster_id,
+ resourcepool_id=self._resourcepool_id,
+ )
+ )
+
+ def _fail(self, msg):
+ self._mod_debug()
+ self.module.fail_json(msg=msg, **self.result)
+
+ def _exit(self):
+ self._mod_debug()
+ self.module.exit_json(**self.result)
+
+
+def main():
+ argument_spec = VmwareRestClient.vmware_client_argument_spec()
+ argument_spec.update(
+ log_level=dict(
+ type='str',
+ choices=[
+ 'debug',
+ 'info',
+ 'normal',
+ ],
+ default='normal'
+ ),
+ template=dict(
+ type='str',
+ aliases=[
+ 'ovf',
+ 'ovf_template',
+ 'template_src'
+ ],
+ required=True
+ ),
+ library=dict(
+ type='str',
+ aliases=[
+ 'content_library',
+ 'content_library_src'
+ ],
+ required=False
+ ),
+ name=dict(
+ type='str',
+ aliases=[
+ 'vm_name'
+ ],
+ required=True
+ ),
+ datacenter=dict(
+ type='str',
+ required=True
+ ),
+ datastore=dict(
+ type='str',
+ required=False
+ ),
+ datastore_cluster=dict(
+ type='str',
+ required=False
+ ),
+ folder=dict(
+ type='str',
+ default='vm'
+ ),
+ host=dict(
+ type='str',
+ required=False
+ ),
+ resource_pool=dict(
+ type='str',
+ required=False
+ ),
+ cluster=dict(
+ type='str',
+ required=False
+ ),
+ storage_provisioning=dict(
+ type='str',
+ choices=[
+ 'thin',
+ 'thick',
+ 'eagerZeroedThick',
+ 'eagerzeroedthick'
+ ],
+ default='thin',
+ fallback=(
+ env_fallback,
+ ['VMWARE_STORAGE_PROVISIONING']
+ )
+ ),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['datastore', 'datastore_cluster'],
+ ['host', 'cluster'],
+ ],
+ )
+
+ result = {'failed': False, 'changed': False}
+ vmware_contentlib_create = VmwareContentDeployOvfTemplate(module)
+ if module.check_mode:
+ result.update(
+ vm_name=module.params['name'],
+ changed=True,
+ desired_operation='Create VM with PowerOff State',
+ )
+ module.exit_json(**result)
+ vmware_contentlib_create.deploy_vm_from_ovf_template()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_content_deploy_template.py b/ansible_collections/community/vmware/plugins/modules/vmware_content_deploy_template.py
new file mode 100644
index 000000000..6a10d7daa
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_content_deploy_template.py
@@ -0,0 +1,460 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# Copyright: (c) 2019, Pavan Bidkar <pbidkar@vmware.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_content_deploy_template
+short_description: Deploy Virtual Machine from template stored in content library.
+description:
+- Module to deploy virtual machine from template in content library.
+- Content Library feature is introduced in vSphere 6.0 version.
+- vmtx templates feature is introduced in vSphere 6.7 Update 1, and APIs for cloning a template from a content library in 6.7 Update 2.
+- This module does not work with vSphere versions older than 6.7 Update 2.
+- All variables and VMware object names are case sensitive.
+author:
+- Pavan Bidkar (@pgbidkar)
+requirements:
+- vSphere Automation SDK
+options:
+ log_level:
+ description:
+ - The level of logging desired in this module.
+ type: str
+ required: false
+ default: 'normal'
+ choices: [ 'debug', 'info', 'normal' ]
+ template:
+ description:
+    - The name of the template from which the VM is to be deployed.
+ type: str
+ required: true
+ aliases: ['template_src']
+ library:
+ description:
+ - The name of the content library from where the template resides.
+ type: str
+ required: false
+ aliases: ['content_library', 'content_library_src']
+ name:
+ description:
+ - The name of the VM to be deployed.
+ type: str
+ required: true
+ aliases: ['vm_name']
+ datacenter:
+ description:
+    - Name of the datacenter where the VM is to be deployed.
+ type: str
+ required: true
+ datastore:
+ description:
+ - Name of the datastore to store deployed VM and disk.
+ - Required if I(datastore_cluster) is not provided.
+ type: str
+ required: false
+ datastore_cluster:
+ description:
+ - Name of the datastore cluster to store deployed VM and disk.
+ - Please make sure Storage DRS is active for recommended datastore from the given datastore cluster.
+ - If Storage DRS is not enabled, datastore with largest free storage space is selected.
+ - Required if I(datastore) is not provided.
+ type: str
+ required: false
+ folder:
+ description:
+ - Name of the folder in datacenter in which to place deployed VM.
+ type: str
+ default: 'vm'
+ host:
+ description:
+ - Name of the ESX Host in datacenter in which to place deployed VM.
+ - The host has to be a member of the cluster that contains the resource pool.
+ - Required with I(resource_pool) to find resource pool details. This will be used as additional
+ information when there are resource pools with same name.
+ type: str
+ required: false
+ resource_pool:
+ description:
+ - Name of the resource pool in datacenter in which to place deployed VM.
+ - Required if I(cluster) is not specified.
+ - For default or non-unique resource pool names, specify I(host) and I(cluster).
+ - C(Resources) is the default name of resource pool.
+ type: str
+ required: false
+ cluster:
+ description:
+ - Name of the cluster in datacenter in which to place deployed VM.
+ - Required if I(resource_pool) is not specified.
+ type: str
+ required: false
+ state:
+ description:
+ - The state of Virtual Machine deployed from template in content library.
+    - If set to C(present) and the VM does not exist, then the VM is created.
+    - If set to C(present) and the VM exists, no action is taken.
+    - If set to C(poweredon) and the VM does not exist, then the VM is created in the powered-on state.
+    - If set to C(poweredon) and the VM exists, no action is taken.
+ type: str
+ required: false
+ default: 'present'
+ choices: [ 'present', 'poweredon' ]
+extends_documentation_fragment:
+- community.vmware.vmware_rest_client.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Deploy Virtual Machine from template in content library
+ community.vmware.vmware_content_deploy_template:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ template: rhel_test_template
+ datastore: Shared_NFS_Volume
+ folder: vm
+ datacenter: Sample_DC_1
+ name: Sample_VM
+ resource_pool: test_rp
+ state: present
+ delegate_to: localhost
+
+- name: Deploy Virtual Machine from template in content library with PowerON State
+ community.vmware.vmware_content_deploy_template:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ template: rhel_test_template
+ content_library: test_content_library
+ datastore: Shared_NFS_Volume
+ folder: vm
+ datacenter: Sample_DC_1
+ name: Sample_VM
+ resource_pool: test_rp
+ state: poweredon
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+vm_deploy_info:
+ description: Virtual machine deployment message and vm_id
+ returned: on success
+ type: dict
+ sample: {
+ "msg": "Deployed Virtual Machine 'Sample_VM'.",
+ "vm_id": "vm-1009"
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi
+from ansible.module_utils._text import to_native
+
+HAS_VAUTOMATION_PYTHON_SDK = False
+try:
+ from com.vmware.vcenter.vm_template_client import LibraryItems
+ from com.vmware.vapi.std.errors_client import Error
+ HAS_VAUTOMATION_PYTHON_SDK = True
+except ImportError:
+ pass
+
+
+class VmwareContentDeployTemplate(VmwareRestClient):
+ def __init__(self, module):
+ """Constructor."""
+ super(VmwareContentDeployTemplate, self).__init__(module)
+
+ # Initialize member variables
+ self.module = module
+ self._pyv = PyVmomi(module=module)
+ self._template_service = self.api_client.vcenter.vm_template.LibraryItems
+ self._datacenter_id = None
+ self._datastore_id = None
+ self._library_item_id = None
+ self._folder_id = None
+ self._host_id = None
+ self._cluster_id = None
+ self._resourcepool_id = None
+ self.result = {}
+
+ # Turn on debug if not specified, but ANSIBLE_DEBUG is set
+ if self.module._debug:
+ self.warn('Enable debug output because ANSIBLE_DEBUG was set.')
+ self.params['log_level'] = 'debug'
+ self.log_level = self.params['log_level']
+ if self.log_level == 'debug':
+ # Turn on debugging
+ self.result['debug'] = {}
+
+ # Get parameters
+ self.template = self.params.get('template')
+ self.library = self.params.get('library')
+ self.vm_name = self.params.get('name')
+ self.datacenter = self.params.get('datacenter')
+ self.datastore = self.params.get('datastore')
+ self.datastore_cluster = self.params.get('datastore_cluster')
+ self.folder = self.params.get('folder')
+ self.resourcepool = self.params.get('resource_pool')
+ self.cluster = self.params.get('cluster')
+ self.host = self.params.get('host')
+
+ vm = self._pyv.get_vm()
+ if vm:
+ self.result['vm_deploy_info'] = dict(
+ msg="Virtual Machine '%s' already Exists." % self.vm_name,
+ vm_id=vm._moId,
+ )
+ self._fail(msg="Virtual Machine deployment failed")
+
+ def deploy_vm_from_template(self, power_on=False):
+ # Find the datacenter by the given datacenter name
+ self._datacenter_id = self.get_datacenter_by_name(self.datacenter)
+ if not self._datacenter_id:
+ self._fail(msg="Failed to find the datacenter %s" % self.datacenter)
+
+ # Find the datastore by the given datastore name
+ if self.datastore:
+ self._datastore_id = self.get_datastore_by_name(self.datacenter, self.datastore)
+ if not self._datastore_id:
+ self._fail(msg="Failed to find the datastore %s" % self.datastore)
+
+ # Find the datastore by the given datastore cluster name
+ if self.datastore_cluster and not self._datastore_id:
+ dsc = self._pyv.find_datastore_cluster_by_name(self.datastore_cluster)
+ if dsc:
+ self.datastore = self._pyv.get_recommended_datastore(dsc)
+ self._datastore_id = self.get_datastore_by_name(self.datacenter, self.datastore)
+ else:
+ self._fail(msg="Failed to find the datastore cluster %s" % self.datastore_cluster)
+
+ if not self._datastore_id:
+ self._fail(msg="Failed to find the datastore using either datastore or datastore cluster")
+
+ # Find the LibraryItem (Template) by the given LibraryItem name
+ if self.library:
+ self._library_item_id = self.get_library_item_from_content_library_name(
+ self.template, self.library
+ )
+ if not self._library_item_id:
+ self._fail(msg="Failed to find the library Item %s in content library %s" % (self.template, self.library))
+ else:
+ self._library_item_id = self.get_library_item_by_name(self.template)
+ if not self._library_item_id:
+ self._fail(msg="Failed to find the library Item %s" % self.template)
+
+ # Find the folder by the given FQPN folder name
+ # The FQPN is I(datacenter)/I(folder type)/folder name/... for
+ # example Lab/vm/someparent/myfolder is a vm folder in the Lab datacenter.
+ folder_obj = self._pyv.find_folder_by_fqpn(self.folder, self.datacenter, folder_type='vm')
+ if folder_obj:
+ self._folder_id = folder_obj._moId
+ if not self._folder_id:
+ self._fail(msg="Failed to find the folder %s" % self.folder)
+
+ # Find the Host by the given name
+ if self.host:
+ self._host_id = self.get_host_by_name(self.datacenter, self.host)
+ if not self._host_id:
+ self._fail(msg="Failed to find the Host %s" % self.host)
+
+ # Find the Cluster by the given Cluster name
+ if self.cluster:
+ self._cluster_id = self.get_cluster_by_name(self.datacenter, self.cluster)
+ if not self._cluster_id:
+ self._fail(msg="Failed to find the Cluster %s" % self.cluster)
+ cluster_obj = self.api_client.vcenter.Cluster.get(self._cluster_id)
+ self._resourcepool_id = cluster_obj.resource_pool
+
+ # Find the resourcepool by the given resourcepool name
+ if self.resourcepool:
+ self._resourcepool_id = self.get_resource_pool_by_name(self.datacenter, self.resourcepool, self.cluster, self.host)
+ if not self._resourcepool_id:
+ self._fail(msg="Failed to find the resource_pool %s" % self.resourcepool)
+
+ # Create VM placement specs
+ self.placement_spec = LibraryItems.DeployPlacementSpec(folder=self._folder_id)
+ if self._host_id:
+ self.placement_spec.host = self._host_id
+ if self._resourcepool_id:
+ self.placement_spec.resource_pool = self._resourcepool_id
+ if self._cluster_id:
+ self.placement_spec.cluster = self._cluster_id
+ self.vm_home_storage_spec = LibraryItems.DeploySpecVmHomeStorage(
+ datastore=to_native(self._datastore_id)
+ )
+ self.disk_storage_spec = LibraryItems.DeploySpecDiskStorage(
+ datastore=to_native(self._datastore_id)
+ )
+ self.deploy_spec = LibraryItems.DeploySpec(
+ name=self.vm_name,
+ placement=self.placement_spec,
+ vm_home_storage=self.vm_home_storage_spec,
+ disk_storage=self.disk_storage_spec,
+ powered_on=power_on
+ )
+ vm_id = ''
+ try:
+ vm_id = self._template_service.deploy(self._library_item_id, self.deploy_spec)
+ except Error as error:
+ self._fail(msg="%s" % self.get_error_message(error))
+ except Exception as err:
+ self._fail(msg="%s" % to_native(err))
+
+ if not vm_id:
+ self.result['vm_deploy_info'] = dict(
+ msg="Virtual Machine deployment failed",
+ vm_id=''
+ )
+ self._fail(msg="Virtual Machine deployment failed")
+ self.result['changed'] = True
+ self.result['vm_deploy_info'] = dict(
+ msg="Deployed Virtual Machine '%s'." % self.vm_name,
+ vm_id=vm_id,
+ )
+ self._exit()
+
+ #
+ # Wrap AnsibleModule methods
+ #
+
+ def _mod_debug(self):
+ if self.log_level == 'debug':
+ self.result['debug'] = dict(
+ datacenter_id=self._datacenter_id,
+ datastore_id=self._datastore_id,
+ library_item_id=self._library_item_id,
+ folder_id=self._folder_id,
+ host_id=self._host_id,
+ cluster_id=self._cluster_id,
+ resourcepool_id=self._resourcepool_id
+ )
+
+ def _fail(self, msg):
+ self._mod_debug()
+ self.module.fail_json(msg=msg, **self.result)
+
+ def _exit(self):
+ self._mod_debug()
+ self.module.exit_json(**self.result)
+
+
+def main():
+ argument_spec = VmwareRestClient.vmware_client_argument_spec()
+ argument_spec.update(
+ log_level=dict(
+ type='str',
+ choices=[
+ 'debug',
+ 'info',
+ 'normal',
+ ],
+ default='normal'
+ ),
+ state=dict(
+ type='str',
+ choices=[
+ 'present',
+ 'poweredon'
+ ],
+ default='present'
+ ),
+ template=dict(
+ type='str',
+ aliases=[
+ 'template_src'
+ ],
+ required=True
+ ),
+ library=dict(
+ type='str',
+ aliases=[
+ 'content_library',
+ 'content_library_src',
+ ],
+ required=False
+ ),
+ name=dict(
+ type='str',
+ aliases=[
+ 'vm_name'
+ ],
+ required=True,
+ ),
+ datacenter=dict(
+ type='str',
+ required=True
+ ),
+ datastore=dict(
+ type='str',
+ required=False
+ ),
+ datastore_cluster=dict(
+ type='str',
+ required=False
+ ),
+ folder=dict(
+ type='str',
+ default='vm'
+ ),
+ host=dict(
+ type='str',
+ required=False
+ ),
+ resource_pool=dict(
+ type='str',
+ required=False
+ ),
+ cluster=dict(
+ type='str',
+ required=False
+ ),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['datastore', 'datastore_cluster'],
+ ['host', 'cluster'],
+ ],
+ )
+
+ result = {'failed': False, 'changed': False}
+ vmware_contentlib_create = VmwareContentDeployTemplate(module)
+ if module.params['state'] == 'present':
+ if module.check_mode:
+ result.update(
+ vm_name=module.params['name'],
+ changed=True,
+ desired_operation='Create VM with PowerOff State',
+ )
+ module.exit_json(**result)
+ vmware_contentlib_create.deploy_vm_from_template()
+ elif module.params['state'] == 'poweredon':
+ if module.check_mode:
+ result.update(
+ vm_name=module.params['name'],
+ changed=True,
+ desired_operation='Create VM with PowerON State',
+ )
+ module.exit_json(**result)
+ vmware_contentlib_create.deploy_vm_from_template(power_on=True)
+ else:
+ result.update(
+ vm_name=module.params['name'],
+ changed=False,
+ desired_operation="State '%s' is not implemented" % module.params['state']
+ )
+ module.fail_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_content_library_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_content_library_info.py
new file mode 100644
index 000000000..69cbb7cd0
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_content_library_info.py
@@ -0,0 +1,168 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# Copyright: (c) 2019, Pavan Bidkar <pbidkar@vmware.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_content_library_info
+short_description: Gather information about VMWare Content Library
+description:
+- Module to list the content libraries.
+- Module to get information about a specific content library.
+- Content Library feature is introduced in vSphere 6.0 version, so this module is not supported in the earlier versions of vSphere.
+- All variables and VMware object names are case sensitive.
+author:
+- Pavan Bidkar (@pgbidkar)
+requirements:
+- vSphere Automation SDK
+options:
+ library_id:
+ description:
+    - Content library ID for which details need to be fetched.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.vmware.vmware_rest_client.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Get List of Content Libraries
+ community.vmware.vmware_content_library_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ delegate_to: localhost
+
+- name: Get information about content library
+ community.vmware.vmware_content_library_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ library_id: '13b0f060-f4d3-4f84-b61f-0fe1b0c0a5a8'
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+content_lib_details:
+ description: list of content library metadata
+ returned: on success
+ type: list
+ sample: [
+ {
+ "library_creation_time": "2019-07-02T11:50:52.242000",
+ "library_description": "new description",
+ "library_id": "13b0f060-f4d3-4f84-b61f-0fe1b0c0a5a8",
+ "library_name": "demo-local-lib",
+ "library_publish_info": {
+ "authentication_method": "NONE",
+ "persist_json_enabled": false,
+ "publish_url": null,
+ "published": false,
+ "user_name": null
+ },
+ "library_server_guid": "0fd5813b-aac7-4b92-9fb7-f18f16565613",
+ "library_type": "LOCAL",
+ "library_version": "3"
+ }
+ ]
+content_libs:
+ description: list of content libraries
+ returned: on success
+ type: list
+ sample: [
+ "ded9c4d5-0dcd-4837-b1d8-af7398511e33",
+ "36b72549-14ed-4b5f-94cb-6213fecacc02"
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient
+
+
+class VmwareContentLibInfo(VmwareRestClient):
+ def __init__(self, module):
+ """Constructor."""
+ super(VmwareContentLibInfo, self).__init__(module)
+ self.content_service = self.api_client
+ self.local_content_libraries = self.content_service.content.LocalLibrary.list()
+ if self.local_content_libraries is None:
+ self.local_content_libraries = []
+
+ self.subscribed_content_libraries = self.content_service.content.SubscribedLibrary.list()
+ if self.subscribed_content_libraries is None:
+ self.subscribed_content_libraries = []
+
+ self.library_info = []
+
+ def get_all_content_libs(self):
+ """Method to retrieve List of content libraries."""
+ content_libraries = self.local_content_libraries + self.subscribed_content_libraries
+
+ self.module.exit_json(changed=False, content_libs=content_libraries)
+
+ def get_content_lib_details(self, library_id):
+ """Method to retrieve Details of contentlib with library_id"""
+ lib_publish_info = None
+
+ if library_id in self.local_content_libraries:
+ try:
+ lib_details = self.content_service.content.LocalLibrary.get(library_id)
+ lib_publish_info = dict(
+ persist_json_enabled=lib_details.publish_info.persist_json_enabled,
+ authentication_method=lib_details.publish_info.authentication_method,
+ publish_url=lib_details.publish_info.publish_url,
+ published=lib_details.publish_info.published,
+ user_name=lib_details.publish_info.user_name
+ )
+ except Exception as e:
+ self.module.fail_json(exists=False, msg="%s" % self.get_error_message(e))
+ elif library_id in self.subscribed_content_libraries:
+ try:
+ lib_details = self.content_service.content.SubscribedLibrary.get(library_id)
+ except Exception as e:
+ self.module.fail_json(exists=False, msg="%s" % self.get_error_message(e))
+ else:
+ self.module.fail_json(exists=False, msg="Library %s not found." % library_id)
+
+ self.library_info.append(
+ dict(
+ library_name=lib_details.name,
+ library_description=lib_details.description,
+ library_id=lib_details.id,
+ library_type=lib_details.type,
+ library_creation_time=lib_details.creation_time,
+ library_server_guid=lib_details.server_guid,
+ library_version=lib_details.version,
+ library_publish_info=lib_publish_info
+ )
+ )
+
+ self.module.exit_json(exists=False, changed=False, content_lib_details=self.library_info)
+
+
+def main():
+ argument_spec = VmwareRestClient.vmware_client_argument_spec()
+ argument_spec.update(
+ library_id=dict(type='str', required=False),
+ )
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ vmware_contentlib_info = VmwareContentLibInfo(module)
+ if module.params.get('library_id'):
+ vmware_contentlib_info.get_content_lib_details(module.params['library_id'])
+ else:
+ vmware_contentlib_info.get_all_content_libs()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_content_library_manager.py b/ansible_collections/community/vmware/plugins/modules/vmware_content_library_manager.py
new file mode 100644
index 000000000..4b52b6cd1
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_content_library_manager.py
@@ -0,0 +1,438 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# Copyright: (c) 2019, Pavan Bidkar <pbidkar@vmware.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_content_library_manager
+short_description: Create, update and delete VMware content library
+description:
+- Module to manage a VMware content library.
+- Content Library feature is introduced in vSphere 6.0 version, so this module is not supported in the earlier versions of vSphere.
+- All variables and VMware object names are case sensitive.
+author:
+- Pavan Bidkar (@pgbidkar)
+requirements:
+- vSphere Automation SDK
+options:
+ library_name:
+ description:
+ - The name of VMware content library to manage.
+ type: str
+ required: true
+ library_description:
+ description:
+ - The content library description.
+ - This is required only if I(state) is set to C(present).
+ - This parameter is ignored, when I(state) is set to C(absent).
+ - Process of updating content library only allows description change.
+ type: str
+ required: false
+ library_type:
+ description:
+ - The content library type.
+ - This is required only if I(state) is set to C(present).
+ - This parameter is ignored, when I(state) is set to C(absent).
+ type: str
+ required: false
+ default: 'local'
+ choices: [ 'local', 'subscribed' ]
+ datastore_name:
+ description:
+ - Name of the datastore on which backing content library is created.
+ - This is required only if I(state) is set to C(present).
+ - This parameter is ignored, when I(state) is set to C(absent).
+ - Currently only datastore backing creation is supported.
+ type: str
+ required: false
+ aliases: ['datastore']
+ subscription_url:
+ description:
+ - The url of the content library to subscribe to.
+ - This is required only if I(library_type) is set to C(subscribed).
+ - This parameter is ignored, when I(state) is set to C(absent).
+ type: str
+ default: ''
+ required: false
+ ssl_thumbprint:
+ description:
+ - The SHA1 SSL thumbprint of the subscribed content library to subscribe to.
+ - This is required only if I(library_type) is set to C(subscribed) and the library is https.
+ - This parameter is ignored, when I(state) is set to C(absent).
+ - 'The information can be extracted using openssl using the following example:
+ C(echo | openssl s_client -connect test-library.com:443 |& openssl x509 -fingerprint -noout)'
+ type: str
+ default: ''
+ required: false
+ update_on_demand:
+ description:
+ - Whether to download all content on demand.
+ - If set to C(true), all content will be downloaded on demand.
+ - If set to C(false) content will be downloaded ahead of time.
+ - This is required only if I(library_type) is set to C(subscribed).
+ - This parameter is ignored, when I(state) is set to C(absent).
+ type: bool
+ default: false
+ state:
+ description:
+ - The state of content library.
+    - If set to C(present) and the library does not exist, then the content library is created.
+    - If set to C(present) and the library exists, then the content library is updated.
+    - If set to C(absent) and the library exists, then the content library is deleted.
+    - If set to C(absent) and the library does not exist, no action is taken.
+ type: str
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+extends_documentation_fragment:
+- community.vmware.vmware_rest_client.documentation
+
+'''
+EXAMPLES = r'''
+- name: Create Local Content Library
+ community.vmware.vmware_content_library_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ library_name: test-content-lib
+ library_description: 'Library with Datastore Backing'
+ library_type: local
+ datastore_name: datastore
+ state: present
+ delegate_to: localhost
+
+- name: Create Subscribed Content Library
+ community.vmware.vmware_content_library_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ library_name: test-content-lib
+ library_description: 'Subscribed Library with Datastore Backing'
+ library_type: subscribed
+ datastore_name: datastore
+ subscription_url: 'https://library.url'
+ ssl_thumbprint: 'aa:bb:cc:dd:ee:ff:gg:hh:ii:jj:kk:ll:mm:nn:oo:pp:qq:rr:ss:tt'
+ update_on_demand: true
+ state: present
+ delegate_to: localhost
+
+- name: Update Content Library
+ community.vmware.vmware_content_library_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ library_name: test-content-lib
+ library_description: 'Library with Datastore Backing'
+ state: present
+ delegate_to: localhost
+
+- name: Delete Content Library
+ community.vmware.vmware_content_library_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ library_name: test-content-lib
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+content_library_info:
+ description: library creation success and library_id
+ returned: on success
+ type: dict
+ sample: {
+ "library_id": "d0b92fa9-7039-4f29-8e9c-0debfcb22b72",
+ "library_description": 'Test description',
+ "library_type": 'LOCAL',
+ "msg": "Content Library 'demo-local-lib-4' created.",
+ }
+'''
+
+import uuid
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi
+
+HAS_VAUTOMATION_PYTHON_SDK = False
+try:
+ from com.vmware.content_client import LibraryModel
+ from com.vmware.content.library_client import StorageBacking, SubscriptionInfo
+ from com.vmware.vapi.std.errors_client import ResourceInaccessible
+ HAS_VAUTOMATION_PYTHON_SDK = True
+except ImportError:
+ pass
+
+
+class VmwareContentLibCreate(VmwareRestClient):
+ def __init__(self, module):
+ """Constructor."""
+ super(VmwareContentLibCreate, self).__init__(module)
+ self.content_service = self.api_client
+ self.local_libraries = dict()
+ # Track all existing library names, to block update/delete if duplicates exist
+ self.existing_library_names = []
+ self.library_name = self.params.get('library_name')
+ self.library_description = self.params.get('library_description')
+ self.library_type = self.params.get('library_type')
+ self.library_types = dict()
+ self.subscription_url = self.params.get('subscription_url')
+ self.ssl_thumbprint = self.params.get('ssl_thumbprint')
+ self.datastore_name = self.params.get('datastore_name')
+ self.update_on_demand = self.params.get('update_on_demand')
+ self.library_types = {
+ 'local': self.content_service.content.LocalLibrary,
+ 'subscribed': self.content_service.content.SubscribedLibrary
+ }
+
+ # Import objects of both types to prevent duplicate names
+ self.get_all_libraries(self.library_types['local'])
+ self.get_all_libraries(self.library_types['subscribed'])
+
+ # Set library type for create/update actions
+ self.library_service = self.library_types[self.library_type]
+ self.pyv = PyVmomi(module=module)
+
+ def process_state(self):
+ """
+ Manage states of Content Library
+ """
+ self.desired_state = self.params.get('state')
+ library_states = {
+ 'absent': {
+ 'present': self.state_destroy_library,
+ 'absent': self.state_exit_unchanged,
+ },
+ 'present': {
+ 'present': self.state_update_library,
+ 'absent': self.state_create_library,
+ }
+ }
+ library_states[self.desired_state][self.check_content_library_status()]()
+
+ def get_all_libraries(self, library_service):
+ content_libs = library_service.list()
+ if content_libs:
+ for content_lib in content_libs:
+ lib_details = library_service.get(content_lib)
+ lib_dict = dict(
+ lib_name=lib_details.name,
+ lib_description=lib_details.description,
+ lib_id=lib_details.id,
+ lib_type=lib_details.type
+ )
+ if lib_details.type == "SUBSCRIBED":
+ lib_dict["lib_sub_url"] = lib_details.subscription_info.subscription_url
+ lib_dict["lib_sub_on_demand"] = lib_details.subscription_info.on_demand
+ lib_dict["lib_sub_ssl_thumbprint"] = lib_details.subscription_info.ssl_thumbprint
+
+ self.local_libraries[lib_details.name] = lib_dict
+ self.existing_library_names.append(lib_details.name)
+
+ def check_content_library_status(self):
+ """
+ Check if Content Library exists or not
+ Returns: 'present' if library found, else 'absent'
+
+ """
+ ret = 'present' if self.library_name in self.local_libraries else 'absent'
+ return ret
+
+ def fail_when_duplicated(self):
+ if self.existing_library_names.count(self.library_name) > 1:
+ self.module.fail_json(msg="Operation cannot continue, library [%s] is not unique" % self.library_name)
+
+ def state_exit_unchanged(self):
+ """
+ Return unchanged state
+
+ """
+ self.module.exit_json(changed=False)
+
+ def set_subscription_spec(self):
+ if "https:" in self.subscription_url and not self.ssl_thumbprint:
+ self.module.fail_json(msg="While using HTTPS, a SSL thumbprint must be provided.")
+ subscription_info = SubscriptionInfo()
+ subscription_info.on_demand = self.update_on_demand
+ subscription_info.automatic_sync_enabled = True
+ subscription_info.subscription_url = self.subscription_url
+
+ if "https:" in self.subscription_url:
+ subscription_info.ssl_thumbprint = self.ssl_thumbprint
+ return subscription_info
+
+ def create_update(self, spec, library_id=None, update=False):
+ """
+ Create or update call and exit cleanly if call completes
+ """
+ if self.module.check_mode:
+ action = 'would be updated' if update else 'would be created'
+ else:
+ try:
+ if update:
+ self.library_service.update(library_id, spec)
+ action = "updated"
+ else:
+ library_id = self.library_service.create(
+ create_spec=spec,
+ client_token=str(uuid.uuid4())
+ )
+ action = "created"
+ except ResourceInaccessible as e:
+ message = ("vCenter Failed to make connection to %s with exception: %s "
+ "If using HTTPS, check that the SSL thumbprint is valid" % (self.subscription_url, str(e)))
+ self.module.fail_json(msg=message)
+
+ content_library_info = dict(
+ msg="Content Library '%s' %s." % (spec.name, action),
+ library_id=library_id,
+ library_description=self.library_description,
+ library_type=spec.type,
+ )
+ if spec.type == "SUBSCRIBED":
+ content_library_info["library_subscription_url"] = spec.subscription_info.subscription_url
+ content_library_info["library_subscription_on_demand"] = spec.subscription_info.on_demand
+ content_library_info["library_subscription_ssl_thumbprint"] = spec.subscription_info.ssl_thumbprint
+ self.module.exit_json(
+ changed=True,
+ content_library_info=content_library_info
+ )
+
+    def state_create_library(self):
+        """
+        Create a new local or subscribed content library.
+
+        Requires datastore_name; resolves it to a datastore id, builds the
+        storage backing and the library spec, then delegates the actual API
+        call (and module exit) to create_update().
+        """
+        # Fail if no datastore is specified
+        if not self.datastore_name:
+            self.module.fail_json(msg="datastore_name must be specified for create operations")
+        # Find the datastore by the given datastore name
+        datastore_id = self.pyv.find_datastore_by_name(datastore_name=self.datastore_name)
+        if not datastore_id:
+            self.module.fail_json(msg="Failed to find the datastore %s" % self.datastore_name)
+        self.datastore_id = datastore_id._moId
+        # Build the storage backing for the library to be created
+        storage_backings = []
+        storage_backing = StorageBacking(type=StorageBacking.Type.DATASTORE, datastore_id=self.datastore_id)
+        storage_backings.append(storage_backing)
+
+        # Build the specification for the library to be created
+        create_spec = LibraryModel()
+        create_spec.name = self.library_name
+        create_spec.description = self.library_description
+        # NOTE(review): this rebinds self.library_types to LibraryType enum values,
+        # while state_destroy_library indexes self.library_types expecting a library
+        # *service* object exposing .delete() -- confirm the two uses never conflict.
+        self.library_types = {'local': create_spec.LibraryType.LOCAL,
+                              'subscribed': create_spec.LibraryType.SUBSCRIBED}
+        create_spec.type = self.library_types[self.library_type]
+        create_spec.storage_backings = storage_backings
+
+        # Build subscribed specification
+        if self.library_type == "subscribed":
+            subscription_info = self.set_subscription_spec()
+            # Authentication is always set to NONE for the subscription.
+            subscription_info.authentication_method = SubscriptionInfo.AuthenticationMethod.NONE
+            create_spec.subscription_info = subscription_info
+
+        self.create_update(spec=create_spec)
+
+ def state_update_library(self):
+ """
+ Update Content Library
+
+ """
+ self.fail_when_duplicated()
+ changed = False
+ library_id = self.local_libraries[self.library_name]['lib_id']
+
+ library_update_spec = LibraryModel()
+
+ # Ensure library types are consistent
+ existing_library_type = self.local_libraries[self.library_name]['lib_type'].lower()
+ if existing_library_type != self.library_type:
+ self.module.fail_json(msg="Library [%s] is of type %s, cannot be changed to %s" %
+ (self.library_name, existing_library_type, self.library_type))
+
+ # Compare changeable subscribed attributes
+ if self.library_type == "subscribed":
+ existing_subscription_url = self.local_libraries[self.library_name]['lib_sub_url']
+ sub_url_changed = (existing_subscription_url != self.subscription_url)
+
+ existing_on_demand = self.local_libraries[self.library_name]['lib_sub_on_demand']
+ sub_on_demand_changed = (existing_on_demand != self.update_on_demand)
+
+ sub_ssl_thumbprint_changed = False
+ if "https:" in self.subscription_url and self.ssl_thumbprint:
+ existing_ssl_thumbprint = self.local_libraries[self.library_name]['lib_sub_ssl_thumbprint']
+ sub_ssl_thumbprint_changed = (existing_ssl_thumbprint != self.ssl_thumbprint)
+
+ if sub_url_changed or sub_on_demand_changed or sub_ssl_thumbprint_changed:
+ subscription_info = self.set_subscription_spec()
+ library_update_spec.subscription_info = subscription_info
+ changed = True
+
+ # Compare description
+ library_desc = self.local_libraries[self.library_name]['lib_description']
+ desired_lib_desc = self.params.get('library_description')
+ if library_desc != desired_lib_desc:
+ library_update_spec.description = desired_lib_desc
+ changed = True
+
+ if changed:
+ library_update_spec.name = self.library_name
+ self.create_update(spec=library_update_spec, library_id=library_id, update=True)
+
+ content_library_info = dict(msg="Content Library %s is unchanged." % self.library_name, library_id=library_id)
+ self.module.exit_json(changed=False,
+ content_library_info=dict(msg=content_library_info, library_id=library_id))
+
+    def state_destroy_library(self):
+        """
+        Delete an existing Content Library.
+
+        Honors check mode: the delete API call is skipped in check mode,
+        but changed=True is reported either way.
+        """
+        self.fail_when_duplicated()
+        library_id = self.local_libraries[self.library_name]['lib_id']
+        # Setup library service based on existing object type to allow library_type to unspecified
+        # NOTE(review): this assumes self.library_types maps a lower-cased type name to a
+        # library *service* object with a .delete() method; state_create_library rebinds
+        # self.library_types to LibraryType enum values instead -- confirm these never clash.
+        library_service = self.library_types[self.local_libraries[self.library_name]['lib_type'].lower()]
+        if self.module.check_mode:
+            action = 'would be deleted'
+        else:
+            action = 'deleted'
+            library_service.delete(library_id=library_id)
+        self.module.exit_json(
+            changed=True,
+            content_library_info=dict(
+                msg="Content Library '%s' %s." % (self.library_name, action),
+                library_id=library_id
+            )
+        )
+
+
+def main():
+    """Module entry point: build the argument spec and dispatch to the state handler."""
+    argument_spec = VmwareRestClient.vmware_client_argument_spec()
+    argument_spec.update(
+        library_name=dict(type='str', required=True),
+        library_description=dict(type='str', required=False),
+        library_type=dict(type='str', required=False, choices=['local', 'subscribed'], default='local'),
+        datastore_name=dict(type='str', required=False, aliases=['datastore']),
+        state=dict(type='str', choices=['present', 'absent'], default='present', required=False),
+        subscription_url=dict(type='str', default='', required=False),
+        ssl_thumbprint=dict(type='str', default='', required=False),
+        update_on_demand=dict(type='bool', default=False, required=False),
+    )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_if=[
+            # A subscribed library cannot be managed without its source URL.
+            ('library_type', 'subscribed', ['subscription_url']),
+        ],
+    )
+
+    vmware_contentlib_create = VmwareContentLibCreate(module)
+    vmware_contentlib_create.process_state()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_custom_attribute.py b/ansible_collections/community/vmware/plugins/modules/vmware_custom_attribute.py
new file mode 100644
index 000000000..618aa6fe0
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_custom_attribute.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2022, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_custom_attribute
+version_added: '3.2.0'
+short_description: Manage custom attributes definitions
+description:
+ - This module can be used to add and remove custom attributes definitions for various vSphere objects.
+author:
+ - Mario Lenz (@mariolenz)
+options:
+ custom_attribute:
+ description:
+ - Name of the custom attribute.
+ required: true
+ type: str
+ object_type:
+ description:
+ - Type of the object the custom attribute is associated with.
+ type: str
+ choices:
+ - Cluster
+ - Datacenter
+ - Datastore
+ - DistributedVirtualPortgroup
+ - DistributedVirtualSwitch
+ - Folder
+ - Global
+ - HostSystem
+ - ResourcePool
+ - VirtualMachine
+ required: true
+ state:
+ description:
+ - Manage definition of custom attributes.
+ - If set to C(present) and definition not present, then custom attribute definition is created.
+ - If set to C(present) and definition is present, then no action taken.
+ - If set to C(absent) and definition is present, then custom attribute definition is removed.
+ - If set to C(absent) and definition is absent, then no action taken.
+ default: 'present'
+ choices: ['present', 'absent']
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Add VM Custom Attribute Definition
+ community.vmware.vmware_custom_attribute:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ state: present
+ object_type: VirtualMachine
+ custom_attribute: custom_attr_def_1
+ delegate_to: localhost
+ register: defs
+
+- name: Remove VM Custom Attribute Definition
+ community.vmware.vmware_custom_attribute:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ state: absent
+ object_type: VirtualMachine
+ custom_attribute: custom_attr_def_1
+ delegate_to: localhost
+ register: defs
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+
+class CustomAttribute(PyVmomi):
+    """Manage vCenter custom attribute *definitions* for a given object type."""
+
+    def __init__(self, module):
+        super(CustomAttribute, self).__init__(module)
+
+        # Custom attribute definitions can only be managed through vCenter.
+        if not self.is_vcenter():
+            self.module.fail_json(msg="You have to connect to a vCenter server!")
+
+        # Map user-facing object type names to pyVmomi managed object types.
+        # 'Global' maps to None, i.e. the definition applies to all types.
+        object_types_map = {
+            'Cluster': vim.ClusterComputeResource,
+            'Datacenter': vim.Datacenter,
+            'Datastore': vim.Datastore,
+            'DistributedVirtualPortgroup': vim.DistributedVirtualPortgroup,
+            'DistributedVirtualSwitch': vim.DistributedVirtualSwitch,
+            'Folder': vim.Folder,
+            'Global': None,
+            'HostSystem': vim.HostSystem,
+            'ResourcePool': vim.ResourcePool,
+            'VirtualMachine': vim.VirtualMachine
+        }
+
+        self.object_type = object_types_map[self.params['object_type']]
+
+    def remove_custom_def(self, field):
+        """Remove the definition named *field* for self.object_type.
+
+        Returns a result dict with 'changed' True when a matching definition
+        was found (removal itself is skipped in check mode).
+        """
+        changed = False
+        # NOTE(review): self.custom_field_mgr comes from the PyVmomi base class --
+        # presumably the list of existing field definitions; verify in module_utils.
+        for x in self.custom_field_mgr:
+            if x.name == field and x.managedObjectType == self.object_type:
+                changed = True
+                if not self.module.check_mode:
+                    self.content.customFieldsManager.RemoveCustomFieldDef(key=x.key)
+                break
+        return {'changed': changed, 'failed': False}
+
+    def add_custom_def(self, field):
+        """Create the definition named *field* unless it already exists.
+
+        Returns a result dict with 'changed' True when the definition was
+        missing (creation itself is skipped in check mode).
+        """
+        changed = False
+        found = False
+        for x in self.custom_field_mgr:
+            if x.name == field and x.managedObjectType == self.object_type:
+                found = True
+                break
+
+        if not found:
+            changed = True
+            if not self.module.check_mode:
+                self.content.customFieldsManager.AddFieldDefinition(name=field, moType=self.object_type)
+        return {'changed': changed, 'failed': False}
+
+
+def main():
+    """Module entry point: create or remove a custom attribute definition."""
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(
+        custom_attribute=dict(type='str', no_log=False, required=True),
+        object_type=dict(type='str', required=True, choices=[
+            'Cluster',
+            'Datacenter',
+            'Datastore',
+            'DistributedVirtualPortgroup',
+            'DistributedVirtualSwitch',
+            'Folder',
+            'Global',
+            'HostSystem',
+            'ResourcePool',
+            'VirtualMachine'
+        ]),
+        state=dict(type='str', default='present', choices=['absent', 'present']),
+    )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    pyv = CustomAttribute(module)
+    # Default result is overwritten by the add/remove handlers below.
+    results = dict(changed=False, custom_attribute_defs=list())
+    if module.params['state'] == "present":
+        results = pyv.add_custom_def(module.params['custom_attribute'])
+    elif module.params['state'] == "absent":
+        results = pyv.remove_custom_def(module.params['custom_attribute'])
+
+    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_custom_attribute_manager.py b/ansible_collections/community/vmware/plugins/modules/vmware_custom_attribute_manager.py
new file mode 100644
index 000000000..65ad207c4
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_custom_attribute_manager.py
@@ -0,0 +1,242 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright, (c) 2022, Mario Lenz <m@riolenz.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_custom_attribute_manager
+version_added: '3.2.0'
+short_description: Manage custom attributes from VMware for the given vSphere object
+description:
+ - This module can be used to add, remove and update custom attributes for the given vSphere object.
+author:
+ - Mario Lenz (@mariolenz)
+options:
+ custom_attributes:
+ description:
+      - A list of names and values of custom attributes that need to be managed.
+ - Value of custom attribute is not required and will be ignored, if C(state) is set to C(absent).
+ suboptions:
+ name:
+ description:
+ - Name of the attribute.
+ type: str
+ required: true
+ value:
+ description:
+ - Value of the attribute.
+ type: str
+        default: ''
+ type: list
+ elements: dict
+ object_name:
+ description:
+ - Name of the vSphere object to work with.
+ type: str
+ required: true
+ object_type:
+ description:
+ - Type of the object the custom attribute is associated with.
+ type: str
+ choices:
+ - Cluster
+ - Datacenter
+ - Datastore
+ - DistributedVirtualPortgroup
+ - DistributedVirtualSwitch
+ - Folder
+ - HostSystem
+ - ResourcePool
+ - VirtualMachine
+ required: true
+ state:
+ description:
+ - If set to C(present), the custom attribute is set to the given value.
+ - If set to C(absent), the custom attribute is cleared. The given value is ignored in this case.
+ default: 'present'
+ choices: ['present', 'absent']
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Add virtual machine custom attributes
+ community.vmware.vmware_custom_attribute_manager:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ object_name: vm1
+ object_type: VirtualMachine
+ state: present
+ custom_attributes:
+ - name: MyAttribute
+ value: MyValue
+ delegate_to: localhost
+
+- name: Add multiple virtual machine custom attributes
+ community.vmware.vmware_custom_attribute_manager:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ object_name: vm1
+ object_type: VirtualMachine
+ state: present
+ custom_attributes:
+ - name: MyAttribute
+ value: MyValue
+ - name: MyAttribute2
+ value: MyValue2
+ delegate_to: localhost
+
+- name: Remove virtual machine Attribute
+ community.vmware.vmware_custom_attribute_manager:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ object_name: vm1
+ object_type: VirtualMachine
+ state: absent
+ custom_attributes:
+ - name: MyAttribute
+ delegate_to: localhost
+ register: attributes
+'''
+
+RETURN = r'''
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, find_obj
+
+
+class CustomAttributeManager(PyVmomi):
+    """Set or clear custom attribute *values* on a single vSphere object."""
+
+    def __init__(self, module):
+        super(CustomAttributeManager, self).__init__(module)
+
+        # Custom attributes can only be managed through vCenter.
+        if not self.is_vcenter():
+            self.module.fail_json(msg="You have to connect to a vCenter server!")
+
+        # Map user-facing object type names to pyVmomi managed object types.
+        object_types_map = {
+            'Cluster': vim.ClusterComputeResource,
+            'Datacenter': vim.Datacenter,
+            'Datastore': vim.Datastore,
+            'DistributedVirtualPortgroup': vim.DistributedVirtualPortgroup,
+            'DistributedVirtualSwitch': vim.DistributedVirtualSwitch,
+            'Folder': vim.Folder,
+            'HostSystem': vim.HostSystem,
+            'ResourcePool': vim.ResourcePool,
+            'VirtualMachine': vim.VirtualMachine
+        }
+
+        self.object_type = object_types_map[self.params['object_type']]
+
+        self.object_name = self.params['object_name']
+        self.obj = find_obj(self.content, [self.object_type], self.params['object_name'])
+        if self.obj is None:
+            module.fail_json(msg="Unable to manage custom attributes for non-existing"
+                                 " object %s." % self.object_name)
+
+        # Shallow copy of the requested attributes; 'key' entries are added below.
+        self.ca_list = self.params['custom_attributes'].copy()
+
+        # Resolve each attribute name to its vCenter field key.
+        for ca in self.ca_list:
+            for av_field in self.obj.availableField:
+                if av_field.name == ca['name']:
+                    ca['key'] = av_field.key
+                    break
+
+        # Fail if any requested attribute has no definition for this object type.
+        for ca in self.ca_list:
+            if 'key' not in ca:
+                self.module.fail_json(msg="Custom attribute %s does not exist for object type %s." % (ca['name'], self.params['object_type']))
+
+    def set_custom_attributes(self):
+        """Set each requested attribute to its value.
+
+        Returns {'changed': bool, 'failed': False}; writes are skipped in
+        check mode but 'changed' still reflects what would be done.
+        """
+        changed = False
+        # Keys of attributes that currently carry a value on the object.
+        obj_cas_set = [x.key for x in self.obj.value]
+
+        for ca in self.ca_list:
+            if ca['key'] not in obj_cas_set:
+                # Attribute has no value on the object yet: set it.
+                changed = True
+                if not self.module.check_mode:
+                    self.content.customFieldsManager.SetField(entity=self.obj, key=ca['key'], value=ca['value'])
+                continue
+            # Attribute already has a value: update only when it differs.
+            for x in self.obj.customValue:
+                if ca['key'] == x.key and ca['value'] != x.value:
+                    changed = True
+                    if not self.module.check_mode:
+                        self.content.customFieldsManager.SetField(entity=self.obj, key=ca['key'], value=ca['value'])
+
+        return {'changed': changed, 'failed': False}
+
+    def remove_custom_attributes(self):
+        """Clear each requested attribute by writing an empty string.
+
+        Returns {'changed': bool, 'failed': False}; writes are skipped in
+        check mode but 'changed' still reflects what would be done.
+        """
+        changed = False
+
+        for ca in self.ca_list:
+            for x in self.obj.customValue:
+                if ca['key'] == x.key and x.value != '':
+                    changed = True
+                    if not self.module.check_mode:
+                        self.content.customFieldsManager.SetField(entity=self.obj, key=ca['key'], value='')
+
+        return {'changed': changed, 'failed': False}
+
+
+def main():
+    """Module entry point: set or clear custom attribute values on an object."""
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(
+        custom_attributes=dict(
+            type='list',
+            required=True,
+            elements='dict',
+            options=dict(
+                name=dict(type='str', required=True),
+                value=dict(type='str', default=''),
+            )
+        ),
+        object_name=dict(type='str', required=True),
+        object_type=dict(type='str', required=True, choices=[
+            'Cluster',
+            'Datacenter',
+            'Datastore',
+            'DistributedVirtualPortgroup',
+            'DistributedVirtualSwitch',
+            'Folder',
+            'HostSystem',
+            'ResourcePool',
+            'VirtualMachine'
+        ]),
+        state=dict(type='str', default='present',
+                   choices=['absent', 'present']),
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    pyv = CustomAttributeManager(module)
+    # Default result is overwritten by the set/remove handlers below.
+    results = {'changed': False, 'failed': False}
+
+    if module.params['state'] == "present":
+        results = pyv.set_custom_attributes()
+    elif module.params['state'] == "absent":
+        results = pyv.remove_custom_attributes()
+
+    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_datacenter.py b/ansible_collections/community/vmware/plugins/modules/vmware_datacenter.py
new file mode 100644
index 000000000..8978b5999
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_datacenter.py
@@ -0,0 +1,163 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_datacenter
+short_description: Manage VMware vSphere Datacenters
+description:
+ - This module can be used to manage (create, delete) VMware vSphere Datacenters.
+author:
+- Joseph Callen (@jcpowermac)
+- Kamil Szczygiel (@kamsz)
+options:
+ datacenter_name:
+ description:
+    - The name of the datacenter to manage.
+ required: true
+ type: str
+ state:
+ description:
+ - If the datacenter should be present or absent.
+ choices: [ present, absent ]
+ default: present
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Create Datacenter
+ community.vmware.vmware_datacenter:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter_name: '{{ datacenter_name }}'
+ state: present
+ delegate_to: localhost
+
+- name: Delete Datacenter
+ community.vmware.vmware_datacenter:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter_name: '{{ datacenter_name }}'
+ state: absent
+ delegate_to: localhost
+ register: datacenter_delete_result
+'''
+
+RETURN = r'''#
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, find_datacenter_by_name, vmware_argument_spec, wait_for_task
+from ansible.module_utils._text import to_native
+
+
+class VmwareDatacenterManager(PyVmomi):
+ def __init__(self, module):
+ super(VmwareDatacenterManager, self).__init__(module)
+ self.datacenter_name = self.params.get('datacenter_name')
+ self.datacenter_obj = self.get_datacenter()
+
+ def ensure(self):
+ state = self.module.params.get('state')
+
+ if state == 'present':
+ self.create_datacenter()
+
+ if state == 'absent':
+ self.destroy_datacenter()
+
+ def get_datacenter(self):
+ try:
+ datacenter_obj = find_datacenter_by_name(self.content, self.datacenter_name)
+ return datacenter_obj
+ except (vmodl.MethodFault, vmodl.RuntimeFault) as runtime_fault:
+ self.module.fail_json(msg="Failed to get datacenter '%s'"
+ " due to : %s" % (self.datacenter_name,
+ to_native(runtime_fault.msg)))
+ except Exception as generic_exc:
+ self.module.fail_json(msg="Failed to get datacenter"
+ " '%s' due to generic error: %s" % (self.datacenter_name,
+ to_native(generic_exc)))
+
+ def create_datacenter(self):
+ folder = self.content.rootFolder
+ changed = False
+ try:
+ if not self.datacenter_obj and not self.module.check_mode:
+ changed = True
+ folder.CreateDatacenter(name=self.datacenter_name)
+ self.module.exit_json(changed=changed)
+ except vim.fault.DuplicateName:
+ self.module.exit_json(changed=changed)
+ except vim.fault.InvalidName as invalid_name:
+ self.module.fail_json(msg="Specified datacenter name '%s' is an"
+ " invalid name : %s" % (self.datacenter_name,
+ to_native(invalid_name.msg)))
+ except vmodl.fault.NotSupported as not_supported:
+ # This should never happen
+ self.module.fail_json(msg="Trying to create a datacenter '%s' on"
+ " an incorrect folder object : %s" % (self.datacenter_name,
+ to_native(not_supported.msg)))
+ except (vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault:
+ self.module.fail_json(msg="Failed to create a datacenter"
+ " '%s' due to : %s" % (self.datacenter_name,
+ to_native(runtime_fault.msg)))
+ except Exception as generic_exc:
+ self.module.fail_json(msg="Failed to create a datacenter"
+ " '%s' due to generic error: %s" % (self.datacenter_name,
+ to_native(generic_exc)))
+
+ def destroy_datacenter(self):
+ results = dict(changed=False)
+ try:
+ if self.datacenter_obj and not self.module.check_mode:
+ task = self.datacenter_obj.Destroy_Task()
+ changed, result = wait_for_task(task)
+ results['changed'] = changed
+ results['result'] = result
+ self.module.exit_json(**results)
+ except (vim.fault.VimFault, vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault:
+ self.module.fail_json(msg="Failed to delete a datacenter"
+ " '%s' due to : %s" % (self.datacenter_name,
+ to_native(runtime_fault.msg)))
+ except Exception as generic_exc:
+ self.module.fail_json(msg="Failed to delete a datacenter"
+ " '%s' due to generic error: %s" % (self.datacenter_name,
+ to_native(generic_exc)))
+
+
+def main():
+    """Module entry point: parse arguments and converge the datacenter state."""
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(
+        dict(
+            datacenter_name=dict(required=True, type='str'),
+            state=dict(default='present', choices=['present', 'absent'], type='str')
+        )
+    )
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+    vmware_datacenter_mgr = VmwareDatacenterManager(module)
+    vmware_datacenter_mgr.ensure()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_datacenter_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_datacenter_info.py
new file mode 100644
index 000000000..de1f7addf
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_datacenter_info.py
@@ -0,0 +1,189 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Abhijeet Kasurde <akasurde@redhat.com>
+# Copyright: (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_datacenter_info
+short_description: Gather information about VMware vSphere Datacenters
+description:
+ - This module can be used to gather information VMware vSphere Datacenters.
+author:
+- Abhijeet Kasurde (@Akasurde)
+options:
+ datacenter:
+ description:
+ - The name of the datacenter to gather information for.
+ - If not provided, will gather information about all datacenters from the VMware infra.
+ type: str
+ aliases: ['datacenter_name']
+ schema:
+ description:
+ - Specify the output schema desired.
+ - The 'summary' output schema is the legacy output from the module.
+ - The 'vsphere' output schema is the vSphere API class definition which requires pyvmomi>6.7.1.
+ choices: ['summary', 'vsphere']
+ default: 'summary'
+ type: str
+ properties:
+ description:
+ - Specify the properties to retrieve.
+ - If not specified, all properties are retrieved (deeply).
+ - Results are returned in a structure identical to the vSphere API.
+ - 'Example:'
+ - ' properties: ['
+ - ' "overallStatus"'
+ - ' ]'
+ - Only valid when C(schema) is C(vsphere).
+ type: list
+ elements: str
+ show_tag:
+ description:
+ - Tags related to Datacenter are shown if set to C(true).
+ default: false
+ type: bool
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: Gather information about all datacenters
+ community.vmware.vmware_datacenter_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ delegate_to: localhost
+
+- name: Gather information about a particular datacenter
+ community.vmware.vmware_datacenter_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter: '{{ datacenter_name }}'
+ delegate_to: localhost
+
+- name: Gather information about a particular datacenter
+ community.vmware.vmware_datacenter_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter: '{{ datacenter_name }}'
+ show_tag: true
+ delegate_to: localhost
+
+- name: Gather vSphere schema information
+ community.vmware.vmware_datacenter_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter: '{{ datacenter_name }}'
+ schema: vsphere
+ properties:
+ - configStatus
+ - overallStatus
+'''
+
+RETURN = r'''
+datacenter_info:
+ description: Information about datacenter
+ returned: always
+ type: list
+ sample:
+ [
+ {
+ "configStatus": "gray",
+ "moid": "datacenter-2",
+ "name": "Asia-Datacenter1"
+ }
+ ]
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+from ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient
+
+
+class VmwareDatacenterInfo(PyVmomi):
+    """Gather information about one or all datacenters, optionally with tags."""
+
+    def __init__(self, module):
+        super(VmwareDatacenterInfo, self).__init__(module)
+        # The REST client is only needed when tag information is requested.
+        if self.params.get('show_tag'):
+            self.vmware_client = VmwareRestClient(self.module)
+
+    def get_datacenter_info(self):
+        """Collect datacenter information and exit the module with the results."""
+        self.datacenter_name = self.params.get('datacenter')
+        results = dict(
+            changed=False,
+            datacenter_info=[],
+        )
+
+        # Retrieve all datacenters with just their 'name' property populated.
+        datacenter_objs = self.get_managed_objects_properties(vim_type=vim.Datacenter, properties=['name'])
+        dcs = []
+        for dc_obj in datacenter_objs:
+            if len(dc_obj.propSet) == 1:
+                if self.datacenter_name is not None:
+                    # A specific datacenter was requested: keep only name matches.
+                    if dc_obj.propSet[0].val == to_native(self.datacenter_name):
+                        dcs.append(dc_obj.obj)
+                        continue
+                else:
+                    dcs.append(dc_obj.obj)
+
+        for obj in dcs:
+            if obj is None:
+                continue
+            temp_dc = dict(
+                name=obj.name,
+                moid=obj._moId,
+            )
+            if self.module.params['schema'] == 'summary':
+                # Legacy output shape of this module.
+                temp_dc.update(
+                    dict(
+                        config_status=obj.configStatus,
+                        overall_status=obj.overallStatus,
+                    )
+                )
+            else:
+                # 'vsphere' schema: serialize the raw API object (optionally
+                # restricted to the requested properties).
+                temp_dc.update(self.to_json(obj, self.params.get('properties')))
+            if self.params.get('show_tag'):
+                temp_dc.update({
+                    'tags': self.vmware_client.get_tags_for_datacenter(datacenter_mid=obj._moId)
+                })
+
+            results['datacenter_info'].append(temp_dc)
+        self.module.exit_json(**results)
+
+
+def main():
+    """Module entry point: parse arguments and gather datacenter information."""
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(
+        dict(
+            datacenter=dict(type='str', aliases=['datacenter_name']),
+            schema=dict(type='str', choices=['summary', 'vsphere'], default='summary'),
+            properties=dict(type='list', elements='str'),
+            show_tag=dict(type='bool', default=False),
+        )
+    )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True
+    )
+
+    vmware_datacenter_mgr = VmwareDatacenterInfo(module)
+    vmware_datacenter_mgr.get_datacenter_info()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_datastore.py b/ansible_collections/community/vmware/plugins/modules/vmware_datastore.py
new file mode 100644
index 000000000..b26ebbc9e
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_datastore.py
@@ -0,0 +1,244 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Tim Rightnour <thegarbledone@gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: vmware_datastore
+short_description: Configure Datastores
+version_added: '3.0.0'
+description:
+ - Configure Storage I/O Control Settings of a Datastore.
+author:
+ - Nina Loser (@Nina2244)
+options:
+ name:
+ description: Name of the datastore.
+ required: true
+ type: str
+ datacenter:
+ description:
+ - Datacenter to search for the datastores.
+ aliases: ['datacenter_name']
+ type: str
+ storage_io_control:
+ description:
+      - Specify the Storage I/O Control mode.
+ type: str
+ choices: ['enable_io_statistics', 'enable_statistics', 'disable']
+ required: true
+ congestion_threshold_percentage:
+ description:
+ - Storage I/O congestion threshold in percentage of peak throughput.
+ - "A value between 50% and 100%."
+ - "Recommended: 90%"
+ - Only use C(congestion_threshold_percentage) or C(congestion_threshold_manual).
+ - Only valid when C(storage_io_control) is C(enable_io_statistics).
+ type: int
+ default: 90
+ congestion_threshold_manual:
+ description:
+ - Storage I/O congestion threshold in ms.
+ - Only use C(congestion_threshold_percentage) or C(congestion_threshold_manual).
+ - Only valid when C(storage_io_control) is C(enable_io_statistics).
+ type: int
+ statistic_collection:
+ description:
+ - Include I/O statistics for SDRS.
+ - Only valid when C(storage_io_control) is C(enable_io_statistics) or C(enable_statistics).
+ type: bool
+ default: true
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Configure Storage I/O Control of an mounted datastore
+ community.vmware.vmware_datastore_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter_name: '{{ datacenter_name }}'
+ name: datastore1
+ storage_io_control: 'enable_io_statistics'
+ congestion_threshold_manual: 30
+ statistic_collection: true
+ delegate_to: localhost
+ register: info
+
+'''
+
+RETURN = r'''
+result:
+ description: Information about datastore operation.
+ returned: always
+ type: str
+ sample: "Datastore configured successfully."
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi,
+ vmware_argument_spec,
+ wait_for_task,
+ TaskError)
+
+
+class VMwareDatastore(PyVmomi):
+ def __init__(self, module):
+ super(VMwareDatastore, self).__init__(module)
+
+ if self.module.params.get('congestion_threshold_percentage') not in range(50, 101):
+ self.module.fail_json(msg="Congestion Threshold should be between 50% and 100%.")
+
+ self.datacenter_name = self.module.params.get('datacenter')
+ if self.datacenter_name:
+ self.datacenter = self.find_datacenter_by_name(self.datacenter_name)
+ if self.datacenter is None:
+ self.module.fail_json(msg="Datacenter %s does not exist." % self.datacenter_name)
+ else:
+ self.datacenter = None
+
+ self.datastore_name = self.module.params.get('name')
+ self.datastore = self.find_datastore_by_name(self.datastore_name, self.datacenter)
+ if self.datastore is None:
+ self.module.fail_json(msg="Datastore %s does not exist." % self.name)
+
+ self.storageResourceManager = self.content.storageResourceManager
+
+    def check_config_diff(self):
+        """
+        Compare the datastore's current Storage I/O Control configuration
+        with the requested one.
+
+        Returns: True if there is diff, else False
+        """
+        iormConfiguration = self.datastore.iormConfiguration
+
+        # Module exposes 'collect statistics'; the API stores the inverse flag.
+        conf_statsAggregationDisabled = not self.module.params.get('statistic_collection')
+
+        if self.module.params.get('storage_io_control') == "enable_io_statistics":
+            # Desired threshold: manual (ms) when given, otherwise automatic (%).
+            # The unused mode keeps its current value so it does not cause a diff.
+            if self.module.params.get('congestion_threshold_manual') is not None:
+                conf_congestionThresholdMode = 'manual'
+                conf_congestionThreshold = self.module.params.get('congestion_threshold_manual')
+                conf_percentOfPeakThroughput = iormConfiguration.percentOfPeakThroughput
+
+            else:
+                conf_congestionThresholdMode = 'automatic'
+                conf_percentOfPeakThroughput = self.module.params.get('congestion_threshold_percentage')
+                conf_congestionThreshold = iormConfiguration.congestionThreshold
+
+            # I/O control and statistics must both be on, with matching settings.
+            if iormConfiguration.enabled and \
+                    iormConfiguration.statsCollectionEnabled and \
+                    iormConfiguration.statsAggregationDisabled == conf_statsAggregationDisabled and \
+                    iormConfiguration.congestionThresholdMode == conf_congestionThresholdMode and \
+                    iormConfiguration.congestionThreshold == conf_congestionThreshold and \
+                    iormConfiguration.percentOfPeakThroughput == conf_percentOfPeakThroughput:
+                return False
+            else:
+                return True
+
+        elif self.module.params.get('storage_io_control') == "enable_statistics":
+            # Statistics only: I/O control off, collection on, aggregation flag matches.
+            if not iormConfiguration.enabled and \
+                    iormConfiguration.statsCollectionEnabled and \
+                    iormConfiguration.statsAggregationDisabled == conf_statsAggregationDisabled:
+                return False
+            else:
+                return True
+
+        elif self.module.params.get('storage_io_control') == "disable":
+            # Fully disabled: neither I/O control nor statistics collection.
+            if not iormConfiguration.enabled and \
+                    not iormConfiguration.statsCollectionEnabled:
+                return False
+            else:
+                return True
+
+ def configure(self):
+ """
+ Manage configuration
+ """
+ changed = self.check_config_diff()
+
+ if changed:
+ if not self.module.check_mode:
+ config_spec = vim.StorageResourceManager.IORMConfigSpec()
+
+ iormConfiguration = self.datastore.iormConfiguration
+
+ conf_statsAggregationDisabled = not self.module.params.get('statistic_collection')
+
+ if self.module.params.get('storage_io_control') == "enable_io_statistics":
+ if self.module.params.get('congestion_threshold_manual') is not None:
+ config_spec.congestionThresholdMode = 'manual'
+ config_spec.congestionThreshold = self.module.params.get('congestion_threshold_manual')
+ config_spec.percentOfPeakThroughput = iormConfiguration.percentOfPeakThroughput
+
+ else:
+ config_spec.congestionThresholdMode = 'automatic'
+ config_spec.percentOfPeakThroughput = self.module.params.get('congestion_threshold_percentage')
+ config_spec.congestionThreshold = iormConfiguration.congestionThreshold
+
+ config_spec.enabled = True
+ config_spec.statsCollectionEnabled = True
+ config_spec.statsAggregationDisabled = conf_statsAggregationDisabled
+
+ elif self.module.params.get('storage_io_control') == "enable_statistics":
+ config_spec.enabled = False
+ config_spec.statsCollectionEnabled = True
+ config_spec.statsAggregationDisabled = conf_statsAggregationDisabled
+
+ elif self.module.params.get('storage_io_control') == "disable":
+ config_spec.enabled = False
+ config_spec.statsCollectionEnabled = False
+
+ try:
+ task = self.storageResourceManager.ConfigureDatastoreIORM_Task(self.datastore, config_spec)
+ changed, result = wait_for_task(task)
+ except TaskError as generic_exc:
+ self.module.fail_json(msg=to_native(generic_exc))
+ except Exception as task_e:
+ self.module.fail_json(msg=to_native(task_e))
+ else:
+ changed = True
+
+ results = dict(changed=changed)
+ self.module.exit_json(**results)
+
+
def main():
    """Module entry point: declare arguments and run the SIOC configuration."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type='str', required=True),
            datacenter=dict(type='str', aliases=['datacenter_name']),
            storage_io_control=dict(type='str', choices=['enable_io_statistics', 'enable_statistics', 'disable'], required=True),
            congestion_threshold_percentage=dict(type='int', default=90, required=False),
            congestion_threshold_manual=dict(type='int', required=False),
            statistic_collection=dict(type='bool', default=True, required=False),
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[
            ['congestion_threshold_percentage', 'congestion_threshold_manual'],
        ],
    )

    # Construction validates the inputs; configure() applies them and exits.
    VMwareDatastore(module).configure()
+
+
# Run the module when invoked directly by Ansible.
if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_datastore_cluster.py b/ansible_collections/community/vmware/plugins/modules/vmware_datastore_cluster.py
new file mode 100644
index 000000000..a6170b19d
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_datastore_cluster.py
@@ -0,0 +1,294 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Ansible Project
+# Copyright (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_datastore_cluster
+short_description: Manage VMware vSphere datastore clusters
+description:
+ - This module can be used to add and delete datastore cluster in given VMware environment.
+ - All parameters and VMware object values are case sensitive.
+author:
+- Abhijeet Kasurde (@Akasurde)
+options:
+ datacenter_name:
+ description:
+ - The name of the datacenter.
+ - You must specify either a C(datacenter_name) or a C(folder).
+ - Mutually exclusive with C(folder) parameter.
+ required: false
+ aliases: [ datacenter ]
+ type: str
+ datastore_cluster_name:
+ description:
+ - The name of the datastore cluster.
+ required: true
+ type: str
+ state:
+ description:
+ - If the datastore cluster should be present or absent.
+ choices: [ present, absent ]
+ default: present
+ type: str
+ folder:
+ description:
+ - Destination folder, absolute path to place datastore cluster in.
+ - The folder should include the datacenter.
+ - This parameter is case sensitive.
+ - You must specify either a C(folder) or a C(datacenter_name).
+ - 'Examples:'
+ - ' folder: /datacenter1/datastore'
+ - ' folder: datacenter1/datastore'
+ - ' folder: /datacenter1/datastore/folder1'
+ - ' folder: datacenter1/datastore/folder1'
+ - ' folder: /folder1/datacenter1/datastore'
+ - ' folder: folder1/datacenter1/datastore'
+ - ' folder: /folder1/datacenter1/datastore/folder2'
+ required: false
+ type: str
+ enable_sdrs:
+ description:
+ - Whether or not storage DRS is enabled.
+ default: false
+ type: bool
+ required: false
+ automation_level:
+ description:
+ - Run SDRS automated or manually.
+ choices: [ automated, manual ]
+ default: manual
+ type: str
+ required: false
+ keep_vmdks_together:
+ description:
+ - Specifies whether or not each VM in this datastore cluster should have its virtual disks on the same datastore by default.
+ default: true
+ type: bool
+ required: false
+ loadbalance_interval:
+ description:
+ - Specify the interval in minutes that storage DRS runs to load balance among datastores.
+ default: 480
+ type: int
+ required: false
+ enable_io_loadbalance:
+ description:
+ - Whether or not storage DRS takes into account storage I/O workload when making load balancing and initial placement recommendations.
+ default: false
+ type: bool
+ required: false
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Create datastore cluster and enable SDRS
+ community.vmware.vmware_datastore_cluster:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter_name: '{{ datacenter_name }}'
+ datastore_cluster_name: '{{ datastore_cluster_name }}'
+ enable_sdrs: true
+ state: present
+ delegate_to: localhost
+
+- name: Create datastore cluster using folder
+ community.vmware.vmware_datastore_cluster:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ folder: '/{{ datacenter_name }}/datastore/ds_folder'
+ datastore_cluster_name: '{{ datastore_cluster_name }}'
+ state: present
+ delegate_to: localhost
+
+- name: Delete datastore cluster
+ community.vmware.vmware_datastore_cluster:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter_name: '{{ datacenter_name }}'
+ datastore_cluster_name: '{{ datastore_cluster_name }}'
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+result:
+ description: information about datastore cluster operation
+ returned: always
+ type: str
+ sample: "Datastore cluster 'DSC2' created successfully."
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task
+from ansible.module_utils._text import to_native
+
+
class VMwareDatastoreClusterManager(PyVmomi):
    """Create, reconfigure (SDRS settings) or delete a datastore cluster."""

    def __init__(self, module):
        """Resolve the destination folder (explicit path or the datacenter's
        datastore folder) and look up an existing datastore cluster, if any."""
        super(VMwareDatastoreClusterManager, self).__init__(module)
        folder = self.params['folder']
        if folder:
            self.folder_obj = self.content.searchIndex.FindByInventoryPath(folder)
            if not self.folder_obj:
                self.module.fail_json(msg="Failed to find the folder specified by %(folder)s" % self.params)
        else:
            datacenter_name = self.params.get('datacenter_name')
            datacenter_obj = self.find_datacenter_by_name(datacenter_name)
            if not datacenter_obj:
                self.module.fail_json(msg="Failed to find datacenter '%s' required"
                                          " for managing datastore cluster." % datacenter_name)
            self.folder_obj = datacenter_obj.datastoreFolder

        self.datastore_cluster_name = self.params.get('datastore_cluster_name')
        # None when the datastore cluster does not exist yet.
        self.datastore_cluster_obj = self.find_datastore_cluster_by_name(self.datastore_cluster_name)

    def ensure(self):
        """
        Manage internal state of datastore cluster

        Creates/deletes the cluster and reconciles the SDRS settings; exits
        the module via exit_json with 'changed' and a human-readable 'result'.
        """
        results = dict(changed=False, result='')
        state = self.module.params.get('state')
        enable_sdrs = self.params.get('enable_sdrs')
        automation_level = self.params.get('automation_level')
        keep_vmdks_together = self.params.get('keep_vmdks_together')
        enable_io_loadbalance = self.params.get('enable_io_loadbalance')
        loadbalance_interval = self.params.get('loadbalance_interval')

        if self.datastore_cluster_obj:
            if state == 'present':
                results['result'] = "Datastore cluster '%s' already available." % self.datastore_cluster_name
                sdrs_spec = vim.storageDrs.ConfigSpec()
                # podConfigSpec stays None unless a setting differs; it doubles
                # as the "reconfiguration needed" flag checked below.
                sdrs_spec.podConfigSpec = None
                if enable_sdrs != self.datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled:
                    if not sdrs_spec.podConfigSpec:
                        sdrs_spec.podConfigSpec = vim.storageDrs.PodConfigSpec()
                    sdrs_spec.podConfigSpec.enabled = enable_sdrs
                    results['result'] = results['result'] + " Changed SDRS to '%s'." % enable_sdrs
                if automation_level != self.datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.defaultVmBehavior:
                    if not sdrs_spec.podConfigSpec:
                        sdrs_spec.podConfigSpec = vim.storageDrs.PodConfigSpec()
                    sdrs_spec.podConfigSpec.defaultVmBehavior = automation_level
                    results['result'] = results['result'] + " Changed automation level to '%s'." % automation_level
                if keep_vmdks_together != self.datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.defaultIntraVmAffinity:
                    if not sdrs_spec.podConfigSpec:
                        sdrs_spec.podConfigSpec = vim.storageDrs.PodConfigSpec()
                    sdrs_spec.podConfigSpec.defaultIntraVmAffinity = keep_vmdks_together
                    results['result'] = results['result'] + " Changed VMDK affinity to '%s'." % keep_vmdks_together
                if enable_io_loadbalance != self.datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.ioLoadBalanceEnabled:
                    if not sdrs_spec.podConfigSpec:
                        sdrs_spec.podConfigSpec = vim.storageDrs.PodConfigSpec()
                    sdrs_spec.podConfigSpec.ioLoadBalanceEnabled = enable_io_loadbalance
                    results['result'] = results['result'] + " Changed I/O workload balancing to '%s'." % enable_io_loadbalance
                if loadbalance_interval != self.datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.loadBalanceInterval:
                    if not sdrs_spec.podConfigSpec:
                        sdrs_spec.podConfigSpec = vim.storageDrs.PodConfigSpec()
                    sdrs_spec.podConfigSpec.loadBalanceInterval = loadbalance_interval
                    results['result'] = results['result'] + " Changed load balance interval to '%s' minutes." % loadbalance_interval
                if sdrs_spec.podConfigSpec:
                    # At least one setting differs from the live configuration.
                    if not self.module.check_mode:
                        try:
                            task = self.content.storageResourceManager.ConfigureStorageDrsForPod_Task(pod=self.datastore_cluster_obj,
                                                                                                      spec=sdrs_spec, modify=True)
                            changed, result = wait_for_task(task)
                        except Exception as generic_exc:
                            self.module.fail_json(msg="Failed to configure datastore cluster"
                                                      " '%s' due to %s" % (self.datastore_cluster_name,
                                                                           to_native(generic_exc)))
                    else:
                        changed = True
                    results['changed'] = changed
            elif state == 'absent':
                # Delete datastore cluster
                if not self.module.check_mode:
                    task = self.datastore_cluster_obj.Destroy_Task()
                    changed, result = wait_for_task(task)
                else:
                    changed = True
                if changed:
                    results['result'] = "Datastore cluster '%s' deleted successfully." % self.datastore_cluster_name
                    results['changed'] = changed
                else:
                    self.module.fail_json(msg="Failed to delete datastore cluster '%s'." % self.datastore_cluster_name)
        else:
            if state == 'present':
                # Create datastore cluster
                if not self.module.check_mode:
                    try:
                        self.datastore_cluster_obj = self.folder_obj.CreateStoragePod(name=self.datastore_cluster_name)
                    except Exception as generic_exc:
                        self.module.fail_json(msg="Failed to create datastore cluster"
                                                  " '%s' due to %s" % (self.datastore_cluster_name,
                                                                       to_native(generic_exc)))
                    # Apply the full SDRS configuration to the freshly created pod.
                    try:
                        sdrs_spec = vim.storageDrs.ConfigSpec()
                        sdrs_spec.podConfigSpec = vim.storageDrs.PodConfigSpec()
                        sdrs_spec.podConfigSpec.enabled = enable_sdrs
                        sdrs_spec.podConfigSpec.defaultVmBehavior = automation_level
                        sdrs_spec.podConfigSpec.defaultIntraVmAffinity = keep_vmdks_together
                        sdrs_spec.podConfigSpec.ioLoadBalanceEnabled = enable_io_loadbalance
                        sdrs_spec.podConfigSpec.loadBalanceInterval = loadbalance_interval
                        task = self.content.storageResourceManager.ConfigureStorageDrsForPod_Task(pod=self.datastore_cluster_obj, spec=sdrs_spec, modify=True)
                        changed, result = wait_for_task(task)
                    except Exception as generic_exc:
                        self.module.fail_json(msg="Failed to configure datastore cluster"
                                                  " '%s' due to %s" % (self.datastore_cluster_name,
                                                                       to_native(generic_exc)))
                results['changed'] = True
                results['result'] = "Datastore cluster '%s' created successfully." % self.datastore_cluster_name
            elif state == 'absent':
                results['result'] = "Datastore cluster '%s' not available or already deleted." % self.datastore_cluster_name
        self.module.exit_json(**results)
+
+
def main():
    """Module entry point: declare arguments and run the ensure() state machine."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        datacenter_name=dict(type='str', required=False, aliases=['datacenter']),
        datastore_cluster_name=dict(type='str', required=True),
        state=dict(default='present', choices=['present', 'absent'], type='str'),
        folder=dict(type='str', required=False),
        enable_sdrs=dict(type='bool', default=False, required=False),
        keep_vmdks_together=dict(type='bool', default=True, required=False),
        automation_level=dict(type='str', choices=['automated', 'manual'], default='manual'),
        enable_io_loadbalance=dict(type='bool', default=False, required=False),
        loadbalance_interval=dict(type='int', default=480, required=False),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[
            ['datacenter_name', 'folder'],
        ],
        required_one_of=[
            ['datacenter_name', 'folder'],
        ],
    )

    # Construction resolves the folder/cluster; ensure() reconciles state and exits.
    VMwareDatastoreClusterManager(module).ensure()
+
+
# Run the module when invoked directly by Ansible.
if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_datastore_cluster_manager.py b/ansible_collections/community/vmware/plugins/modules/vmware_datastore_cluster_manager.py
new file mode 100644
index 000000000..f22a98393
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_datastore_cluster_manager.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Ansible Project
+# Copyright (c) 2020, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_datastore_cluster_manager
+short_description: Manage VMware vSphere datastore cluster's members
+description:
+ - This module can be used to add datastores to the datastore cluster.
+ - All parameters and VMware object values are case sensitive.
+author:
+- Abhijeet Kasurde (@Akasurde)
+options:
+ datacenter_name:
+ description:
+ - The name of the datacenter.
+ required: false
+ aliases: [ datacenter ]
+ type: str
+ datastore_cluster_name:
+ description:
+ - The name of the datastore cluster.
+ required: true
+ type: str
+ aliases: [ datastore_cluster ]
+ state:
+ description:
+ - If set to I(present), datastores specified by I(datastores) will be added to the given datastore cluster.
+ - If set to I(absent), datastores specified by I(datastores) will be moved from the given datastore cluster to the datastore folder of the parent datacenter.
+ choices: [ present, absent ]
+ default: present
+ type: str
+ datastores:
+ description:
+ - A list of datastores to be managed.
+ type: list
+ elements: str
+ required: true
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: Add datastore to the given datastore cluster
+ community.vmware.vmware_datastore_cluster_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter_name: '{{ datacenter_name }}'
+ datastore_cluster_name: '{{ datastore_cluster_name }}'
+ datastores:
+ - ds_001
+ - ds_002
+ - ds_003
+ state: present
+ delegate_to: localhost
+
+- name: Move datastore from the given datastore cluster
+ community.vmware.vmware_datastore_cluster_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter_name: '{{ datacenter_name }}'
+ datastore_cluster_name: '{{ datastore_cluster_name }}'
+ datastores:
+ - ds_001
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+datastore_cluster_info:
+ description: information about datastore cluster
+ returned: always
+ type: str
+ sample: {
+ "changed_datastores": ["ds_171_1"],
+ "current_datastores": [],
+ "msg": null,
+ "previous_datastores": ["ds_171_1"]
+ }
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task, TaskError
+from ansible.module_utils._text import to_native
+
+
class VMwareDatastoreClusterManager(PyVmomi):
    """Add datastores to, or remove datastores from, a datastore cluster."""

    def __init__(self, module):
        """
        Constructor

        Resolves the datacenter, its datastore folder and the datastore
        cluster; fails the module if any of them cannot be found.
        """
        super(VMwareDatastoreClusterManager, self).__init__(module)
        datacenter_name = self.params.get('datacenter_name')
        datacenter_obj = self.find_datacenter_by_name(datacenter_name)
        if not datacenter_obj:
            self.module.fail_json(msg="Failed to find datacenter '%s' required"
                                      " for managing datastore cluster." % datacenter_name)
        # Datastores removed from the cluster are moved back into this folder.
        self.folder_obj = datacenter_obj.datastoreFolder

        self.datastore_cluster_name = self.params.get('datastore_cluster_name')
        self.datastore_cluster_obj = self.find_datastore_cluster_by_name(self.datastore_cluster_name, datacenter=datacenter_obj)
        if not self.datastore_cluster_obj:
            self.module.fail_json(msg="Failed to find the datastore cluster '%s'" % self.datastore_cluster_name)

    def get_datastore_cluster_children(self):
        """
        Return the datastore objects that are members of the datastore cluster.

        """
        return [ds for ds in self.datastore_cluster_obj.childEntity if isinstance(ds, vim.Datastore)]

    def ensure(self):
        """
        Manage internal state of datastore cluster

        Moves the requested datastores into (state=present) or out of
        (state=absent) the cluster and exits via exit_json with 'changed'
        and a 'datastore_cluster_info' summary.
        """
        changed = False
        results = dict(
            changed=changed,
        )
        temp_result = dict(
            previous_datastores=[],
            current_datastores=[],
            msg=""
        )
        state = self.module.params.get('state')
        datastores = self.module.params.get('datastores') or []
        datastore_obj_list = []
        dsc_child_obj = self.get_datastore_cluster_children()

        if state == 'present':
            temp_result['previous_datastores'] = [ds.name for ds in dsc_child_obj]
            for datastore_name in datastores:
                datastore_obj = self.find_datastore_by_name(datastore_name)
                if not datastore_obj:
                    self.module.fail_json(msg="Failed to find datastore '%s'" % datastore_name)
                # Only move datastores that are not already cluster members.
                if datastore_obj not in dsc_child_obj and datastore_obj not in datastore_obj_list:
                    datastore_obj_list.append(datastore_obj)

            if self.module.check_mode:
                changed_list = [ds.name for ds in datastore_obj_list]
                # Bugfix: list.extend() returns None, so the original stored
                # None here; build the projected membership list instead.
                temp_result['current_datastores'] = temp_result['previous_datastores'] + changed_list
                temp_result['changed_datastores'] = changed_list
                results['changed'] = len(datastore_obj_list) > 0
                results['datastore_cluster_info'] = temp_result
                self.module.exit_json(**results)

            try:
                if datastore_obj_list:
                    task = self.datastore_cluster_obj.MoveIntoFolder_Task(list=datastore_obj_list)
                    changed, result = wait_for_task(task)
                    temp_result['msg'] = result
                    temp_result['changed_datastores'] = [ds.name for ds in datastore_obj_list]
                    temp_result['current_datastores'] = [ds.name for ds in self.get_datastore_cluster_children()]
            except TaskError as generic_exc:
                self.module.fail_json(msg=to_native(generic_exc))
            except Exception as task_e:
                self.module.fail_json(msg=to_native(task_e))
        elif state == 'absent':
            temp_result['previous_datastores'] = [ds.name for ds in dsc_child_obj]
            temp_result['current_datastores'] = [ds.name for ds in dsc_child_obj]
            for datastore_name in datastores:
                datastore_obj = self.find_datastore_by_name(datastore_name)
                if not datastore_obj:
                    self.module.fail_json(msg="Failed to find datastore '%s'" % datastore_name)
                # Only move datastores that are currently cluster members.
                if datastore_obj in dsc_child_obj and datastore_obj not in datastore_obj_list:
                    datastore_obj_list.append(datastore_obj)

            if self.module.check_mode:
                changed_list = [ds.name for ds in datastore_obj_list]
                for ds in changed_list:
                    # Bugfix: list.pop() expects an integer index and raised
                    # TypeError when given a datastore name; remove() deletes
                    # the entry by value as intended.
                    temp_result['current_datastores'].remove(ds)
                temp_result['changed_datastores'] = changed_list
                results['changed'] = len(datastore_obj_list) > 0
                results['datastore_cluster_info'] = temp_result
                self.module.exit_json(**results)

            try:
                if datastore_obj_list:
                    task = self.folder_obj.MoveIntoFolder_Task(list=datastore_obj_list)
                    changed, result = wait_for_task(task)
                    temp_result['msg'] = result
                    temp_result['changed_datastores'] = [ds.name for ds in datastore_obj_list]
                    temp_result['current_datastores'] = [ds.name for ds in self.get_datastore_cluster_children()]
            except TaskError as generic_exc:
                self.module.fail_json(msg=to_native(generic_exc))
            except Exception as task_e:
                self.module.fail_json(msg=to_native(task_e))

        results['changed'] = changed
        results['datastore_cluster_info'] = temp_result
        self.module.exit_json(**results)
+
+
def main():
    """Module entry point: declare arguments and reconcile cluster membership."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        datacenter_name=dict(type='str', required=False, aliases=['datacenter']),
        datastore_cluster_name=dict(type='str', required=True, aliases=['datastore_cluster']),
        state=dict(default='present', choices=['present', 'absent'], type='str'),
        datastores=dict(type='list', required=True, elements='str'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    # Construction resolves the cluster; ensure() moves datastores and exits.
    VMwareDatastoreClusterManager(module).ensure()
+
+
# Run the module when invoked directly by Ansible.
if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_datastore_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_datastore_info.py
new file mode 100644
index 000000000..ed3a85cb5
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_datastore_info.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Tim Rightnour <thegarbledone@gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_datastore_info
+short_description: Gather info about datastores available in given vCenter
+description:
+ - This module can be used to gather information about datastores in VMWare infrastructure.
+ - All values and VMware object names are case sensitive.
+author:
+ - Tim Rightnour (@garbled1)
+options:
+ name:
+ description:
+ - Name of the datastore to match.
+ - If set, information of specific datastores are returned.
+ required: false
+ type: str
+ datacenter:
+ description:
+ - Datacenter to search for datastores.
+ - This parameter is required, if C(cluster) is not supplied.
+ required: false
+ aliases: ['datacenter_name']
+ type: str
+ cluster:
+ description:
+ - Cluster to search for datastores.
+ - If set, information of datastores belonging to this cluster will be returned.
+ - This parameter is required, if C(datacenter) is not supplied.
+ required: false
+ type: str
+ gather_nfs_mount_info:
+ description:
+ - Gather mount information of NFS datastores.
+ - Disabled per default because this slows down the execution if you have a lot of datastores.
+ - Only valid when C(schema) is C(summary).
+ type: bool
+ default: false
+ gather_vmfs_mount_info:
+ description:
+ - Gather mount information of VMFS datastores.
+ - Disabled per default because this slows down the execution if you have a lot of datastores.
+ - Only valid when C(schema) is C(summary).
+ type: bool
+ default: false
+ schema:
+ description:
+ - Specify the output schema desired.
+ - The 'summary' output schema is the legacy output from the module
+ - The 'vsphere' output schema is the vSphere API class definition
+ which requires pyvmomi>6.7.1
+ choices: ['summary', 'vsphere']
+ default: 'summary'
+ type: str
+ show_tag:
+ description:
+ - Tags related to Datastore are shown if set to C(true).
+ default: false
+ type: bool
+ properties:
+ description:
+ - Specify the properties to retrieve.
+ - If not specified, all properties are retrieved (deeply).
+ - Results are returned in a structure identical to the vsphere API.
+ - 'Example:'
+ - ' properties: ['
+ - ' "name",'
+ - ' "info.vmfs.ssd",'
+ - ' "capability.vsanSparseSupported",'
+ - ' "overallStatus"'
+ - ' ]'
+ - Only valid when C(schema) is C(vsphere).
+ type: list
+ required: false
+ elements: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather info from standalone ESXi server having datacenter as 'ha-datacenter'
+ community.vmware.vmware_datastore_info:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ datacenter_name: "ha-datacenter"
+ delegate_to: localhost
+ register: info
+
+- name: Gather info from datacenter about specific datastore
+ community.vmware.vmware_datastore_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter_name: '{{ datacenter_name }}'
+ name: datastore1
+ delegate_to: localhost
+ register: info
+
+- name: Gather some info from a datastore using the vSphere API output schema
+ community.vmware.vmware_datastore_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter_name: '{{ datacenter_name }}'
+ schema: vsphere
+ properties:
+ - name
+ - info.vmfs.ssd
+ - capability.vsanSparseSupported
+ - overallStatus
+ delegate_to: localhost
+ register: info
+'''
+
+RETURN = r'''
+datastores:
+ description: metadata about the available datastores
+ returned: always
+ type: list
+ sample: [
+ {
+ "accessible": false,
+ "capacity": 42681237504,
+ "datastore_cluster": "datacluster0",
+ "freeSpace": 39638269952,
+ "maintenanceMode": "normal",
+ "multipleHostAccess": false,
+ "name": "datastore2",
+ "provisioned": 12289211488,
+ "type": "VMFS",
+ "uncommitted": 9246243936,
+ "url": "ds:///vmfs/volumes/5a69b18a-c03cd88c-36ae-5254001249ce/",
+ "vmfs_blockSize": 1024,
+ "vmfs_uuid": "5a69b18a-c03cd88c-36ae-5254001249ce",
+ "vmfs_version": "6.81"
+ },
+ {
+ "accessible": true,
+ "capacity": 5497558138880,
+ "datastore_cluster": "datacluster0",
+ "freeSpace": 4279000641536,
+ "maintenanceMode": "normal",
+ "multipleHostAccess": true,
+ "name": "datastore3",
+ "nfs_path": "/vol/datastore3",
+ "nfs_server": "nfs_server1",
+ "provisioned": 1708109410304,
+ "type": "NFS",
+ "uncommitted": 489551912960,
+ "url": "ds:///vmfs/volumes/420b3e73-67070776/"
+ },
+ ]
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi,
+ vmware_argument_spec,
+ get_all_objs,
+ find_cluster_by_name,
+ get_parent_datacenter)
+from ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient
+
+
class VMwareHostDatastore(PyVmomi):
    """ This class populates the datastore list """

    def __init__(self, module):
        """Cache the module parameters that steer datastore gathering."""
        super(VMwareHostDatastore, self).__init__(module)
        self.gather_nfs_mount_info = self.module.params['gather_nfs_mount_info']
        self.gather_vmfs_mount_info = self.module.params['gather_vmfs_mount_info']
        self.schema = self.module.params['schema']
        self.properties = self.module.params['properties']
        if self.module.params['show_tag']:
            # The REST client is only needed (and only constructed) for tag lookups.
            self.vmware_client = VmwareRestClient(self.module)

    def check_datastore_host(self, esxi_host, datastore):
        """ Return the mount info of the named datastore on the given ESXi host, or None. """
        esxi = self.find_hostsystem_by_name(esxi_host)
        if esxi is None:
            self.module.fail_json(msg="Failed to find ESXi hostname %s " % esxi_host)
        storage_system = esxi.configManager.storageSystem
        host_file_sys_vol_mount_info = storage_system.fileSystemVolumeInfo.mountInfo
        for host_mount_info in host_file_sys_vol_mount_info:
            if host_mount_info.volume.name == datastore:
                return host_mount_info
        return None

    def build_datastore_list(self, datastore_list):
        """ Build list with datastores

        Honors the C(schema) parameter: 'summary' produces the legacy flat
        dictionaries, anything else serializes the requested raw properties.
        A C(name) filter, when given, restricts the result to that datastore.
        """
        datastores = list()
        # Hoist the optional name filter out of the loop; None/'' means "no filter".
        name_filter = self.module.params['name']
        for datastore in datastore_list:
            if self.schema == 'summary':
                summary = datastore.summary
                datastore_summary = dict()
                datastore_summary['accessible'] = summary.accessible
                datastore_summary['capacity'] = summary.capacity
                datastore_summary['name'] = summary.name
                datastore_summary['freeSpace'] = summary.freeSpace
                datastore_summary['maintenanceMode'] = summary.maintenanceMode
                datastore_summary['multipleHostAccess'] = summary.multipleHostAccess
                datastore_summary['type'] = summary.type
                if self.gather_nfs_mount_info or self.gather_vmfs_mount_info:
                    if self.gather_nfs_mount_info and summary.type.startswith("NFS"):
                        # get mount info from the first ESXi host attached to this NFS datastore
                        host_mount_info = self.check_datastore_host(summary.datastore.host[0].key.name, summary.name)
                        datastore_summary['nfs_server'] = host_mount_info.volume.remoteHost
                        datastore_summary['nfs_path'] = host_mount_info.volume.remotePath
                    if self.gather_vmfs_mount_info and summary.type == "VMFS":
                        # get mount info from the first ESXi host attached to this VMFS datastore
                        host_mount_info = self.check_datastore_host(summary.datastore.host[0].key.name, summary.name)
                        datastore_summary['vmfs_blockSize'] = host_mount_info.volume.blockSize
                        datastore_summary['vmfs_version'] = host_mount_info.volume.version
                        datastore_summary['vmfs_uuid'] = host_mount_info.volume.uuid
                # uncommitted is optional / not always set; default it locally
                # instead of mutating the server-backed summary object.
                uncommitted = summary.uncommitted or 0
                datastore_summary['uncommitted'] = uncommitted
                datastore_summary['url'] = summary.url
                # Calculated values
                datastore_summary['provisioned'] = summary.capacity - summary.freeSpace + uncommitted
                datastore_summary['datastore_cluster'] = 'N/A'
                if isinstance(datastore.parent, vim.StoragePod):
                    datastore_summary['datastore_cluster'] = datastore.parent.name

                if self.module.params['show_tag']:
                    datastore_summary['tags'] = self.vmware_client.get_tags_for_datastore(datastore._moId)

                if not name_filter or datastore_summary['name'] == name_filter:
                    datastores.append(datastore_summary)
            else:
                temp_ds = self.to_json(datastore, self.properties)
                if self.module.params['show_tag']:
                    temp_ds.update({'tags': self.vmware_client.get_tags_for_datastore(datastore._moId)})
                if not name_filter or datastore.name == name_filter:
                    datastores.append(temp_ds)

        return datastores
+
+
class PyVmomiCache(object):
    """Cache references to objects that are requested multiple times but never modified."""

    def __init__(self, content, dc_name=None):
        self.content = content
        self.dc_name = dc_name
        self.clusters = {}
        self.parent_datacenters = {}

    def get_all_objs(self, content, types, confine_to_datacenter=True):
        """Wrapper around get_all_objs that can restrict results to self.dc_name."""
        objects = get_all_objs(content, types)
        if not confine_to_datacenter:
            return objects

        if hasattr(objects, 'items'):
            # resource pools come back as a dictionary: prune entries whose
            # parent datacenter does not match.
            for obj in list(objects):
                if get_parent_datacenter(obj).name != self.dc_name:
                    del objects[obj]
        else:
            # everything else should be a list
            objects = [obj for obj in objects if get_parent_datacenter(obj).name == self.dc_name]

        return objects
+
+
class PyVmomiHelper(PyVmomi):
    """Look up datastore objects, optionally confined to a datacenter or cluster."""

    def __init__(self, module):
        super(PyVmomiHelper, self).__init__(module)
        self.cache = PyVmomiCache(self.content, dc_name=self.params['datacenter'])

    def lookup_datastore(self, confine_to_datacenter):
        """Return datastore(s) visible from the ESXi host or vCenter server."""
        return self.cache.get_all_objs(self.content, [vim.Datastore], confine_to_datacenter)

    def lookup_datastore_by_cluster(self):
        """Return the datastores attached to the configured cluster."""
        cluster = find_cluster_by_name(self.content, self.params['cluster'])
        if not cluster:
            self.module.fail_json(msg='Failed to find cluster "%(cluster)s"' % self.params)
        return cluster.datastore
+
+
def main():
    """Entry point: gather datastore information and exit with the results."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        datacenter=dict(type='str', aliases=['datacenter_name']),
        cluster=dict(type='str'),
        gather_nfs_mount_info=dict(type='bool', default=False),
        gather_vmfs_mount_info=dict(type='bool', default=False),
        schema=dict(type='str', choices=['summary', 'vsphere'], default='summary'),
        properties=dict(type='list', elements='str'),
        show_tag=dict(type='bool', default=False),
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    pyv = PyVmomiHelper(module)

    # Scope of the lookup: cluster beats datacenter beats whole inventory.
    if module.params['cluster']:
        datastore_objs = pyv.lookup_datastore_by_cluster()
    elif module.params['datacenter']:
        datastore_objs = pyv.lookup_datastore(confine_to_datacenter=True)
    else:
        datastore_objs = pyv.lookup_datastore(confine_to_datacenter=False)

    vmware_host_datastore = VMwareHostDatastore(module)
    datastores = vmware_host_datastore.build_datastore_list(datastore_objs)

    # Read-only module: never reports a change.
    module.exit_json(changed=False, datastores=datastores)


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_datastore_maintenancemode.py b/ansible_collections/community/vmware/plugins/modules/vmware_datastore_maintenancemode.py
new file mode 100644
index 000000000..df14e4bda
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_datastore_maintenancemode.py
@@ -0,0 +1,213 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_datastore_maintenancemode
+short_description: Place a datastore into maintenance mode
+description:
+ - This module can be used to manage maintenance mode of a datastore.
+author:
+- "Abhijeet Kasurde (@Akasurde)"
+options:
+ datastore:
+ description:
+ - Name of datastore to manage.
+ - If C(datastore_cluster) or C(cluster_name) are not set, this parameter is required.
+ type: str
+ datastore_cluster:
+ description:
+ - Name of the datastore cluster from all child datastores to be managed.
+ - If C(datastore) or C(cluster_name) are not set, this parameter is required.
+ type: str
+ cluster_name:
+ description:
+ - Name of the cluster where datastore is connected to.
+ - If multiple datastores are connected to the given cluster, then all datastores will be managed by C(state).
+ - If C(datastore) or C(datastore_cluster) are not set, this parameter is required.
+ type: str
+ state:
+ description:
+ - If set to C(present), then enter datastore into maintenance mode.
+ - If set to C(present) and datastore is already in maintenance mode, then no action will be taken.
+ - If set to C(absent) and datastore is in maintenance mode, then exit maintenance mode.
+ - If set to C(absent) and datastore is not in maintenance mode, then no action will be taken.
+ choices: [ present, absent ]
+ default: present
+ required: false
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
# Fix: the last example said "Exit datastore into Maintenance Mode" which is
# wrong for state: absent — it exits *from* maintenance mode.
EXAMPLES = r'''
- name: Enter datastore into Maintenance Mode
  community.vmware.vmware_datastore_maintenancemode:
    hostname: '{{ vcenter_hostname }}'
    username: '{{ vcenter_username }}'
    password: '{{ vcenter_password }}'
    datastore: '{{ datastore_name }}'
    state: present
  delegate_to: localhost

- name: Enter all datastores under cluster into Maintenance Mode
  community.vmware.vmware_datastore_maintenancemode:
    hostname: '{{ vcenter_hostname }}'
    username: '{{ vcenter_username }}'
    password: '{{ vcenter_password }}'
    cluster_name: '{{ cluster_name }}'
    state: present
  delegate_to: localhost

- name: Enter all datastores under datastore cluster into Maintenance Mode
  community.vmware.vmware_datastore_maintenancemode:
    hostname: '{{ vcenter_hostname }}'
    username: '{{ vcenter_username }}'
    password: '{{ vcenter_password }}'
    datastore_cluster: '{{ datastore_cluster_name }}'
    state: present
  delegate_to: localhost

- name: Exit datastore from Maintenance Mode
  community.vmware.vmware_datastore_maintenancemode:
    hostname: '{{ vcenter_hostname }}'
    username: '{{ vcenter_username }}'
    password: '{{ vcenter_password }}'
    datastore: '{{ datastore_name }}'
    state: absent
  delegate_to: localhost
'''
+
+RETURN = r'''
+datastore_status:
+ description: Action taken for datastore
+ returned: always
+ type: dict
+ sample: {
+ "ds_226_01": "Datastore 'ds_226_01' is already in maintenance mode."
+ }
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi,
+ vmware_argument_spec,
+ wait_for_task,
+ find_cluster_by_name,
+ get_all_objs)
+from ansible.module_utils._text import to_native
+
+
class VmwareDatastoreMaintenanceMgr(PyVmomi):
    """Enter or exit maintenance mode for one or more datastores.

    The set of datastores to manage is resolved from exactly one of the
    'datastore', 'cluster_name' or 'datastore_cluster' module parameters.
    """

    def __init__(self, module):
        super(VmwareDatastoreMaintenanceMgr, self).__init__(module)
        datastore_name = self.params.get('datastore')
        cluster_name = self.params.get('cluster_name')
        datastore_cluster = self.params.get('datastore_cluster')
        self.datastore_objs = []
        if datastore_name:
            ds = self.find_datastore_by_name(datastore_name=datastore_name)
            if not ds:
                self.module.fail_json(msg='Failed to find datastore "%(datastore)s".' % self.params)
            self.datastore_objs = [ds]
        elif cluster_name:
            cluster = find_cluster_by_name(self.content, cluster_name)
            if not cluster:
                self.module.fail_json(msg='Failed to find cluster "%(cluster_name)s".' % self.params)
            self.datastore_objs = cluster.datastore
        elif datastore_cluster:
            # Bug fix: the old code handed the *entire* get_all_objs() result
            # (a dict of every StoragePod in the inventory) to .childEntity,
            # which never filtered by the requested name and failed with an
            # AttributeError on the dict.  Find the matching pod instead.
            datastore_cluster_obj = None
            for pod in get_all_objs(self.content, [vim.StoragePod]):
                if pod.name == datastore_cluster:
                    datastore_cluster_obj = pod
                    break
            if not datastore_cluster_obj:
                self.module.fail_json(msg='Failed to find datastore cluster "%(datastore_cluster)s".' % self.params)
            for datastore in datastore_cluster_obj.childEntity:
                self.datastore_objs.append(datastore)
        else:
            self.module.fail_json(msg="Please select one of 'cluster_name', 'datastore' or 'datastore_cluster'.")
        self.state = self.params.get('state')

    def ensure(self):
        """Apply the requested maintenance-mode state to every selected datastore.

        Exits the module with ``changed=True`` if at least one datastore was
        transitioned, and a per-datastore message dict in ``datastore_status``.
        """
        datastore_results = dict()
        change_datastore_list = []
        for datastore in self.datastore_objs:
            changed = False
            # Bug fix: these used to 'break', which silently skipped every
            # remaining datastore (cluster / datastore-cluster input) as soon
            # as one was already in the desired state.  Skip just this one.
            if self.state == 'present' and datastore.summary.maintenanceMode != 'normal':
                datastore_results[datastore.name] = "Datastore '%s' is already in maintenance mode." % datastore.name
                continue
            if self.state == 'absent' and datastore.summary.maintenanceMode == 'normal':
                datastore_results[datastore.name] = "Datastore '%s' is not in maintenance mode." % datastore.name
                continue

            try:
                if self.state == 'present':
                    storage_replacement_result = datastore.DatastoreEnterMaintenanceMode()
                    task = storage_replacement_result.task
                else:
                    task = datastore.DatastoreExitMaintenanceMode_Task()

                success, result = wait_for_task(task)

                if success:
                    changed = True
                    if self.state == 'present':
                        datastore_results[datastore.name] = "Datastore '%s' entered in maintenance mode." % datastore.name
                    else:
                        datastore_results[datastore.name] = "Datastore '%s' exited from maintenance mode." % datastore.name
            except vim.fault.InvalidState as invalid_state:
                if self.state == 'present':
                    msg = "Unable to enter datastore '%s' in" % datastore.name
                else:
                    msg = "Unable to exit datastore '%s' from" % datastore.name
                msg += " maintenance mode due to : %s" % to_native(invalid_state.msg)
                self.module.fail_json(msg=msg)
            except Exception as exc:
                if self.state == 'present':
                    msg = "Unable to enter datastore '%s' in" % datastore.name
                else:
                    msg = "Unable to exit datastore '%s' from" % datastore.name
                msg += " maintenance mode due to generic exception : %s" % to_native(exc)
                self.module.fail_json(msg=msg)
            change_datastore_list.append(changed)

        changed = False
        if any(change_datastore_list):
            changed = True
        self.module.exit_json(changed=changed, datastore_status=datastore_results)
+
+
def main():
    """Module entry point: build the argument spec and run the manager."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        datastore=dict(type='str', required=False),
        cluster_name=dict(type='str', required=False),
        datastore_cluster=dict(type='str', required=False),
        state=dict(type='str', default='present', choices=['present', 'absent']),
    )

    # Exactly one way of selecting datastores must be provided.
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[
            ['datastore', 'cluster_name', 'datastore_cluster'],
        ],
    )

    VmwareDatastoreMaintenanceMgr(module=module).ensure()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_deploy_ovf.py b/ansible_collections/community/vmware/plugins/modules/vmware_deploy_ovf.py
new file mode 100644
index 000000000..cece1cb76
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_deploy_ovf.py
@@ -0,0 +1,752 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+author: 'Matt Martz (@sivel)'
+short_description: 'Deploys a VMware virtual machine from an OVF or OVA file'
+description:
+- 'This module can be used to deploy a VMware VM from an OVF or OVA file'
+module: vmware_deploy_ovf
+notes: []
+options:
+ allow_duplicates:
+ default: "True"
+ description:
+ - Whether or not to allow duplicate VM names. ESXi allows duplicates, vCenter may not.
+ type: bool
+ datacenter:
+ default: ha-datacenter
+ description:
+ - Datacenter to deploy to.
+ type: str
+ cluster:
+ description:
+ - Cluster to deploy to.
+ - This is a required parameter, if C(esxi_hostname) is not set and C(hostname) is set to the vCenter server.
+ - C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
+ - This parameter is case sensitive.
+ type: str
+ esxi_hostname:
+ description:
+ - The ESXi hostname where the virtual machine will run.
+ - This is a required parameter, if C(cluster) is not set and C(hostname) is set to the vCenter server.
+ - C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
+ - This parameter is case sensitive.
+ type: str
+ datastore:
+ default: datastore1
+ description:
+ - Datastore to deploy to.
+ type: str
+ deployment_option:
+ description:
+ - The key of the chosen deployment option.
+ type: str
+ disk_provisioning:
+ choices:
+ - flat
+ - eagerZeroedThick
+ - monolithicSparse
+ - twoGbMaxExtentSparse
+ - twoGbMaxExtentFlat
+ - thin
+ - sparse
+ - thick
+ - seSparse
+ - monolithicFlat
+ default: thin
+ description:
+ - Disk provisioning type.
+ type: str
+ fail_on_spec_warnings:
+ description:
+ - Cause the module to treat OVF Import Spec warnings as errors.
+ default: false
+ type: bool
+ folder:
+ description:
+ - Absolute path of folder to place the virtual machine.
+ - If not specified, defaults to the value of C(datacenter.vmFolder).
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ type: str
+ inject_ovf_env:
+ description:
+ - Force the given properties to be inserted into an OVF Environment and injected through VMware Tools.
+ type: bool
+ default: false
+ name:
+ description:
+ - Name of the VM to work with.
+ - Virtual machine names in vCenter are not necessarily unique, which may be problematic.
+ type: str
+ networks:
+ default:
+ VM Network: VM Network
+ description:
+ - 'C(key: value) mapping of OVF network name, to the vCenter network name.'
+ type: dict
+ ovf:
+ description:
+ - 'Path to OVF or OVA file to deploy.'
+ aliases:
+ - ova
+ type: path
+ power_on:
+ default: true
+ description:
+ - 'Whether or not to power on the virtual machine after creation.'
+ type: bool
+ properties:
+ description:
+ - The assignment of values to the properties found in the OVF as key value pairs.
+ type: dict
+ resource_pool:
+ default: Resources
+ description:
+ - Resource Pool to deploy to.
+ type: str
+ wait:
+ default: true
+ description:
+ - 'Wait for the host to power on.'
+ type: bool
+ wait_for_ip_address:
+ default: false
+ description:
+ - Wait until vCenter detects an IP address for the VM.
+ - This requires vmware-tools (vmtoolsd) to properly work after creation.
+ type: bool
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- community.vmware.vmware_deploy_ovf:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ ovf: /path/to/ubuntu-16.04-amd64.ovf
+ wait_for_ip_address: true
+ delegate_to: localhost
+
+# Deploys a new VM named 'NewVM' in specific datacenter/cluster, with network mapping taken from variable and using ova template from an absolute path
+- community.vmware.vmware_deploy_ovf:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter: Datacenter1
+ cluster: Cluster1
+ datastore: vsandatastore
+ name: NewVM
+ networks: "{u'VM Network':u'{{ ProvisioningNetworkLabel }}'}"
+ power_on: false
+ ovf: /absolute/path/to/template/mytemplate.ova
+ delegate_to: localhost
+
+- community.vmware.vmware_deploy_ovf:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter: Datacenter1
+ esxi_hostname: test-server
+ datastore: test-datastore
+ ovf: /path/to/ubuntu-16.04-amd64.ovf
+ delegate_to: localhost
+'''
+
+
+RETURN = r'''
+instance:
+ description: metadata about the new virtual machine
+ returned: always
+ type: dict
+ sample: None
+'''
+
+import io
+import os
+import sys
+import tarfile
+import time
+import traceback
+
+import xml.etree.ElementTree as ET
+
+from threading import Thread
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import string_types
+from ansible.module_utils.urls import generic_urlparse, open_url, urlparse, urlunparse
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ find_network_by_name,
+ find_vm_by_name,
+ PyVmomi,
+ gather_vm_facts,
+ vmware_argument_spec,
+ wait_for_task,
+ wait_for_vm_ip,
+ set_vm_power_state)
+try:
+ from ansible_collections.community.vmware.plugins.module_utils.vmware import vim
+ from pyVmomi import vmodl
+except ImportError:
+ pass
+
+
def path_exists(value):
    """Expand user/env references in *value* and return it if the path exists.

    :raises ValueError: when the expanded path does not exist on disk.
    """
    path = value if isinstance(value, str) else str(value)
    path = os.path.expanduser(os.path.expandvars(path))
    if os.path.exists(path):
        return path
    raise ValueError('%s is not a valid path' % path)
+
+
class ProgressReader(io.FileIO):
    """A FileIO that counts how many bytes have been read.

    The upload thread polls ``bytes_read`` to report lease progress.
    """

    def __init__(self, name, mode='r', closefd=True):
        self.bytes_read = 0
        super(ProgressReader, self).__init__(name, mode=mode, closefd=closefd)

    def read(self, size=10240):
        chunk = super(ProgressReader, self).read(size)
        self.bytes_read += len(chunk)
        return chunk
+
+
class TarFileProgressReader(tarfile.ExFileObject):
    """An ExFileObject (tar member reader) that counts bytes read and can be
    used as a context manager."""

    def __init__(self, *args):
        self.bytes_read = 0
        super(TarFileProgressReader, self).__init__(*args)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Best-effort close; never mask an in-flight exception.
        try:
            self.close()
        except Exception:
            pass

    def read(self, size=10240):
        chunk = super(TarFileProgressReader, self).read(size)
        self.bytes_read += len(chunk)
        return chunk
+
+
class VMDKUploader(Thread):
    """Background thread that streams one disk file to an NFC lease URL.

    ``bytes_read`` exposes upload progress for the caller to poll; ``e``
    holds ``sys.exc_info()`` if the upload raised.
    """

    def __init__(self, vmdk, url, validate_certs=True, tarinfo=None, create=False):
        Thread.__init__(self)

        self.vmdk = vmdk
        # Size comes from the tar member when reading out of an OVA,
        # otherwise from the file on disk.
        self.size = tarinfo.size if tarinfo else os.stat(vmdk).st_size
        self.url = url
        self.validate_certs = validate_certs
        self.tarinfo = tarinfo

        self.f = None
        self.e = None

        self._create = create

    @property
    def bytes_read(self):
        try:
            return self.f.bytes_read
        except AttributeError:
            # Reader not opened yet.
            return 0

    def _request_opts(self):
        '''
        Requests for vmdk files differ from other file types. Build the request options here to handle that
        '''
        headers = {
            'Content-Length': self.size,
            'Content-Type': 'application/octet-stream',
        }

        if self._create:
            # Non-VMDK
            method = 'PUT'
            headers['Overwrite'] = 't'
        else:
            # VMDK
            method = 'POST'
            headers['Content-Type'] = 'application/x-vnd.vmware-streamVmdk'

        return {'method': method, 'headers': headers}

    def _open_url(self):
        open_url(self.url, data=self.f, validate_certs=self.validate_certs, **self._request_opts())

    def run(self):
        # Stream either out of the OVA tarball or straight from disk; on any
        # failure stash exc_info for the main thread to report.
        try:
            if self.tarinfo:
                with TarFileProgressReader(self.vmdk, self.tarinfo) as self.f:
                    self._open_url()
            else:
                with ProgressReader(self.vmdk, 'rb') as self.f:
                    self._open_url()
        except Exception:
            self.e = sys.exc_info()
+
+
class VMwareDeployOvf(PyVmomi):
    """Deploy a VM from an OVF/OVA file via the vSphere OvfManager/HttpNfcLease
    APIs: resolve placement objects, create an import spec, upload the disk
    files, then optionally inject the OVF environment and power on.
    """

    def __init__(self, module):
        super(VMwareDeployOvf, self).__init__(module)
        self.module = module
        self.params = module.params

        # Placement objects, resolved lazily by get_objects().
        self.datastore = None
        self.datacenter = None
        self.resource_pool = None
        self.network_mappings = []

        # OVF descriptor text and, for OVA input, the open tarfile handle.
        self.ovf_descriptor = None
        self.tar = None

        # NFC lease state created by get_lease().
        self.lease = None
        self.import_spec = None
        self.entity = None

    def get_objects(self):
        """Resolve datacenter, resource pool, datastore and network mappings
        from the module parameters; fail the module if any cannot be found.

        :return: (datastore, datacenter, resource_pool, network_mappings)
        """
        # Get datacenter firstly
        self.datacenter = self.find_datacenter_by_name(self.params['datacenter'])
        if self.datacenter is None:
            self.module.fail_json(msg="Datacenter '%(datacenter)s' could not be located" % self.params)

        # Get cluster in datacenter if cluster configured
        if self.params['cluster']:
            cluster = self.find_cluster_by_name(self.params['cluster'], datacenter_name=self.datacenter)
            if cluster is None:
                self.module.fail_json(msg="Unable to find cluster '%(cluster)s'" % self.params)
            self.resource_pool = self.find_resource_pool_by_cluster(self.params['resource_pool'], cluster=cluster)
        # Or get ESXi host in datacenter if ESXi host configured
        elif self.params['esxi_hostname']:
            host = self.find_hostsystem_by_name(self.params['esxi_hostname'], datacenter=self.datacenter)
            if host is None:
                self.module.fail_json(msg="Unable to find host '%(esxi_hostname)s' in datacenter '%(datacenter)s'" % self.params)
            self.resource_pool = self.find_resource_pool_by_name(self.params['resource_pool'], folder=host.parent)
        else:
            # For more than one datacenter env, specify 'folder' to datacenter hostFolder
            self.resource_pool = self.find_resource_pool_by_name(self.params['resource_pool'], folder=self.datacenter.hostFolder)

        if not self.resource_pool:
            self.module.fail_json(msg="Resource pool '%(resource_pool)s' could not be located" % self.params)

        self.datastore = None
        # 'datastore' may name a datastore cluster; if so, pick the accessible
        # member datastore (not in maintenance mode) with the most free space.
        datastore_cluster_obj = self.find_datastore_cluster_by_name(self.params['datastore'], datacenter=self.datacenter)
        if datastore_cluster_obj:
            datastore = None
            datastore_freespace = 0
            for ds in datastore_cluster_obj.childEntity:
                if isinstance(ds, vim.Datastore) and ds.summary.freeSpace > datastore_freespace:
                    # If datastore field is provided, filter destination datastores
                    if ds.summary.maintenanceMode != 'normal' or not ds.summary.accessible:
                        continue
                    datastore = ds
                    datastore_freespace = ds.summary.freeSpace
            if datastore:
                self.datastore = datastore
        else:
            self.datastore = self.find_datastore_by_name(self.params['datastore'], datacenter_name=self.datacenter)

        if self.datastore is None:
            self.module.fail_json(msg="Datastore '%(datastore)s' could not be located on specified ESXi host or"
                                      " datacenter" % self.params)

        # Map each OVF network name to the corresponding vCenter network.
        for key, value in self.params['networks'].items():
            network = find_network_by_name(self.content, value, datacenter_name=self.datacenter)
            if not network:
                self.module.fail_json(msg='%(networks)s could not be located' % self.params)
            network_mapping = vim.OvfManager.NetworkMapping()
            network_mapping.name = key
            network_mapping.network = network
            self.network_mappings.append(network_mapping)

        return self.datastore, self.datacenter, self.resource_pool, self.network_mappings

    def get_ovf_descriptor(self):
        """Read the OVF XML descriptor, either directly from an .ovf file or
        out of the .ova tarball (whose handle is kept for later VMDK reads).
        """
        # Check whether ovf/ova file exists
        try:
            path_exists(self.params['ovf'])
        except ValueError as e:
            self.module.fail_json(msg="%s" % e)

        if tarfile.is_tarfile(self.params['ovf']):
            self.tar = tarfile.open(self.params['ovf'])
            ovf = None
            # The descriptor is the first member with an .ovf extension.
            for candidate in self.tar.getmembers():
                dummy, ext = os.path.splitext(candidate.name)
                if ext.lower() == '.ovf':
                    ovf = candidate
                    break
            if not ovf:
                self.module.fail_json(msg='Could not locate OVF file in %(ovf)s' % self.params)

            self.ovf_descriptor = to_native(self.tar.extractfile(ovf).read())
        else:
            with open(self.params['ovf']) as f:
                self.ovf_descriptor = f.read()

        return self.ovf_descriptor

    def get_lease(self):
        """Build the OVF import spec and acquire an HttpNfcLease for it.

        Exits the module early (changed=False) when allow_duplicates is off
        and a VM with the spec's name already exists, and exits changed=True
        in check mode before any import starts.
        """
        datastore, datacenter, resource_pool, network_mappings = self.get_objects()

        params = {
            'diskProvisioning': self.params['disk_provisioning'],
        }
        if self.params['name']:
            params['entityName'] = self.params['name']
        if network_mappings:
            params['networkMapping'] = network_mappings
        if self.params['deployment_option']:
            params['deploymentOption'] = self.params['deployment_option']
        if self.params['properties']:
            params['propertyMapping'] = []
            for key, value in self.params['properties'].items():
                property_mapping = vim.KeyValue()
                property_mapping.key = key
                # Property values must be strings; booleans are coerced.
                property_mapping.value = str(value) if isinstance(value, bool) else value
                params['propertyMapping'].append(property_mapping)

        if self.params['folder']:
            folder = self.content.searchIndex.FindByInventoryPath(self.params['folder'])
            if not folder:
                self.module.fail_json(msg="Unable to find the specified folder %(folder)s" % self.params)
        else:
            folder = datacenter.vmFolder

        spec_params = vim.OvfManager.CreateImportSpecParams(**params)

        ovf_descriptor = self.get_ovf_descriptor()

        self.import_spec = self.content.ovfManager.CreateImportSpec(
            ovf_descriptor,
            resource_pool,
            datastore,
            spec_params
        )

        errors = [to_native(e.msg) for e in getattr(self.import_spec, 'error', [])]
        if self.params['fail_on_spec_warnings']:
            # Treat spec warnings as fatal when requested.
            errors.extend(
                (to_native(w.msg) for w in getattr(self.import_spec, 'warning', []))
            )
        if errors:
            self.module.fail_json(
                msg='Failure validating OVF import spec: %s' % '. '.join(errors)
            )

        for warning in getattr(self.import_spec, 'warning', []):
            self.module.warn('Problem validating OVF import spec: %s' % to_native(warning.msg))

        name = self.params.get('name')
        if not self.params['allow_duplicates']:
            name = self.import_spec.importSpec.configSpec.name
            match = find_vm_by_name(self.content, name, folder=folder)
            if match:
                self.module.exit_json(instance=gather_vm_facts(self.content, match), changed=False)

        if self.module.check_mode:
            self.module.exit_json(changed=True, instance={'hw_name': name})

        try:
            self.lease = resource_pool.ImportVApp(
                self.import_spec.importSpec,
                folder
            )
        except vmodl.fault.SystemError as e:
            self.module.fail_json(
                msg='Failed to start import: %s' % to_native(e.msg)
            )

        # Poll until the lease is ready to accept uploads.
        while self.lease.state != vim.HttpNfcLease.State.ready:
            time.sleep(0.1)

        self.entity = self.lease.info.entity

        return self.lease, self.import_spec

    def _normalize_url(self, url):
        '''
        The hostname in URLs from vmware may be ``*`` update it accordingly
        '''
        url_parts = generic_urlparse(urlparse(url))
        if url_parts.hostname == '*':
            if url_parts.port:
                url_parts.netloc = '%s:%d' % (self.params['hostname'], url_parts.port)
            else:
                url_parts.netloc = self.params['hostname']

        return urlunparse(url_parts.as_list())

    def vm_existence_check(self):
        """If the target VM already exists, apply power state / gather facts
        via deploy() and exit the module without importing anything."""
        vm_obj = self.get_vm()
        if vm_obj:
            self.entity = vm_obj
            facts = self.deploy()
            self.module.exit_json(**facts)

    def upload(self):
        """Upload every file referenced by the import spec to its lease
        device URL, reporting progress on the lease while uploading."""
        if self.params['ovf'] is None:
            self.module.fail_json(msg="OVF path is required for upload operation.")

        ovf_dir = os.path.dirname(self.params['ovf'])

        lease, import_spec = self.get_lease()

        uploaders = []

        for file_item in import_spec.fileItem:
            # Match the spec file item to the lease's upload URL.
            device_upload_url = None
            for device_url in lease.info.deviceUrl:
                if file_item.deviceId == device_url.importKey:
                    device_upload_url = self._normalize_url(device_url.url)
                    break

            if not device_upload_url:
                lease.HttpNfcLeaseAbort(
                    vmodl.fault.SystemError(reason='Failed to find deviceUrl for file %s' % file_item.path)
                )
                self.module.fail_json(
                    msg='Failed to find deviceUrl for file %s' % file_item.path
                )

            vmdk_tarinfo = None
            if self.tar:
                # OVA: read the disk straight out of the tarball.
                vmdk = self.tar
                try:
                    vmdk_tarinfo = self.tar.getmember(file_item.path)
                except KeyError:
                    lease.HttpNfcLeaseAbort(
                        vmodl.fault.SystemError(reason='Failed to find VMDK file %s in OVA' % file_item.path)
                    )
                    self.module.fail_json(
                        msg='Failed to find VMDK file %s in OVA' % file_item.path
                    )
            else:
                # OVF: disks live next to the descriptor on disk.
                vmdk = os.path.join(ovf_dir, file_item.path)
                try:
                    path_exists(vmdk)
                except ValueError:
                    lease.HttpNfcLeaseAbort(
                        vmodl.fault.SystemError(reason='Failed to find VMDK file at %s' % vmdk)
                    )
                    self.module.fail_json(
                        msg='Failed to find VMDK file at %s' % vmdk
                    )

            uploaders.append(
                VMDKUploader(
                    vmdk,
                    device_upload_url,
                    self.params['validate_certs'],
                    tarinfo=vmdk_tarinfo,
                    create=file_item.create
                )
            )

        total_size = sum(u.size for u in uploaders)
        total_bytes_read = [0] * len(uploaders)
        # Upload sequentially; each uploader runs in its own thread so we can
        # poll progress and keep the lease alive via HttpNfcLeaseProgress.
        for i, uploader in enumerate(uploaders):
            uploader.start()
            while uploader.is_alive():
                time.sleep(0.1)
                total_bytes_read[i] = uploader.bytes_read
                lease.HttpNfcLeaseProgress(int(100.0 * sum(total_bytes_read) / total_size))

            if uploader.e:
                lease.HttpNfcLeaseAbort(
                    vmodl.fault.SystemError(reason='%s' % to_native(uploader.e[1]))
                )
                self.module.fail_json(
                    msg='%s' % to_native(uploader.e[1]),
                    exception=''.join(traceback.format_tb(uploader.e[2]))
                )

    def complete(self):
        """Signal the lease that all uploads finished successfully."""
        self.lease.HttpNfcLeaseComplete()

    def inject_ovf_env(self):
        """Write the OVF environment (platform info + user properties) into
        the VM's guestinfo.ovfEnv extraConfig key for VMware Tools to expose."""
        attrib = {
            'xmlns': 'http://schemas.dmtf.org/ovf/environment/1',
            'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
            'xmlns:oe': 'http://schemas.dmtf.org/ovf/environment/1',
            'xmlns:ve': 'http://www.vmware.com/schema/ovfenv',
            'oe:id': '',
            've:esxId': self.entity._moId
        }
        env = ET.Element('Environment', **attrib)

        platform = ET.SubElement(env, 'PlatformSection')
        ET.SubElement(platform, 'Kind').text = self.content.about.name
        ET.SubElement(platform, 'Version').text = self.content.about.version
        ET.SubElement(platform, 'Vendor').text = self.content.about.vendor
        ET.SubElement(platform, 'Locale').text = 'US'

        prop_section = ET.SubElement(env, 'PropertySection')
        for key, value in self.params['properties'].items():
            params = {
                'oe:key': key,
                'oe:value': str(value) if isinstance(value, bool) else value
            }
            ET.SubElement(prop_section, 'Property', **params)

        opt = vim.option.OptionValue()
        opt.key = 'guestinfo.ovfEnv'
        opt.value = '<?xml version="1.0" encoding="UTF-8"?>' + to_native(ET.tostring(env))

        config_spec = vim.vm.ConfigSpec()
        config_spec.extraConfig = [opt]

        task = self.entity.ReconfigVM_Task(config_spec)
        wait_for_task(task)

    def deploy(self):
        """Power on the VM (if requested), optionally wait for an IP address,
        and return the gathered facts for the deployed entity."""
        facts = {}

        if self.params['power_on']:
            facts = set_vm_power_state(self.content, self.entity, 'poweredon', force=False)
            if self.params['wait_for_ip_address']:
                _facts = wait_for_vm_ip(self.content, self.entity)
                if not _facts:
                    self.module.fail_json(msg='Waiting for IP address timed out')

        if not facts:
            facts.update(dict(instance=gather_vm_facts(self.content, self.entity)))

        return facts
+
+
def main():
    """Module entry point for vmware_deploy_ovf.

    Builds the argument spec (defaults mirror the DOCUMENTATION block), then:
    existence check -> upload -> complete lease -> optional OVF env injection
    -> power on / fact gathering.
    """
    argument_spec = vmware_argument_spec()
    argument_spec.update({
        'name': {},
        'datastore': {
            'default': 'datastore1',
        },
        'datacenter': {
            'default': 'ha-datacenter',
        },
        'cluster': {
            'default': None,
        },
        'esxi_hostname': {
            'default': None,
        },
        'deployment_option': {
            'default': None,
        },
        'folder': {
            'default': None,
        },
        'inject_ovf_env': {
            'default': False,
            'type': 'bool',
        },
        'resource_pool': {
            'default': 'Resources',
        },
        'networks': {
            'default': {
                'VM Network': 'VM Network',
            },
            'type': 'dict',
        },
        'ovf': {
            'type': 'path',
            'aliases': ['ova'],
        },
        'disk_provisioning': {
            'choices': [
                'flat',
                'eagerZeroedThick',
                'monolithicSparse',
                'twoGbMaxExtentSparse',
                'twoGbMaxExtentFlat',
                'thin',
                'sparse',
                'thick',
                'seSparse',
                'monolithicFlat'
            ],
            'default': 'thin',
        },
        'power_on': {
            'type': 'bool',
            'default': True,
        },
        'properties': {
            'type': 'dict',
        },
        'wait': {
            'type': 'bool',
            'default': True,
        },
        'wait_for_ip_address': {
            'type': 'bool',
            'default': False,
        },
        'allow_duplicates': {
            'type': 'bool',
            'default': True,
        },
        'fail_on_spec_warnings': {
            'type': 'bool',
            'default': False,
        },
    })
    # A VM can target either a cluster or a specific ESXi host, never both.
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[
            ['cluster', 'esxi_hostname'],
        ],
    )

    deploy_ovf = VMwareDeployOvf(module)
    # Exits the module early when the VM already exists.
    deploy_ovf.vm_existence_check()
    deploy_ovf.upload()
    deploy_ovf.complete()

    if module.params['inject_ovf_env']:
        deploy_ovf.inject_ovf_env()

    facts = deploy_ovf.deploy()
    facts.update(changed=True)
    module.exit_json(**facts)


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_drs_group.py b/ansible_collections/community/vmware/plugins/modules/vmware_drs_group.py
new file mode 100644
index 000000000..a4400d789
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_drs_group.py
@@ -0,0 +1,569 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Karsten Kaj Jakobsen <kj@patientsky.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
# Module documentation consumed by ansible-doc.
# Fix: "the group doesn't exists" -> "the group doesn't exist" (grammar).
DOCUMENTATION = r'''
---
author:
  - "Karsten Kaj Jakobsen (@karstenjakobsen)"
description:
  - "This module can be used to create VM/Host groups in a given cluster. Creates a vm group if C(vms) is set. Creates a host group if C(hosts) is set."
extends_documentation_fragment:
- community.vmware.vmware.documentation

module: vmware_drs_group
options:
  cluster_name:
    description:
      - "Cluster to create vm/host group."
    required: true
    type: str
  datacenter:
    aliases:
      - datacenter_name
    description:
      - "Datacenter to search for given cluster. If not set, we use first cluster we encounter with C(cluster_name)."
    required: false
    type: str
  group_name:
    description:
      - "The name of the group to create or remove."
    required: true
    type: str
  hosts:
    description:
      - "List of hosts to create in group."
      - "Required only if C(vms) is not set."
    required: false
    type: list
    elements: str
  state:
    choices:
      - present
      - absent
    default: present
    description:
      - "If set to C(present) and the group doesn't exist then the group will be created."
      - "If set to C(absent) and the group exists then the group will be deleted."
    type: str
  vms:
    description:
      - "List of vms to create in group."
      - "Required only if C(hosts) is not set."
    required: false
    type: list
    elements: str
short_description: "Creates vm/host group in a given cluster."
'''
+
# Playbook examples rendered by ansible-doc.
EXAMPLES = r'''
---
- name: "Create DRS VM group"
  delegate_to: localhost
  community.vmware.vmware_drs_group:
    hostname: "{{ vcenter_hostname }}"
    password: "{{ vcenter_password }}"
    username: "{{ vcenter_username }}"
    cluster_name: DC0_C0
    datacenter_name: DC0
    group_name: TEST_VM_01
    vms:
      - DC0_C0_RP0_VM0
      - DC0_C0_RP0_VM1
    state: present

- name: "Create DRS Host group"
  delegate_to: localhost
  community.vmware.vmware_drs_group:
    hostname: "{{ vcenter_hostname }}"
    password: "{{ vcenter_password }}"
    username: "{{ vcenter_username }}"
    cluster_name: DC0_C0
    datacenter_name: DC0
    group_name: TEST_HOST_01
    hosts:
      - DC0_C0_H0
      - DC0_C0_H1
      - DC0_C0_H2
    state: present

- name: "Delete DRS Host group"
  delegate_to: localhost
  community.vmware.vmware_drs_group:
    hostname: "{{ vcenter_hostname }}"
    password: "{{ vcenter_password }}"
    username: "{{ vcenter_username }}"
    cluster_name: DC0_C0
    datacenter_name: DC0
    group_name: TEST_HOST_01
    state: absent
'''

# Return-value documentation rendered by ansible-doc.
RETURN = r'''
drs_group_facts:
    description: Metadata about DRS group created
    returned: always
    type: dict
    sample:
        "drs_group_facts": {
            "changed": true,
            "failed": false,
            "msg": "Created host group TEST_HOST_01 successfully",
            "result": {
                "DC0_C0": [
                    {
                        "group_name": "TEST_HOST_01",
                        "hosts": [
                            "DC0_C0_H0",
                            "DC0_C0_H1",
                            "DC0_C0_H2"
                        ],
                        "type": "host"
                    }
                ]
            }
        }
'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi,
+ vmware_argument_spec,
+ wait_for_task,
+ find_cluster_by_name,
+ find_vm_by_id,
+ find_datacenter_by_name)
+
+
class VmwareDrsGroupManager(PyVmomi):
    """
    Manage DRS VM/Host groups in a given cluster.

    A host group is created when a host list is supplied, a VM group when
    a VM list is supplied; with state 'absent' the named group is removed.
    """

    def __init__(self, module, cluster_name, group_name, state,
                 datacenter_name=None, vm_list=None, host_list=None):
        """
        Resolve the datacenter, cluster and (if present) the existing group,
        then pre-compute the DRS reconfigure operation:
        'add' (group missing), 'edit' (group exists) or 'remove' (state absent).
        """

        super(VmwareDrsGroupManager, self).__init__(module)

        self.__datacenter_name = datacenter_name
        self.__datacenter_obj = None
        self.__cluster_name = cluster_name
        self.__cluster_obj = None
        self.__group_name = group_name
        self.__group_obj = None
        self.__operation = None
        self.__vm_list = vm_list
        self.__vm_obj_list = []
        self.__host_list = host_list
        self.__host_obj_list = []
        self.__msg = 'Nothing to see here...'
        self.__result = dict()
        self.__changed = False
        self.__state = state

        if datacenter_name is not None:

            self.__datacenter_obj = find_datacenter_by_name(self.content, self.__datacenter_name)

            if self.__datacenter_obj is None:
                raise Exception("Datacenter '%s' not found" % self.__datacenter_name)

        # Without a datacenter, the first cluster matching the name is used.
        self.__cluster_obj = find_cluster_by_name(content=self.content,
                                                  cluster_name=self.__cluster_name,
                                                  datacenter=self.__datacenter_obj)

        # Throw error if cluster does not exist
        if self.__cluster_obj is None:
            if not module.check_mode:
                raise Exception("Cluster '%s' not found" % self.__cluster_name)
        else:
            # get group
            self.__group_obj = self.__get_group_by_name()
            # Set result here. If nothing is to be updated, result is already set
            self.__set_result(self.__group_obj)

            # Do not populate lists if we are deleting group
            if self.__state == 'present':

                if self.__group_obj:
                    self.__operation = 'edit'
                else:
                    self.__operation = 'add'

                if self.__vm_list is not None:
                    self.__set_vm_obj_list(vm_list=self.__vm_list)

                if self.__host_list is not None:
                    self.__set_host_obj_list(host_list=self.__host_list)
            else:
                self.__operation = 'remove'

    def get_msg(self):
        """
        Return the human-readable message for the Ansible result.

        Returns: string
        """
        return self.__msg

    def get_result(self):
        """
        Return the result dict for the Ansible result.

        Returns: dict
        """
        return self.__result

    def __set_result(self, group_obj):
        """
        Build the result dict for a successful run.
        Args:
            group_obj: group object

        Returns: None

        """
        self.__result = dict()

        if (self.__cluster_obj is not None) and (group_obj is not None):
            self.__result[self.__cluster_obj.name] = []
            self.__result[self.__cluster_obj.name].append(self.__normalize_group_data(group_obj))

    def get_changed(self):
        """
        Return whether anything changed.

        Returns: boolean
        """
        return self.__changed

    def __set_vm_obj_list(self, vm_list=None, cluster_obj=None):
        """
        Populate the VM object list from a list of VM names.
        Args:
            vm_list: List of vm names
            cluster_obj: Cluster managed object to search in

        Returns: None

        """

        if vm_list is None:
            vm_list = self.__vm_list

        if cluster_obj is None:
            cluster_obj = self.__cluster_obj

        if vm_list is not None:
            for vm in vm_list:
                if not self.module.check_mode:
                    # Look up the VM object by name within the cluster
                    vm_obj = find_vm_by_id(content=self.content, vm_id=vm,
                                           vm_id_type='vm_name', cluster=cluster_obj)
                    if vm_obj is None:
                        raise Exception("VM %s does not exist in cluster %s" % (vm,
                                                                                self.__cluster_name))
                    self.__vm_obj_list.append(vm_obj)

    def __set_host_obj_list(self, host_list=None):
        """
        Populate the host object list from a list of hostnames.
        Args:
            host_list: List of host names

        Returns: None

        """

        if host_list is None:
            host_list = self.__host_list

        if host_list is not None:
            for host in host_list:
                if not self.module.check_mode:
                    # Look up the ESXi host object by name
                    host_obj = self.find_hostsystem_by_name(host)
                    if host_obj is None:
                        raise Exception("ESXi host %s does not exist in cluster %s" % (host, self.__cluster_name))
                    self.__host_obj_list.append(host_obj)

    def __get_group_by_name(self, group_name=None, cluster_obj=None):
        """
        Find a DRS group by name.
        Args:
            group_name: Name of group
            cluster_obj: vim Cluster object

        Returns: Group Object if found or None

        """

        if group_name is None:
            group_name = self.__group_name

        if cluster_obj is None:
            cluster_obj = self.__cluster_obj

        # Allow for group check even if dry run
        if self.module.check_mode and cluster_obj is None:
            return None

        for group in cluster_obj.configurationEx.group:
            if group.name == group_name:
                return group

        # No group found
        return None

    def __populate_vm_host_list(self, group_name=None, cluster_obj=None, host_group=False):
        """
        Return all VM/Host names using given group name
        Args:
            group_name: group name
            cluster_obj: Cluster managed object
            host_group: True if we want only host name from group

        Returns: List of VM/Host names belonging to given group object

        """
        obj_name_list = []

        if group_name is None:
            group_name = self.__group_name

        if cluster_obj is None:
            cluster_obj = self.__cluster_obj

        if not all([group_name, cluster_obj]):
            return obj_name_list

        # NOTE: membership is always read from the group resolved in __init__;
        # the group_name/cluster_obj arguments only gate the guard above.
        group = self.__group_obj

        if not host_group and isinstance(group, vim.cluster.VmGroup):
            obj_name_list = [vm.name for vm in group.vm]

        elif host_group and isinstance(group, vim.cluster.HostGroup):
            obj_name_list = [host.name for host in group.host]

        return obj_name_list

    def __check_if_vms_hosts_changed(self, group_name=None, cluster_obj=None, host_group=False):
        """
        Check whether the requested VM/host membership differs from the
        group's current membership.
        Args:
            group_name: Name of group
            cluster_obj: vim Cluster object
            host_group: True if we want to check hosts, else check vms

        Returns: Bool

        """

        if group_name is None:
            group_name = self.__group_name

        if cluster_obj is None:
            cluster_obj = self.__cluster_obj

        # assumes the relevant vms/hosts list was supplied (callers guarantee
        # this via the module's required_one_of/mutually_exclusive constraints)
        list_a = self.__host_list if host_group else self.__vm_list
        list_b = self.__populate_vm_host_list(host_group=host_group)

        # By casting lists as a set, you remove duplicates and order doesn't count. Comparing sets is also much faster and more efficient than comparing lists.
        if set(list_a) == set(list_b):
            return False
        return True

    def __create_host_group(self):
        """Create or update the DRS host group via a cluster reconfigure task."""

        # Check if anything has changed when editing
        if self.__operation == 'add' or (self.__operation == 'edit' and self.__check_if_vms_hosts_changed(host_group=True)):

            group = vim.cluster.HostGroup()
            group.name = self.__group_name
            group.host = self.__host_obj_list

            group_spec = vim.cluster.GroupSpec(info=group, operation=self.__operation)
            config_spec = vim.cluster.ConfigSpecEx(groupSpec=[group_spec])

            # In check mode 'changed' is reported without touching the cluster
            changed = True
            if not self.module.check_mode:
                task = self.__cluster_obj.ReconfigureEx(config_spec, modify=True)
                changed, result = wait_for_task(task)

            # Set new result since something changed
            self.__set_result(group)
            self.__changed = changed

        if self.__operation == 'edit':
            self.__msg = "Updated host group %s successfully" % (self.__group_name)
        else:
            self.__msg = "Created host group %s successfully" % (self.__group_name)

    def __create_vm_group(self):
        """Create or update the DRS VM group via a cluster reconfigure task."""

        # Check if anything has changed when editing
        if self.__operation == 'add' or (self.__operation == 'edit' and self.__check_if_vms_hosts_changed()):

            group = vim.cluster.VmGroup()
            group.name = self.__group_name
            group.vm = self.__vm_obj_list

            group_spec = vim.cluster.GroupSpec(info=group, operation=self.__operation)
            config_spec = vim.cluster.ConfigSpecEx(groupSpec=[group_spec])

            # Check if dry run
            changed = True
            if not self.module.check_mode:
                task = self.__cluster_obj.ReconfigureEx(config_spec, modify=True)
                changed, result = wait_for_task(task)

            self.__set_result(group)
            self.__changed = changed

        if self.__operation == 'edit':
            self.__msg = "Updated vm group %s successfully" % (self.__group_name)
        else:
            self.__msg = "Created vm group %s successfully" % (self.__group_name)

    def __normalize_group_data(self, group_obj):
        """
        Return human readable group spec
        Args:
            group_obj: Group object

        Returns: DRS group object fact

        """
        if not all([group_obj]):
            return {}

        # Check if group is a host group
        if hasattr(group_obj, 'host'):
            return dict(
                group_name=group_obj.name,
                hosts=self.__host_list,
                type="host"
            )
        return dict(
            group_name=group_obj.name,
            vms=self.__vm_list,
            type="vm"
        )

    def create_drs_group(self):
        """
        Create (or update) a DRS host/vm group, dispatching on which
        member list was supplied.
        """

        if self.__vm_list is None:
            self.__create_host_group()
        elif self.__host_list is None:
            self.__create_vm_group()
        else:
            raise Exception('Failed, no hosts or vms defined')

    def delete_drs_group(self):
        """
        Delete the DRS host/vm group if it exists.
        """

        if self.__group_obj is not None:

            self.__changed = True

            # Check if dry run
            if not self.module.check_mode:

                group_spec = vim.cluster.GroupSpec(removeKey=self.__group_name, operation=self.__operation)
                config_spec = vim.cluster.ConfigSpecEx(groupSpec=[group_spec])

                task = self.__cluster_obj.ReconfigureEx(config_spec, modify=True)
                wait_for_task(task)

        # Do not throw error if group does not exist. Simply set changed = False
        if self.__changed:
            self.__msg = "Deleted group `%s` successfully" % (self.__group_name)
        else:
            self.__msg = "DRS group `%s` does not exists or already deleted" % (self.__group_name)
+
+
def main():
    """Module entry point: create or delete a DRS VM/Host group."""

    argument_spec = vmware_argument_spec()
    argument_spec.update(
        state=dict(type='str', default='present', choices=['present', 'absent']),
        datacenter=dict(type='str', required=False, aliases=['datacenter_name']),
        cluster_name=dict(type='str', required=True),
        group_name=dict(type='str', required=True),
        vms=dict(type='list', elements='str'),
        hosts=dict(type='list', elements='str'),
    )

    # Exactly one of vms/hosts decides whether a VM group or a host group is managed.
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_if=[['state', 'absent', ['group_name']]],
        supports_check_mode=True,
        mutually_exclusive=[['vms', 'hosts']],
        required_one_of=[['vms', 'hosts']]
    )

    params = module.params
    try:
        manager = VmwareDrsGroupManager(module=module,
                                        datacenter_name=params.get('datacenter', None),
                                        cluster_name=params['cluster_name'],
                                        group_name=params['group_name'],
                                        vm_list=params['vms'],
                                        host_list=params['hosts'],
                                        state=params['state'])

        if params['state'] == 'present':
            manager.create_drs_group()
        elif params['state'] == 'absent':
            manager.delete_drs_group()

        results = dict(msg=manager.get_msg(),
                       failed=False,
                       changed=manager.get_changed(),
                       result=manager.get_result())
    except Exception as error:
        # Any failure is converted into a module failure with the error text.
        results = dict(failed=True, msg="Error: %s" % error)

    if results['failed']:
        module.fail_json(**results)
    else:
        module.exit_json(**results)


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_drs_group_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_drs_group_info.py
new file mode 100644
index 000000000..d964b5527
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_drs_group_info.py
@@ -0,0 +1,271 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Karsten Kaj Jakobsen <kj@patientsky.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
# Module documentation consumed by ansible-doc.
# Fix: "belonging this cluster" -> "belonging to this cluster" (grammar).
DOCUMENTATION = r'''
---
author:
  - "Karsten Kaj Jakobsen (@karstenjakobsen)"
description:
  - "This module can be used to gather information about DRS VM/HOST groups from the given cluster."
extends_documentation_fragment:
- community.vmware.vmware.documentation

module: vmware_drs_group_info
options:
  cluster_name:
    description:
      - "Cluster to search for VM/Host groups."
      - "If set, information of DRS groups belonging to this cluster will be returned."
      - "Not needed if C(datacenter) is set."
    required: false
    type: str
  datacenter:
    aliases:
      - datacenter_name
    description:
      - "Datacenter to search for DRS VM/Host groups."
    required: false
    type: str
short_description: "Gathers info about DRS VM/Host groups on the given cluster"
'''
+
# Playbook examples rendered by ansible-doc.
EXAMPLES = r'''
---
- name: "Gather DRS info about given Cluster"
  register: cluster_drs_group_info
  community.vmware.vmware_drs_group_info:
    hostname: "{{ vcenter_hostname }}"
    password: "{{ vcenter_password }}"
    username: "{{ vcenter_username }}"
    cluster_name: "{{ cluster_name }}"
  delegate_to: localhost

- name: "Gather DRS group info about all clusters in given datacenter"
  register: cluster_drs_group_info
  community.vmware.vmware_drs_group_info:
    hostname: "{{ vcenter_hostname }}"
    password: "{{ vcenter_password }}"
    username: "{{ vcenter_username }}"
    datacenter: "{{ datacenter }}"
  delegate_to: localhost
'''

# Return-value documentation rendered by ansible-doc.
RETURN = r'''
drs_group_info:
    description: Metadata about DRS group from given cluster / datacenter
    returned: always
    type: dict
    sample:
        "drs_group_info": {
            "DC0_C0": [
                {
                    "group_name": "GROUP_HOST_S01",
                    "hosts": [
                        "vm-01.zone",
                        "vm-02.zone"
                    ],
                    "type": "host"
                },
                {
                    "group_name": "GROUP_HOST_S02",
                    "hosts": [
                        "vm-03.zone",
                        "vm-04.zone"
                    ],
                    "type": "host"
                },
                {
                    "group_name": "GROUP_VM_S01",
                    "type": "vm",
                    "vms": [
                        "test-node01"
                    ]
                },
                {
                    "group_name": "GROUP_VM_S02",
                    "type": "vm",
                    "vms": [
                        "test-node02"
                    ]
                }
            ],
            "DC0_C1": []
        }
'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi, find_datacenter_by_name, get_all_objs
+
+
class VmwareDrsGroupInfoManager(PyVmomi):
    """
    Gather DRS VM/Host group information from one cluster or from all
    clusters of a datacenter.
    """

    def __init__(self, module, datacenter_name, cluster_name=None):
        """
        Resolve the datacenter or cluster given by name and build
        self.cluster_obj_list, the clusters whose groups are reported.
        """

        super(VmwareDrsGroupInfoManager, self).__init__(module)

        self.__datacenter_name = datacenter_name
        self.__datacenter_obj = None
        self.__cluster_name = cluster_name
        self.__cluster_obj = None
        self.__result = dict()

        if self.__datacenter_name:

            self.__datacenter_obj = find_datacenter_by_name(self.content, datacenter_name=self.__datacenter_name)
            self.cluster_obj_list = []

            if self.__datacenter_obj:
                # All clusters below the datacenter's host folder are inspected
                folder = self.__datacenter_obj.hostFolder
                self.cluster_obj_list = get_all_objs(self.content, [vim.ClusterComputeResource], folder)
            else:
                raise Exception("Datacenter '%s' not found" % self.__datacenter_name)

        if self.__cluster_name:

            self.__cluster_obj = self.find_cluster_by_name(cluster_name=self.__cluster_name)

            if self.__cluster_obj is None:
                raise Exception("Cluster '%s' not found" % self.__cluster_name)
            else:
                self.cluster_obj_list = [self.__cluster_obj]

    def get_result(self):
        """
        Return the gathered info dict.
        """
        return self.__result

    def __set_result(self, result):
        """
        Store the gathered info.
        Args:
            result: drs group result list

        Returns: None

        """
        self.__result = result

    def __get_all_from_group(self, group_obj, host_group=False):
        """
        Return all VM / Host names using given group
        Args:
            group_obj: Group object
            host_group: True if we want only host name from group

        Returns: List of VM / Host names belonging to given group object

        """
        obj_name_list = []

        if not all([group_obj]):
            return obj_name_list

        if not host_group and isinstance(group_obj, vim.cluster.VmGroup):
            obj_name_list = [vm.name for vm in group_obj.vm]
        elif host_group and isinstance(group_obj, vim.cluster.HostGroup):
            obj_name_list = [host.name for host in group_obj.host]

        return obj_name_list

    def __normalize_group_data(self, group_obj):
        """
        Return human readable group spec
        Args:
            group_obj: Group object

        Returns: Dictionary with DRS groups

        """
        if not all([group_obj]):
            return {}

        # Check if group is a host group
        if hasattr(group_obj, 'host'):
            return dict(
                group_name=group_obj.name,
                hosts=self.__get_all_from_group(group_obj=group_obj, host_group=True),
                type="host"
            )
        else:
            return dict(
                group_name=group_obj.name,
                vms=self.__get_all_from_group(group_obj=group_obj),
                type="vm"
            )

    def gather_info(self):
        """
        Gather DRS group information for every cluster in
        self.cluster_obj_list and store it as the result.
        """
        cluster_group_info = dict()

        for cluster_obj in self.cluster_obj_list:

            cluster_group_info[cluster_obj.name] = []

            for drs_group in cluster_obj.configurationEx.group:
                cluster_group_info[cluster_obj.name].append(self.__normalize_group_data(drs_group))

        self.__set_result(cluster_group_info)
+
+
def main():
    """Module entry point: gather DRS group info for a cluster or a datacenter."""

    argument_spec = vmware_argument_spec()
    argument_spec.update(
        datacenter=dict(type='str', required=False, aliases=['datacenter_name']),
        cluster_name=dict(type='str', required=False),
    )

    # Exactly one of cluster_name/datacenter selects the scope of the lookup.
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=[['cluster_name', 'datacenter']],
        mutually_exclusive=[['cluster_name', 'datacenter']],
    )

    try:
        info_manager = VmwareDrsGroupInfoManager(
            module=module,
            datacenter_name=module.params.get('datacenter'),
            cluster_name=module.params.get('cluster_name', None))
        info_manager.gather_info()
        results = dict(failed=False,
                       drs_group_info=info_manager.get_result())
    except Exception as error:
        # Any failure is converted into a module failure with the error text.
        results = dict(failed=True, msg="Error: %s" % error)

    if results['failed']:
        module.fail_json(**results)
    module.exit_json(**results)


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_drs_group_manager.py b/ansible_collections/community/vmware/plugins/modules/vmware_drs_group_manager.py
new file mode 100644
index 000000000..2fc842718
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_drs_group_manager.py
@@ -0,0 +1,518 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Karsten Kaj Jakobsen <kj@patientsky.com>
+# Copyright: (c) 2020, Ansible Project
+# Copyright: (c) 2020, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
# Module documentation consumed by ansible-doc.
# Fix: "Cluster to which DRS group associated with." -> grammatical phrasing.
DOCUMENTATION = r'''
---
module: vmware_drs_group_manager
author:
  - Abhijeet Kasurde (@Akasurde)
short_description: Manage VMs and Hosts in DRS group.
description:
  - The module can be used to add VMs / Hosts to or remove them from a DRS group.
extends_documentation_fragment:
- community.vmware.vmware.documentation
options:
  cluster:
    description:
      - Cluster to which the DRS group is associated.
    required: true
    type: str
    aliases:
      - cluster_name
  datacenter:
    aliases:
      - datacenter_name
    description:
      - Name of the datacenter.
    required: false
    type: str
  group_name:
    description:
      - The name of the group to manage.
    required: true
    type: str
  hosts:
    description:
      - A List of hosts to add / remove in the group.
      - Required only if I(vms) is not set.
    required: false
    type: list
    elements: str
  vms:
    description:
      - A List of vms to add / remove in the group.
      - Required only if I(hosts) is not set.
    required: false
    type: list
    elements: str
  state:
    choices: [ present, absent ]
    default: present
    description:
      - If set to C(present), VMs/hosts will be added to the given DRS group.
      - If set to C(absent), VMs/hosts will be removed from the given DRS group.
    type: str
'''
+
# Playbook examples rendered by ansible-doc.
EXAMPLES = r'''
---
- name: Add VMs in an existing DRS VM group
  delegate_to: localhost
  community.vmware.vmware_drs_group_manager:
    hostname: "{{ vcenter_hostname }}"
    password: "{{ vcenter_password }}"
    username: "{{ vcenter_username }}"
    cluster: DC0_C0
    datacenter: DC0
    group_name: TEST_VM_01
    vms:
      - DC0_C0_RP0_VM0
      - DC0_C0_RP0_VM1
    state: present

- name: Add Hosts in an existing DRS Host group
  delegate_to: localhost
  community.vmware.vmware_drs_group_manager:
    hostname: "{{ vcenter_hostname }}"
    password: "{{ vcenter_password }}"
    username: "{{ vcenter_username }}"
    cluster: DC0_C0
    datacenter: DC0
    group_name: TEST_HOST_01
    hosts:
      - DC0_C0_H0
      - DC0_C0_H1
      - DC0_C0_H2
    state: present

- name: Remove VM from an existing DRS VM group
  delegate_to: localhost
  community.vmware.vmware_drs_group_manager:
    hostname: "{{ vcenter_hostname }}"
    password: "{{ vcenter_password }}"
    username: "{{ vcenter_username }}"
    cluster: DC0_C0
    datacenter: DC0
    group_name: TEST_VM_01
    vms:
      - DC0_C0_RP0_VM0
    state: absent

- name: Remove host from an existing DRS Host group
  delegate_to: localhost
  community.vmware.vmware_drs_group_manager:
    hostname: "{{ vcenter_hostname }}"
    password: "{{ vcenter_password }}"
    username: "{{ vcenter_username }}"
    cluster: DC0_C0
    datacenter: DC0
    group_name: TEST_HOST_01
    hosts:
      - DC0_C0_H0
    state: absent

'''

# Return-value documentation rendered by ansible-doc.
RETURN = r'''
drs_group_member_info:
  description: Metadata about DRS group
  returned: always
  type: dict
  sample: {
      "Asia-Cluster1": [
          {
              "group_name": "vm_group_002",
              "type": "vm",
              "vms": [
                  "dev-1"
              ]
          }
      ]
  }
msg:
  description: Info message
  returned: always
  type: str
  sample: "Updated host group TEST_HOST_01 successfully"
'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi,
+ vmware_argument_spec,
+ wait_for_task,
+ find_vm_by_id)
+
+
class VmwareDrsGroupMemberManager(PyVmomi):
    """
    Manage membership (VMs or hosts) of an existing DRS group.
    """

    def __init__(self, module):
        """
        Resolve datacenter, cluster and group objects from the module
        parameters and pre-compute the membership operation
        ('edit' for state=present, 'remove' for state=absent).
        """

        super(VmwareDrsGroupMemberManager, self).__init__(module)

        self._datacenter_name = module.params.get('datacenter')
        self._datacenter_obj = None
        self._cluster_name = module.params.get('cluster')
        self._cluster_obj = None
        self._group_name = module.params.get('group_name')
        self._group_obj = None
        self._operation = None
        self._vm_list = module.params.get('vms')
        self._vm_obj_list = []
        self._host_list = module.params.get('hosts')
        self._host_obj_list = []
        self.message = 'Nothing to see here...'
        self.result = dict()
        self.changed = False
        self._state = module.params.get('state')

        if self._datacenter_name is not None:
            self._datacenter_obj = self.find_datacenter_by_name(self._datacenter_name)

            if self._datacenter_obj is None:
                self.module.fail_json(msg="Datacenter '%s' not found" % self._datacenter_name)

        self._cluster_obj = self.find_cluster_by_name(self._cluster_name, self._datacenter_obj)

        # Throw error if cluster does not exist
        if self._cluster_obj is None:
            self.module.fail_json(msg="Cluster '%s' not found" % self._cluster_name)

        # get group
        self._group_obj = self._get_group_by_name()
        if self._group_obj is None:
            self.module.fail_json(msg="Cluster %s does not have a DRS group %s" % (self._cluster_name, self._group_name))

        # Set result here. If nothing is to be updated, result is already set
        self._set_result(self._group_obj)

        self._operation = 'edit' if self._state == 'present' else 'remove'

        if self._vm_list is not None:
            self._set_vm_obj_list(vm_list=self._vm_list)

        if self._host_list is not None:
            self._set_host_obj_list(host_list=self._host_list)

    def _set_result(self, group_obj):
        """
        Creates result for successful run
        Args:
            group_obj: group object

        Returns: None

        """
        self.result = dict()

        if self._cluster_obj is not None and group_obj is not None:
            self.result[self._cluster_obj.name] = []
            self.result[self._cluster_obj.name].append(self._normalize_group_data(group_obj))

    def _set_vm_obj_list(self, vm_list=None, cluster_obj=None):
        """
        Populate vm object list from list of vms
        Args:
            vm_list: List of vm names
            cluster_obj: Cluster managed object to search in

        Returns: None

        """

        if vm_list is None:
            vm_list = self._vm_list

        if cluster_obj is None:
            cluster_obj = self._cluster_obj

        if vm_list is not None:
            for vm in vm_list:
                if self.module.check_mode is False:
                    # Look up the VM object by name within the cluster
                    vm_obj = find_vm_by_id(content=self.content, vm_id=vm,
                                           vm_id_type='vm_name', cluster=cluster_obj)
                    if vm_obj is None:
                        self.module.fail_json(msg="VM %s does not exist in cluster %s" % (vm, self._cluster_name))
                    self._vm_obj_list.append(vm_obj)

    def _set_host_obj_list(self, host_list=None):
        """
        Populate host object list from list of hostnames
        Args:
            host_list: List of host names

        Returns: None

        """

        if host_list is None:
            host_list = self._host_list

        if host_list is not None:
            for host in host_list:
                if self.module.check_mode is False:
                    # Look up the ESXi host object by name
                    host_obj = self.find_hostsystem_by_name(host)
                    if host_obj is None and self.module.check_mode is False:
                        self.module.fail_json(msg="ESXi host %s does not exist in cluster %s" % (host, self._cluster_name))
                    self._host_obj_list.append(host_obj)

    def _get_group_by_name(self, group_name=None, cluster_obj=None):
        """
        Get group by name
        Args:
            group_name: Name of group
            cluster_obj: vim Cluster object

        Returns: Group Object if found or None

        """

        if group_name is None:
            group_name = self._group_name

        if cluster_obj is None:
            cluster_obj = self._cluster_obj

        # Allow for group check even if dry run
        if self.module.check_mode and cluster_obj is None:
            return None

        for group in cluster_obj.configurationEx.group:
            if group.name == group_name:
                return group

        # No group found
        return None

    def _populate_vm_host_list(self, group_name=None, cluster_obj=None, host_group=False):
        """
        Return all VMs/Hosts names using given group name
        Args:
            group_name: group name
            cluster_obj: Cluster managed object
            host_group: True if we want only host name from group

        Returns: List of VMs/Hosts names belonging to given group object

        """
        obj_name_list = []

        if group_name is None:
            group_name = self._group_name

        if cluster_obj is None:
            cluster_obj = self._cluster_obj

        if not all([group_name, cluster_obj]):
            return obj_name_list

        # NOTE: membership is always read from the group resolved in __init__;
        # the group_name/cluster_obj arguments only gate the guard above.
        group = self._group_obj

        if not host_group and isinstance(group, vim.cluster.VmGroup):
            obj_name_list = [vm.name for vm in group.vm]

        elif host_group and isinstance(group, vim.cluster.HostGroup):
            obj_name_list = [host.name for host in group.host]

        return obj_name_list

    def _check_if_vms_hosts_changed(self, group_name=None, cluster_obj=None, host_group=False):
        """
        Check if VMs/Hosts changed
        Args:
            group_name: Name of group
            cluster_obj: vim Cluster object
            host_group: True if we want to check hosts, else check vms

        Returns: Bool

        """

        if group_name is None:
            group_name = self._group_name

        if cluster_obj is None:
            cluster_obj = self._cluster_obj

        list_a = self._host_list if host_group else self._vm_list
        list_b = self._populate_vm_host_list(host_group=host_group)

        # By casting lists as a set, you remove duplicates and order doesn't count. Comparing sets is also much faster and more efficient than comparing lists.
        if set(list_a) == set(list_b):
            # For 'remove', identical sets still mean there is work to do.
            if self._operation != 'remove':
                return False
        return True

    def _manage_host_group(self):
        """Add/remove hosts to/from the group via a cluster reconfigure task."""
        # Check if anything has changed when editing
        if self._check_if_vms_hosts_changed(host_group=True):

            need_reconfigure = False
            group = vim.cluster.HostGroup()
            group.name = self._group_name
            # Start from the group's current membership
            group.host = self._group_obj.host or []

            # Modify existing hosts
            for host in self._host_obj_list:
                if self._operation == 'edit' and host not in group.host:
                    group.host.append(host)
                    need_reconfigure = True
                if self._operation == 'remove' and host in group.host:
                    group.host.remove(host)
                    need_reconfigure = True

            group_spec = vim.cluster.GroupSpec(info=group, operation='edit')
            config_spec = vim.cluster.ConfigSpecEx(groupSpec=[group_spec])

            if not self.module.check_mode and need_reconfigure:
                task = self._cluster_obj.ReconfigureEx(config_spec, modify=True)
                self.changed, dummy = wait_for_task(task)

            # Set new result since something changed
            self._set_result(group)
            if self.changed:
                self.message = "Updated host group %s successfully" % self._group_name
            else:
                self.message = "No update to host group %s" % self._group_name
        else:
            self.changed = False
            self.message = "No update to host group %s" % self._group_name

    def _manage_vm_group(self):
        """Add/remove VMs to/from the group via a cluster reconfigure task."""

        # Check if anything has changed when editing
        if self._check_if_vms_hosts_changed():
            need_reconfigure = False
            group = vim.cluster.VmGroup()
            group.name = self._group_name
            # Start from the group's current membership
            group.vm = self._group_obj.vm or []

            # Modify existing VMs
            for vm in self._vm_obj_list:
                if self._operation == 'edit' and vm not in group.vm:
                    group.vm.append(vm)
                    need_reconfigure = True
                if self._operation == 'remove' and vm in group.vm:
                    group.vm.remove(vm)
                    need_reconfigure = True

            group_spec = vim.cluster.GroupSpec(info=group, operation='edit')
            config_spec = vim.cluster.ConfigSpecEx(groupSpec=[group_spec])

            # Check if dry run
            if not self.module.check_mode and need_reconfigure:
                task = self._cluster_obj.ReconfigureEx(config_spec, modify=True)
                self.changed, dummy = wait_for_task(task)

            self._set_result(group)
            if self.changed:
                self.message = "Updated vm group %s successfully" % self._group_name
            else:
                self.message = "No update to vm group %s" % self._group_name
        else:
            self.changed = False
            self.message = "No update to vm group %s" % self._group_name

    def _normalize_group_data(self, group_obj):
        """
        Return human readable group spec
        Args:
            group_obj: Group object

        Returns: DRS group object fact

        """
        if not all([group_obj]):
            return {}

        # Check if group is a host group
        if hasattr(group_obj, 'host'):
            return dict(
                group_name=group_obj.name,
                hosts=self._host_list,
                type="host"
            )
        return dict(
            group_name=group_obj.name,
            vms=self._vm_list,
            type="vm"
        )

    def manage_drs_group_members(self):
        """
        Apply the membership change, dispatching on which member list
        was supplied.
        """

        if self._vm_list is None:
            self._manage_host_group()
        elif self._host_list is None:
            self._manage_vm_group()
        else:
            self.module.fail_json(msg="Failed, no hosts or vms defined")
+
+
def main():
    """Module entry point: add or remove members of an existing DRS group."""

    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(
        state=dict(type='str', default='present', choices=['present', 'absent']),
        datacenter=dict(type='str', required=False, aliases=['datacenter_name']),
        cluster=dict(type='str', required=True, aliases=['cluster_name']),
        group_name=dict(type='str', required=True),
        vms=dict(type='list', elements='str'),
        hosts=dict(type='list', elements='str'),
    ))

    # Exactly one of vms/hosts decides whether VM or host membership is managed.
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['vms', 'hosts']],
        required_one_of=[['vms', 'hosts']]
    )

    try:
        manager = VmwareDrsGroupMemberManager(module=module)
        manager.manage_drs_group_members()

        results = dict(msg=manager.message,
                       failed=False,
                       changed=manager.changed,
                       drs_group_member_info=manager.result)
    except Exception as error:
        # Any failure is converted into a module failure with the error text.
        results = dict(failed=True, msg="Error: %s" % error)

    if results['failed']:
        module.fail_json(**results)
    module.exit_json(**results)


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_drs_rule_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_drs_rule_info.py
new file mode 100644
index 000000000..b401c2b26
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_drs_rule_info.py
@@ -0,0 +1,254 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_drs_rule_info
+short_description: Gathers info about DRS rule on the given cluster
+description:
+- 'This module can be used to gather information about DRS VM-VM and VM-HOST rules from the given cluster.'
+author:
+- Abhijeet Kasurde (@Akasurde)
+options:
+ cluster_name:
+ description:
+ - Name of the cluster.
+ - DRS information for the given cluster will be returned.
+    - This is a required parameter if the C(datacenter) parameter is not provided.
+ type: str
+ datacenter:
+ description:
+ - Name of the datacenter.
+ - DRS information for all the clusters from the given datacenter will be returned.
+    - This is a required parameter if the C(cluster_name) parameter is not provided.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather DRS info about given Cluster
+ community.vmware.vmware_drs_rule_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: '{{ cluster_name }}'
+ delegate_to: localhost
+ register: cluster_drs_info
+
+- name: Gather DRS info about all Clusters in given datacenter
+ community.vmware.vmware_drs_rule_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter: '{{ datacenter_name }}'
+ delegate_to: localhost
+ register: datacenter_drs_info
+'''
+
+RETURN = r'''
+drs_rule_info:
+ description: metadata about DRS rule from given cluster / datacenter
+ returned: always
+ type: dict
+ sample: {
+ "DC0_C0": [
+ {
+ "rule_affinity": false,
+ "rule_enabled": true,
+ "rule_key": 1,
+ "rule_mandatory": true,
+ "rule_name": "drs_rule_0001",
+ "rule_type": "vm_vm_rule",
+ "rule_uuid": "52be5061-665a-68dc-3d25-85cd2d37e114",
+ "rule_vms": [
+ "VM_65",
+ "VM_146"
+ ]
+ },
+ ],
+ "DC1_C1": [
+ {
+ "rule_affine_host_group_name": "host_group_1",
+ "rule_affine_hosts": [
+ "10.76.33.204"
+ ],
+ "rule_anti_affine_host_group_name": null,
+ "rule_anti_affine_hosts": [],
+ "rule_enabled": true,
+ "rule_key": 1,
+ "rule_mandatory": false,
+ "rule_name": "vm_host_rule_0001",
+ "rule_type": "vm_host_rule",
+ "rule_uuid": "52687108-4d3a-76f2-d29c-b708c40dbe40",
+ "rule_vm_group_name": "test_vm_group_1",
+ "rule_vms": [
+ "VM_8916",
+ "VM_4010"
+ ]
+ }
+ ],
+ }
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi, find_datacenter_by_name, get_all_objs
+
+
+class VmwareDrsInfoManager(PyVmomi):
+    """Collects DRS VM-VM and VM-Host rule information for one or more clusters."""
+
+    def __init__(self, module):
+        """
+        Build the list of clusters to inspect.
+
+        Uses every cluster under the given datacenter's host folder, or the
+        single cluster named by 'cluster_name'. If both parameters are given,
+        'cluster_name' takes precedence (it overwrites the datacenter list).
+        """
+        super(VmwareDrsInfoManager, self).__init__(module)
+
+        datacenter_name = self.params.get('datacenter', None)
+        if datacenter_name:
+            datacenter_obj = find_datacenter_by_name(self.content, datacenter_name=datacenter_name)
+            self.cluster_obj_list = []
+            if datacenter_obj:
+                # Collect every cluster below the datacenter's host folder.
+                folder = datacenter_obj.hostFolder
+                self.cluster_obj_list = get_all_objs(self.content, [vim.ClusterComputeResource], folder)
+            else:
+                self.module.fail_json(changed=False, msg="Datacenter '%s' not found" % datacenter_name)
+
+        cluster_name = self.params.get('cluster_name', None)
+        if cluster_name:
+            cluster_obj = self.find_cluster_by_name(cluster_name=cluster_name)
+            if cluster_obj is None:
+                self.module.fail_json(changed=False, msg="Cluster '%s' not found" % cluster_name)
+            else:
+                # Overrides any datacenter-derived list built above.
+                self.cluster_obj_list = [cluster_obj]
+
+    def get_all_from_group(self, group_name=None, cluster_obj=None, hostgroup=False):
+        """
+        Return all VM / Host names using given group name
+        Args:
+            group_name: Rule name
+            cluster_obj: Cluster managed object
+            hostgroup: True if we want only host name from group
+
+        Returns: List of VM / Host names belonging to given group object
+
+        """
+        obj_name_list = []
+        # Nothing to resolve without both a group name and a cluster.
+        if not all([group_name, cluster_obj]):
+            return obj_name_list
+
+        for group in cluster_obj.configurationEx.group:
+            if group.name == group_name:
+                # Only return members when the group type matches the request:
+                # VmGroup members when hostgroup is False, HostGroup members otherwise.
+                if not hostgroup and isinstance(group, vim.cluster.VmGroup):
+                    obj_name_list = [vm.name for vm in group.vm]
+                    break
+                if hostgroup and isinstance(group, vim.cluster.HostGroup):
+                    obj_name_list = [host.name for host in group.host]
+                    break
+
+        return obj_name_list
+
+    @staticmethod
+    def normalize_vm_vm_rule_spec(rule_obj=None):
+        """
+        Return human readable rule spec
+        Args:
+            rule_obj: Rule managed object
+
+        Returns: Dictionary with DRS VM VM Rule info
+
+        """
+        if rule_obj is None:
+            return {}
+        # An AffinityRuleSpec keeps VMs together; otherwise the rule is an
+        # anti-affinity rule, hence rule_affinity False.
+        return dict(rule_key=rule_obj.key,
+                    rule_enabled=rule_obj.enabled,
+                    rule_name=rule_obj.name,
+                    rule_mandatory=rule_obj.mandatory,
+                    rule_uuid=rule_obj.ruleUuid,
+                    rule_vms=[vm.name for vm in rule_obj.vm],
+                    rule_type="vm_vm_rule",
+                    rule_affinity=True if isinstance(rule_obj, vim.cluster.AffinityRuleSpec) else False,
+                    )
+
+    def normalize_vm_host_rule_spec(self, rule_obj=None, cluster_obj=None):
+        """
+        Return human readable rule spec
+        Args:
+            rule_obj: Rule managed object
+            cluster_obj: Cluster managed object
+
+        Returns: Dictionary with DRS VM HOST Rule info
+
+        """
+        if not all([rule_obj, cluster_obj]):
+            return {}
+        # Resolve group names referenced by the rule into member-name lists.
+        return dict(rule_key=rule_obj.key,
+                    rule_enabled=rule_obj.enabled,
+                    rule_name=rule_obj.name,
+                    rule_mandatory=rule_obj.mandatory,
+                    rule_uuid=rule_obj.ruleUuid,
+                    rule_vm_group_name=rule_obj.vmGroupName,
+                    rule_affine_host_group_name=rule_obj.affineHostGroupName,
+                    rule_anti_affine_host_group_name=rule_obj.antiAffineHostGroupName,
+                    rule_vms=self.get_all_from_group(group_name=rule_obj.vmGroupName,
+                                                     cluster_obj=cluster_obj),
+                    rule_affine_hosts=self.get_all_from_group(group_name=rule_obj.affineHostGroupName,
+                                                              cluster_obj=cluster_obj,
+                                                              hostgroup=True),
+                    rule_anti_affine_hosts=self.get_all_from_group(group_name=rule_obj.antiAffineHostGroupName,
+                                                                   cluster_obj=cluster_obj,
+                                                                   hostgroup=True),
+                    rule_type="vm_host_rule",
+                    )
+
+    def gather_drs_rule_info(self):
+        """
+        Gather DRS rule information about given cluster
+        Returns: Dictionary of clusters with DRS information
+
+        """
+        cluster_rule_info = dict()
+        for cluster_obj in self.cluster_obj_list:
+            cluster_rule_info[cluster_obj.name] = []
+            for drs_rule in cluster_obj.configuration.rule:
+                # VM-Host rules need the cluster object to resolve group names;
+                # everything else is normalized as a VM-VM rule.
+                if isinstance(drs_rule, vim.cluster.VmHostRuleInfo):
+                    cluster_rule_info[cluster_obj.name].append(self.normalize_vm_host_rule_spec(
+                        rule_obj=drs_rule,
+                        cluster_obj=cluster_obj))
+                else:
+                    cluster_rule_info[cluster_obj.name].append(self.normalize_vm_vm_rule_spec(rule_obj=drs_rule))
+
+        return cluster_rule_info
+
+
+def main():
+    """Entry point: gather DRS rule info and exit without making changes."""
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(
+        datacenter=dict(type='str', required=False),
+        cluster_name=dict(type='str', required=False),
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        # At least one of cluster_name/datacenter is needed to scope the query.
+        required_one_of=[
+            ['cluster_name', 'datacenter'],
+        ],
+        # Info-only module: always safe in check mode.
+        supports_check_mode=True,
+    )
+
+    vmware_drs_info = VmwareDrsInfoManager(module)
+    module.exit_json(changed=False, drs_rule_info=vmware_drs_info.gather_drs_rule_info())
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_dvs_host.py b/ansible_collections/community/vmware/plugins/modules/vmware_dvs_host.py
new file mode 100644
index 000000000..790151c96
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_dvs_host.py
@@ -0,0 +1,411 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# Copyright: (c) 2019, VMware Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_dvs_host
+short_description: Add or remove a host from distributed virtual switch
+description:
+ - Manage a host system from distributed virtual switch.
+author:
+- Joseph Callen (@jcpowermac)
+- Abhijeet Kasurde (@Akasurde)
+- Joseph Andreatta (@vmwjoseph)
+options:
+ esxi_hostname:
+ description:
+ - The ESXi hostname.
+ required: true
+ type: str
+ switch_name:
+ description:
+ - The name of the Distributed vSwitch.
+ required: true
+ type: str
+ vmnics:
+ description:
+ - The ESXi hosts vmnics to use with the Distributed vSwitch.
+ required: false
+ type: list
+ default: []
+ elements: str
+ lag_uplinks:
+ required: false
+ type: list
+ default: []
+ elements: dict
+ description:
+ - The ESXi hosts vmnics to use with specific LAGs.
+ suboptions:
+ lag:
+ description:
+ - Name of the LAG.
+ type: str
+ required: true
+ vmnics:
+ description:
+ - The ESXi hosts vmnics to use with the LAG.
+ required: false
+ type: list
+ default: []
+ elements: str
+ state:
+ description:
+ - If the host should be present or absent attached to the vSwitch.
+ choices: [ present, absent ]
+ default: 'present'
+ type: str
+ vendor_specific_config:
+ description:
+ - List of key, value dictionaries for the Vendor Specific Configuration.
+ suboptions:
+ key:
+ description:
+ - Key of setting.
+ type: str
+ required: true
+ value:
+ description:
+ - Value of setting.
+ type: str
+ required: true
+ required: false
+ type: list
+ elements: dict
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Add Host to dVS
+ community.vmware.vmware_dvs_host:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ switch_name: dvSwitch
+ vmnics:
+ - vmnic0
+ - vmnic1
+ state: present
+ delegate_to: localhost
+
+- name: Add vmnics to LAGs
+ community.vmware.vmware_dvs_host:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ switch_name: dvSwitch
+ lag_uplinks:
+ - lag: lag1
+ vmnics:
+ - vmnic0
+ - vmnic1
+ - lag: lag2
+ vmnics:
+ - vmnic2
+ - vmnic3
+ state: present
+ delegate_to: localhost
+
+- name: Add Host to dVS/enable learnswitch (https://labs.vmware.com/flings/learnswitch)
+ community.vmware.vmware_dvs_host:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ switch_name: dvSwitch
+ vendor_specific_config:
+ - key: com.vmware.netoverlay.layer1
+ value: learnswitch
+ vmnics:
+ - vmnic0
+ - vmnic1
+ state: present
+ delegate_to: localhost
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi,
+ find_dvs_by_name,
+ find_hostsystem_by_name,
+ vmware_argument_spec,
+ wait_for_task)
+from ansible.module_utils._text import to_native
+
+
+class VMwareDvsHost(PyVmomi):
+    """Add/remove an ESXi host on a distributed vSwitch and map its vmnics to uplink ports."""
+
+    def __init__(self, module):
+        """
+        Resolve the target dvSwitch and validate requested LAG names.
+
+        Fails the module early if the switch does not exist or if a LAG named
+        in lag_uplinks is not configured on the switch.
+        """
+        super(VMwareDvsHost, self).__init__(module)
+        self.uplink_portgroup = None
+        self.host = None
+        self.dv_switch = None
+        # vmnic name -> uplink port key mapping computed by set_desired_state().
+        self.desired_state = {}
+
+        self.state = self.module.params['state']
+        self.switch_name = self.module.params['switch_name']
+        self.esxi_hostname = self.module.params['esxi_hostname']
+        self.vmnics = self.module.params['vmnics']
+        self.lag_uplinks = self.module.params['lag_uplinks']
+        self.vendor_specific_config = self.module.params['vendor_specific_config']
+
+        self.dv_switch = find_dvs_by_name(self.content, self.switch_name)
+
+        if self.dv_switch is None:
+            self.module.fail_json(msg="A distributed virtual switch %s "
+                                      "does not exist" % self.switch_name)
+
+        # Index the switch's LACP groups by name for quick lookup/validation.
+        self.lags = {}
+        for lag in self.dv_switch.config.lacpGroupConfig:
+            self.lags[lag.name] = lag
+
+        for lag_uplink in self.lag_uplinks:
+            if lag_uplink['lag'] not in self.lags:
+                self.module.fail_json(msg="LAG %s not found" % lag_uplink['lag'])
+
+    def process_state(self):
+        """Dispatch to the handler for the (desired state, current state) pair."""
+        dvs_host_states = {
+            'absent': {
+                'present': self.state_destroy_dvs_host,
+                'absent': self.state_exit_unchanged,
+            },
+            'present': {
+                'update': self.state_update_dvs_host,
+                'present': self.state_exit_unchanged,
+                'absent': self.state_create_dvs_host,
+            }
+        }
+
+        try:
+            dvs_host_states[self.state][self.check_dvs_host_state()]()
+        except vmodl.RuntimeFault as runtime_fault:
+            self.module.fail_json(msg=to_native(runtime_fault.msg))
+        except vmodl.MethodFault as method_fault:
+            self.module.fail_json(msg=to_native(method_fault.msg))
+        except Exception as e:
+            self.module.fail_json(msg=to_native(e))
+
+    def find_dvs_uplink_pg(self):
+        """Return the switch's uplink portgroup, or None if it has none."""
+        # There should only always be a single uplink port group on
+        # a distributed virtual switch
+        dvs_uplink_pg = self.dv_switch.config.uplinkPortgroup[0] if len(self.dv_switch.config.uplinkPortgroup) else None
+        return dvs_uplink_pg
+
+    # operation should be edit, add and remove
+    def modify_dvs_host(self, operation):
+        """
+        Reconfigure the dvSwitch host member with the given operation.
+
+        For 'edit', also writes the pnic backing computed in desired_state.
+        Returns the (changed, result) tuple from wait_for_task.
+        """
+        changed, result = False, None
+        spec = vim.DistributedVirtualSwitch.ConfigSpec()
+        spec.configVersion = self.dv_switch.config.configVersion
+        spec.host = [vim.dvs.HostMember.ConfigSpec()]
+        spec.host[0].operation = operation
+        spec.host[0].host = self.host
+        if self.vendor_specific_config:
+            config = list()
+            for item in self.vendor_specific_config:
+                config.append(vim.dvs.KeyedOpaqueBlob(key=item['key'], opaqueData=item['value']))
+            spec.host[0].vendorSpecificConfig = config
+
+        if operation == "edit":
+            spec.host[0].backing = vim.dvs.HostMember.PnicBacking()
+
+            # Attach each vmnic to the uplink port key chosen in desired_state.
+            for nic, uplinkPortKey in self.desired_state.items():
+                pnicSpec = vim.dvs.HostMember.PnicSpec()
+                pnicSpec.pnicDevice = nic
+                pnicSpec.uplinkPortgroupKey = self.uplink_portgroup.key
+                pnicSpec.uplinkPortKey = uplinkPortKey
+                spec.host[0].backing.pnicSpec.append(pnicSpec)
+
+        try:
+            task = self.dv_switch.ReconfigureDvs_Task(spec)
+            changed, result = wait_for_task(task)
+        except vmodl.fault.NotSupported as not_supported:
+            self.module.fail_json(msg="Failed to configure DVS host %s as it is not"
+                                      " compatible with the VDS version." % self.esxi_hostname,
+                                  details=to_native(not_supported.msg))
+        return changed, result
+
+    def state_destroy_dvs_host(self):
+        """Remove the host from the switch (no-op change report in check mode)."""
+        operation, changed, result = ("remove", True, None)
+
+        if not self.module.check_mode:
+            changed, result = self.modify_dvs_host(operation)
+        self.module.exit_json(changed=changed, result=to_native(result))
+
+    def state_exit_unchanged(self):
+        """Current state already matches the requested state."""
+        self.module.exit_json(changed=False)
+
+    def state_update_dvs_host(self):
+        """Re-map the host's vmnics/uplinks via an 'edit' reconfiguration."""
+        operation, changed, result = ("edit", True, None)
+
+        if not self.module.check_mode:
+            changed, result = self.modify_dvs_host(operation)
+        self.module.exit_json(changed=changed, result=to_native(result))
+
+    def state_create_dvs_host(self):
+        """Add the host to the switch, then edit it to wire up the uplinks."""
+        operation, changed, result = ("add", True, None)
+
+        if not self.module.check_mode:
+            changed, result = self.modify_dvs_host(operation)
+            if changed:
+                # The 'add' only attaches the host; a second 'edit' pass is
+                # needed to map vmnics onto the uplink ports.
+                self.set_desired_state()
+                changed, result = self.modify_dvs_host("edit")
+            else:
+                self.module.exit_json(changed=changed, result=to_native(result))
+
+        self.module.exit_json(changed=changed, result=to_native(result))
+
+    def find_host_attached_dvs(self):
+        """Return the HostSystem if the host is already a switch member, else None."""
+        for dvs_host_member in self.dv_switch.config.host:
+            if dvs_host_member.config.host.name == self.esxi_hostname:
+                return dvs_host_member.config.host
+
+        return None
+
+    def set_desired_state(self):
+        """
+        Compute the vmnic -> uplink port key mapping into self.desired_state.
+
+        LAG uplink ports are assigned from their LAG's port list; plain vmnics
+        are assigned, in order, from the remaining (non-LAG) uplink ports.
+        """
+        lag_uplinks = []
+        switch_uplink_ports = {'non_lag': []}
+
+        # Fetch only this host's uplink ports on the uplink portgroup.
+        portCriteria = vim.dvs.PortCriteria()
+        portCriteria.host = [self.host]
+        portCriteria.portgroupKey = self.uplink_portgroup.key
+        portCriteria.uplinkPort = True
+        ports = self.dv_switch.FetchDVPorts(portCriteria)
+
+        for name, lag in self.lags.items():
+            switch_uplink_ports[name] = []
+            for uplinkName in lag.uplinkName:
+                for port in ports:
+                    if port.config.name == uplinkName:
+                        switch_uplink_ports[name].append(port.key)
+                        lag_uplinks.append(port.key)
+
+        # Remaining uplink ports (sorted by name for a stable assignment order).
+        for port in sorted(ports, key=lambda port: port.config.name):
+            if port.key in self.uplink_portgroup.portKeys and port.key not in lag_uplinks:
+                switch_uplink_ports['non_lag'].append(port.key)
+
+        # NOTE(review): assumes enough uplink ports exist for the requested
+        # vmnics; an excess vmnic would raise IndexError here -- confirm.
+        count = 0
+        for vmnic in self.vmnics:
+            self.desired_state[vmnic] = switch_uplink_ports['non_lag'][count]
+            count += 1
+
+        for lag in self.lag_uplinks:
+            count = 0
+            for vmnic in lag['vmnics']:
+                self.desired_state[vmnic] = switch_uplink_ports[lag['lag']][count]
+                count += 1
+
+    def check_uplinks(self):
+        """Return True when the host's current pnic backing matches desired_state."""
+        pnic_device = []
+
+        self.set_desired_state()
+
+        # Relies on the loop break leaving dvs_host_member bound to this host's
+        # member entry; the host is known to be attached at this point.
+        for dvs_host_member in self.dv_switch.config.host:
+            if dvs_host_member.config.host.name == self.esxi_hostname:
+                break
+
+        for pnicSpec in dvs_host_member.config.backing.pnicSpec:
+            pnic_device.append(pnicSpec.pnicDevice)
+            if pnicSpec.pnicDevice not in self.desired_state:
+                return False
+            if pnicSpec.uplinkPortKey != self.desired_state[pnicSpec.pnicDevice]:
+                return False
+
+        # Every desired vmnic must also actually be attached.
+        for vmnic in self.desired_state:
+            if vmnic not in pnic_device:
+                return False
+
+        return True
+
+    def check_dvs_host_state(self):
+        """
+        Determine the current state: 'absent', 'present' or 'update'.
+
+        Also resolves self.host and self.uplink_portgroup as side effects.
+        """
+        self.uplink_portgroup = self.find_dvs_uplink_pg()
+
+        if self.uplink_portgroup is None:
+            self.module.fail_json(msg="An uplink portgroup does not exist on"
+                                      " the distributed virtual switch %s" % self.switch_name)
+
+        self.host = self.find_host_attached_dvs()
+
+        if self.host is None:
+            # We still need the HostSystem object to add the host
+            # to the distributed vswitch
+            self.host = find_hostsystem_by_name(self.content, self.esxi_hostname)
+            if self.host is None:
+                self.module.fail_json(msg="The esxi_hostname %s does not exist "
+                                          "in vCenter" % self.esxi_hostname)
+            return 'absent'
+        # Skip checking uplinks if the host should be absent, anyway
+        elif self.state == 'absent':
+            return 'present'
+        else:
+            if self.check_uplinks():
+                return 'present'
+            else:
+                return 'update'
+
+
+def main():
+    """Entry point: build the argument spec and run the dvSwitch host manager."""
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(
+        dict(
+            esxi_hostname=dict(required=True, type='str'),
+            switch_name=dict(required=True, type='str'),
+            vmnics=dict(required=False, type='list', default=[], elements='str'),
+            state=dict(default='present', choices=['present', 'absent'], type='str'),
+            vendor_specific_config=dict(
+                type='list',
+                elements='dict',
+                required=False,
+                options=dict(
+                    # no_log=False: 'key' is a config identifier, not a secret.
+                    key=dict(type='str', required=True, no_log=False),
+                    value=dict(type='str', required=True),
+                ),
+            ),
+            lag_uplinks=dict(
+                type='list',
+                default=[],
+                required=False,
+                elements='dict',
+                options=dict(
+                    lag=dict(
+                        type='str',
+                        required=True,
+                    ),
+                    vmnics=dict(
+                        type='list',
+                        required=False,
+                        elements='str',
+                        default=[],
+                    ),
+                ),
+            ),
+        )
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True)
+
+    vmware_dvs_host = VMwareDvsHost(module)
+    vmware_dvs_host.process_state()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_dvs_portgroup.py b/ansible_collections/community/vmware/plugins/modules/vmware_dvs_portgroup.py
new file mode 100644
index 000000000..83cd59f25
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_dvs_portgroup.py
@@ -0,0 +1,1030 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_dvs_portgroup
+short_description: Create or remove a Distributed vSwitch portgroup.
+description:
+ - Create or remove a Distributed vSwitch portgroup.
+author:
+ - Joseph Callen (@jcpowermac)
+ - Philippe Dellaert (@pdellaert) <philippe@dellaert.org>
+options:
+ portgroup_name:
+ description:
+ - The name of the portgroup that is to be created or deleted.
+ required: true
+ type: str
+ switch_name:
+ description:
+ - The name of the distributed vSwitch the port group should be created on.
+ required: true
+ type: str
+ vlan_id:
+ description:
+ - The VLAN ID that should be configured with the portgroup, use 0 for no VLAN.
+ - 'If C(vlan_trunk) is configured to be I(true), this can be a combination of multiple ranges and numbers, example: 1-200, 205, 400-4094.'
+ - The valid C(vlan_id) range is from 0 to 4094. Overlapping ranges are allowed.
+ - 'If C(vlan_private) is configured to be I(true), the corresponding private VLAN should already be configured in the distributed vSwitch.'
+ required: true
+ type: str
+ num_ports:
+ description:
+ - The number of ports the portgroup should contain.
+ type: int
+ port_binding:
+ description:
+ - The type of port binding determines when ports in a port group are assigned to virtual machines.
+ - See VMware KB 1022312 U(https://kb.vmware.com/s/article/1022312) for more details.
+ required: true
+ type: str
+ choices:
+ - 'static'
+ - 'ephemeral'
+ port_allocation:
+ description:
+ - Elastic port groups automatically increase or decrease the number of ports as needed.
+ - Only valid if I(port_binding) is set to C(static).
+ - Will be C(elastic) if not specified and I(port_binding) is set to C(static).
+ - Will be C(fixed) if not specified and I(port_binding) is set to C(ephemeral).
+ type: str
+ choices:
+ - 'elastic'
+ - 'fixed'
+ state:
+ description:
+ - Determines if the portgroup should be present or not.
+ required: true
+ type: str
+ choices:
+ - 'present'
+ - 'absent'
+ vlan_trunk:
+ description:
+ - Indicates whether this is a VLAN trunk or not.
+ - Mutually exclusive with C(vlan_private) parameter.
+ required: false
+ default: false
+ type: bool
+ vlan_private:
+ description:
+ - Indicates whether this is for a private VLAN or not.
+ - Mutually exclusive with C(vlan_trunk) parameter.
+ required: false
+ default: false
+ type: bool
+ mac_learning:
+ description:
+ - Dictionary which configures MAC learning for portgroup.
+ suboptions:
+ allow_unicast_flooding:
+ type: bool
+ description: The flag to allow flooding of unlearned MAC for ingress traffic.
+ required: false
+ enabled:
+ type: bool
+ description: The flag to indicate if source MAC address learning is allowed.
+ required: false
+ limit:
+ type: int
+ description: The maximum number of MAC addresses that can be learned.
+ required: false
+ limit_policy:
+ type: str
+ description: The default switching policy after MAC limit is exceeded.
+ required: false
+ choices:
+ - 'allow'
+ - 'drop'
+ type: dict
+ network_policy:
+ description:
+ - Dictionary which configures the different security values for portgroup.
+ suboptions:
+ inherited:
+ type: bool
+ description: Inherit the settings from the switch or not.
+ required: true
+ promiscuous:
+ type: bool
+ description: Indicates whether promiscuous mode is allowed. Ignored if C(inherited) is true.
+ forged_transmits:
+ type: bool
+ description: Indicates whether forged transmits are allowed. Ignored if C(inherited) is true.
+ mac_changes:
+ type: bool
+ description: Indicates whether mac changes are allowed. Ignored if C(inherited) is true.
+ required: false
+ type: dict
+ teaming_policy:
+ description:
+ - Dictionary which configures the different teaming values for portgroup.
+ suboptions:
+ load_balance_policy:
+ description:
+ - Network adapter teaming policy.
+ - C(loadbalance_loadbased) is available from version 2.6 and onwards.
+ default: 'loadbalance_srcid'
+ type: str
+ choices:
+ - loadbalance_ip
+ - loadbalance_srcmac
+ - loadbalance_srcid
+ - loadbalance_loadbased
+ - failover_explicit
+ notify_switches:
+ description:
+ - Indicate whether or not to notify the physical switch if a link fails.
+ default: true
+ type: bool
+ inbound_policy:
+ description:
+ - Indicate whether or not the teaming policy is applied to inbound frames as well.
+ type: bool
+ rolling_order:
+ description:
+ - Indicate whether or not to use a rolling policy when restoring links.
+ default: false
+ type: bool
+ active_uplinks:
+ description:
+ - List of active uplinks used for load balancing.
+ type: list
+ elements: str
+ standby_uplinks:
+ description:
+ - List of standby uplinks used for failover.
+ type: list
+ elements: str
+ default: {
+ 'notify_switches': true,
+ 'load_balance_policy': 'loadbalance_srcid',
+ 'rolling_order': false
+ }
+ type: dict
+ port_policy:
+ description:
+ - Dictionary which configures the advanced policy settings for the portgroup.
+ suboptions:
+ block_override:
+ description:
+ - Indicates if the block policy can be changed per port.
+ default: true
+ type: bool
+ port_config_reset_at_disconnect:
+ description:
+ - Indicates if the configuration of a port is reset automatically after disconnect.
+ default: true
+ type: bool
+ required: false
+ ipfix_override:
+ description:
+ - Indicates if the ipfix policy can be changed per port.
+ default: false
+ type: bool
+ live_port_move:
+ description:
+ - Indicates if a live port can be moved in or out of the portgroup.
+ default: false
+ type: bool
+ network_rp_override:
+ description:
+ - Indicates if the network resource pool can be changed per port.
+ default: false
+ type: bool
+ mac_management_override:
+ description:
+ - Indicates if the security policy can be changed per port.
+ default: false
+ aliases: ['security_override']
+ type: bool
+ shaping_override:
+ description:
+ - Indicates if the shaping policy can be changed per port.
+ default: false
+ type: bool
+ traffic_filter_override:
+ description:
+ - Indicates if the traffic filter can be changed per port.
+ default: false
+ type: bool
+ uplink_teaming_override:
+ description:
+ - Indicates if the uplink teaming policy can be changed per port.
+ default: false
+ type: bool
+ vendor_config_override:
+ description:
+ - Indicates if the vendor config can be changed per port.
+ type: bool
+ default: false
+ vlan_override:
+ description:
+ - Indicates if the vlan can be changed per port.
+ type: bool
+ default: false
+ default: {
+ 'traffic_filter_override': false,
+ 'network_rp_override': false,
+ 'live_port_move': false,
+ 'mac_management_override': false,
+ 'vendor_config_override': false,
+ 'port_config_reset_at_disconnect': true,
+ 'uplink_teaming_override': false,
+ 'block_override': true,
+ 'shaping_override': false,
+ 'vlan_override': false,
+ 'ipfix_override': false
+ }
+ type: dict
+ net_flow:
+ description:
+ - Indicate whether or not the virtual machine IP traffic that flows through a vds gets analyzed by sending reports to a NetFlow collector.
+ required: false
+ type: 'str'
+ choices:
+ - 'true'
+ - 'on'
+ - 'yes'
+ - 'false'
+ - 'off'
+ - 'no'
+ - 'inherited'
+ version_added: '2.3.0'
+ in_traffic_shaping:
+ description:
+ - Dictionary which configures the ingress traffic shaping settings for the portgroup.
+ suboptions:
+ inherited:
+ type: bool
+ description: Inherit the settings from the switch or not.
+ required: true
+ enabled:
+ type: bool
+ description:
+ - Indicates whether ingress traffic shaping is activated or not.
+ - Ignored if C(inherited) is true.
+ average_bandwidth:
+ type: int
+ description:
+ - Establishes the number of bits per second to allow across a port, averaged over time, that is, the allowed average load.
+ - Ignored if C(inherited) is true.
+      burst_size:
+        type: int
+        description:
+          - The maximum number of bytes to allow in a burst.
+          - Ignored if C(inherited) is true.
+      peak_bandwidth:
+        type: int
+        description:
+          - The maximum number of bits per second to allow across a port when it is sending or receiving a burst of traffic.
+          - Ignored if C(inherited) is true.
+ required: false
+ type: dict
+ version_added: '2.3.0'
+ out_traffic_shaping:
+ description:
+ - Dictionary which configures the egress traffic shaping settings for the portgroup.
+ suboptions:
+ inherited:
+ type: bool
+ description:
+ - Inherit the settings from the switch or not.
+ required: true
+ enabled:
+ type: bool
+ description:
+ - Indicates whether egress traffic shaping is activated or not.
+ - Ignored if C(inherited) is true.
+ average_bandwidth:
+ type: int
+ description:
+ - Establishes the number of bits per second to allow across a port, averaged over time, that is, the allowed average load.
+ - Ignored if C(inherited) is true.
+      burst_size:
+        type: int
+        description:
+          - The maximum number of bytes to allow in a burst.
+          - Ignored if C(inherited) is true.
+      peak_bandwidth:
+        type: int
+        description:
+          - The maximum number of bits per second to allow across a port when it is sending or receiving a burst of traffic.
+          - Ignored if C(inherited) is true.
+ required: false
+ type: dict
+ version_added: '2.3.0'
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Create vlan portgroup
+ community.vmware.vmware_dvs_portgroup:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+    portgroup_name: vlan-123-portgroup
+ switch_name: dvSwitch
+ vlan_id: 123
+ num_ports: 120
+ port_binding: static
+ state: present
+ delegate_to: localhost
+
+- name: Create vlan trunk portgroup
+ community.vmware.vmware_dvs_portgroup:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+    portgroup_name: vlan-trunk-portgroup
+ switch_name: dvSwitch
+ vlan_id: 1-1000, 1005, 1100-1200
+ vlan_trunk: true
+ num_ports: 120
+ port_binding: static
+ state: present
+ delegate_to: localhost
+
+- name: Create private vlan portgroup
+ vmware_dvs_portgroup:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+    portgroup_name: private-vlan-portgroup
+ switch_name: dvSwitch
+ vlan_id: 1001
+ vlan_private: true
+ num_ports: 120
+ port_binding: static
+ state: present
+ delegate_to: localhost
+
+- name: Create no-vlan portgroup
+ community.vmware.vmware_dvs_portgroup:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+    portgroup_name: no-vlan-portgroup
+ switch_name: dvSwitch
+ vlan_id: 0
+ num_ports: 120
+ port_binding: static
+ state: present
+ delegate_to: localhost
+
+- name: Create vlan portgroup with all security and port policies
+ community.vmware.vmware_dvs_portgroup:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+    portgroup_name: vlan-123-portgroup
+ switch_name: dvSwitch
+ vlan_id: 123
+ num_ports: 120
+ port_binding: static
+ state: present
+ network_policy:
+ inherited: false
+ promiscuous: true
+ forged_transmits: true
+ mac_changes: true
+ port_policy:
+ block_override: true
+ ipfix_override: true
+ live_port_move: true
+ network_rp_override: true
+ port_config_reset_at_disconnect: true
+ mac_management_override: true
+ shaping_override: true
+ traffic_filter_override: true
+ uplink_teaming_override: true
+ vendor_config_override: true
+ vlan_override: true
+ delegate_to: localhost
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi,
+ find_dvs_by_name,
+ find_dvspg_by_name,
+ is_boolean,
+ is_truthy,
+ vmware_argument_spec,
+ wait_for_task)
+
+
+class VMwareDvsPortgroup(PyVmomi):
+    def __init__(self, module):
+        """
+        Initialize the portgroup manager and resolve the port allocation mode.
+
+        When 'port_allocation' is not given, it defaults to 'fixed' for
+        ephemeral binding and 'elastic' for static binding. The combination
+        elastic + ephemeral is rejected as unsupported.
+        """
+        super(VMwareDvsPortgroup, self).__init__(module)
+        self.dvs_portgroup = None
+        self.dv_switch = None
+
+        self.port_allocation = self.module.params['port_allocation']
+        if self.port_allocation is None:
+            if self.module.params['port_binding'] == 'ephemeral':
+                self.port_allocation = 'fixed'
+            else:
+                self.port_allocation = 'elastic'
+
+        # Some sanity checks
+        if self.port_allocation == 'elastic' and self.module.params['port_binding'] == 'ephemeral':
+            self.module.fail_json(
+                msg="'elastic' port allocation is not supported on an 'ephemeral' portgroup."
+            )
+
+ def create_vlan_list(self):
+ vlan_id_list = []
+ for vlan_id_splitted in self.module.params['vlan_id'].split(','):
+ vlans = vlan_id_splitted.split('-')
+ if len(vlans) > 2:
+ self.module.fail_json(msg="Invalid VLAN range %s." % vlan_id_splitted)
+ if len(vlans) == 2:
+ vlan_id_start = vlans[0].strip()
+ vlan_id_end = vlans[1].strip()
+ if not vlan_id_start.isdigit():
+ self.module.fail_json(msg="Invalid VLAN %s." % vlan_id_start)
+ if not vlan_id_end.isdigit():
+ self.module.fail_json(msg="Invalid VLAN %s." % vlan_id_end)
+ vlan_id_start = int(vlan_id_start)
+ vlan_id_end = int(vlan_id_end)
+ if vlan_id_start not in range(0, 4095) or vlan_id_end not in range(0, 4095):
+ self.module.fail_json(msg="vlan_id range %s specified is incorrect. The valid vlan_id range is from 0 to 4094." % vlan_id_splitted)
+ vlan_id_list.append((vlan_id_start, vlan_id_end))
+ else:
+ vlan_id = vlans[0].strip()
+ if not vlan_id.isdigit():
+ self.module.fail_json(msg="Invalid VLAN %s." % vlan_id)
+ vlan_id = int(vlan_id)
+ vlan_id_list.append((vlan_id, vlan_id))
+
+ vlan_id_list.sort()
+
+ return vlan_id_list
+
+    def build_config(self):
+        """Translate the module parameters into a DVPortgroup ConfigSpec.
+
+        Returns a ``vim.dvs.DistributedVirtualPortgroup.ConfigSpec`` ready to
+        be passed to AddDVPortgroup_Task / ReconfigureDVPortgroup_Task.
+        """
+        config = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
+
+        # Basic config
+        config.name = self.module.params['portgroup_name']
+
+        # NOTE(review): this tests the raw parameter rather than the derived
+        # self.port_allocation, so num_ports is still applied when
+        # port_allocation is omitted and defaults to 'elastic' — confirm the
+        # asymmetry with check_dvspg_state() is intended.
+        if self.module.params['port_allocation'] != 'elastic' and self.module.params['port_binding'] != 'ephemeral':
+            config.numPorts = self.module.params['num_ports']
+
+        # Default port config: exactly one of trunk / private / plain VLAN.
+        config.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+        if self.module.params['vlan_trunk']:
+            config.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec()
+            config.defaultPortConfig.vlan.vlanId = list(map(lambda x: vim.NumericRange(start=x[0], end=x[1]), self.create_vlan_list()))
+        elif self.module.params['vlan_private']:
+            # Check that private VLAN exists in dvs
+            # NOTE(review): redundant re-check — we are already inside the
+            # vlan_private branch, so this inner condition is always true.
+            if self.module.params['vlan_private']:
+                pvlan_exists = self.check_dvs_pvlan()
+                if not pvlan_exists:
+                    self.module.fail_json(msg="No private vlan with id %s in distributed vSwitch %s"
+                                              % (self.module.params['vlan_id'], self.module.params['switch_name']))
+
+            config.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.PvlanSpec()
+            config.defaultPortConfig.vlan.pvlanId = int(self.module.params['vlan_id'])
+        else:
+            config.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
+            config.defaultPortConfig.vlan.vlanId = int(self.module.params['vlan_id'])
+
+        config.defaultPortConfig.vlan.inherited = False
+
+        # Security / MAC management policy (promiscuous mode etc.).
+        if self.module.params['network_policy'] is not None:
+            config.defaultPortConfig.macManagementPolicy = vim.dvs.VmwareDistributedVirtualSwitch.MacManagementPolicy()
+            config.defaultPortConfig.macManagementPolicy.inherited = self.module.params['network_policy']['inherited']
+            if not self.module.params['network_policy']['inherited']:
+                config.defaultPortConfig.macManagementPolicy.allowPromiscuous = self.module.params['network_policy']['promiscuous']
+                config.defaultPortConfig.macManagementPolicy.forgedTransmits = self.module.params['network_policy']['forged_transmits']
+                config.defaultPortConfig.macManagementPolicy.macChanges = self.module.params['network_policy']['mac_changes']
+
+        # MAC learning policy lives inside the MAC management policy, which
+        # may not have been created above if network_policy was omitted.
+        macLearning = self.module.params['mac_learning']
+        if macLearning:
+            if config.defaultPortConfig.macManagementPolicy is None:
+                config.defaultPortConfig.macManagementPolicy = vim.dvs.VmwareDistributedVirtualSwitch.MacManagementPolicy()
+            macLearningPolicy = vim.dvs.VmwareDistributedVirtualSwitch.MacLearningPolicy()
+            if macLearning['allow_unicast_flooding'] is not None:
+                macLearningPolicy.allowUnicastFlooding = macLearning['allow_unicast_flooding']
+            if macLearning['enabled'] is not None:
+                macLearningPolicy.enabled = macLearning['enabled']
+            if macLearning['limit'] is not None:
+                macLearningPolicy.limit = macLearning['limit']
+            if macLearning['limit_policy']:
+                macLearningPolicy.limitPolicy = macLearning['limit_policy']
+            config.defaultPortConfig.macManagementPolicy.macLearningPolicy = macLearningPolicy
+
+        # Teaming Policy
+        teamingPolicy = vim.dvs.VmwareDistributedVirtualSwitch.UplinkPortTeamingPolicy()
+        teamingPolicy.policy = vim.StringPolicy(value=self.module.params['teaming_policy']['load_balance_policy'])
+        if self.module.params['teaming_policy']['inbound_policy'] is not None:
+            teamingPolicy.reversePolicy = vim.BoolPolicy(value=self.module.params['teaming_policy']['inbound_policy'])
+        teamingPolicy.notifySwitches = vim.BoolPolicy(value=self.module.params['teaming_policy']['notify_switches'])
+        teamingPolicy.rollingOrder = vim.BoolPolicy(value=self.module.params['teaming_policy']['rolling_order'])
+
+        if self.module.params['teaming_policy']['active_uplinks'] or self.module.params['teaming_policy']['standby_uplinks']:
+            teamingPolicy.uplinkPortOrder = vim.dvs.VmwareDistributedVirtualSwitch.UplinkPortOrderPolicy()
+            if self.module.params['teaming_policy']['active_uplinks']:
+                teamingPolicy.uplinkPortOrder.activeUplinkPort = self.module.params['teaming_policy']['active_uplinks']
+            if self.module.params['teaming_policy']['standby_uplinks']:
+                teamingPolicy.uplinkPortOrder.standbyUplinkPort = self.module.params['teaming_policy']['standby_uplinks']
+
+        config.defaultPortConfig.uplinkTeamingPolicy = teamingPolicy
+
+        # PG policy (advanced_policy): which settings ports may override.
+        config.policy = vim.dvs.VmwareDistributedVirtualSwitch.VMwarePortgroupPolicy()
+        config.policy.blockOverrideAllowed = self.module.params['port_policy']['block_override']
+        config.policy.ipfixOverrideAllowed = self.module.params['port_policy']['ipfix_override']
+        config.policy.livePortMovingAllowed = self.module.params['port_policy']['live_port_move']
+        config.policy.macManagementOverrideAllowed = self.module.params['port_policy']['mac_management_override']
+        config.policy.networkResourcePoolOverrideAllowed = self.module.params['port_policy']['network_rp_override']
+        config.policy.portConfigResetAtDisconnect = self.module.params['port_policy']['port_config_reset_at_disconnect']
+        # securityPolicyOverrideAllowed deliberately reuses
+        # mac_management_override (security_override is its alias).
+        config.policy.securityPolicyOverrideAllowed = self.module.params['port_policy']['mac_management_override']
+        config.policy.shapingOverrideAllowed = self.module.params['port_policy']['shaping_override']
+        config.policy.trafficFilterOverrideAllowed = self.module.params['port_policy']['traffic_filter_override']
+        config.policy.uplinkTeamingOverrideAllowed = self.module.params['port_policy']['uplink_teaming_override']
+        config.policy.vendorConfigOverrideAllowed = self.module.params['port_policy']['vendor_config_override']
+        config.policy.vlanOverrideAllowed = self.module.params['port_policy']['vlan_override']
+
+        # NetFlow: tri-state string parameter ('true'/'false'/... or 'inherited').
+        net_flow = self.module.params['net_flow']
+        if net_flow is not None:
+            config.defaultPortConfig.ipfixEnabled = vim.BoolPolicy()
+            if is_boolean(net_flow):
+                config.defaultPortConfig.ipfixEnabled.inherited = False
+                config.defaultPortConfig.ipfixEnabled.value = is_truthy(net_flow)
+            else:
+                config.defaultPortConfig.ipfixEnabled.inherited = True
+
+        # Ingress traffic shaping
+        config.defaultPortConfig.inShapingPolicy = vim.dvs.DistributedVirtualPort.TrafficShapingPolicy()
+        config.defaultPortConfig.inShapingPolicy.averageBandwidth = vim.LongPolicy()
+        config.defaultPortConfig.inShapingPolicy.burstSize = vim.LongPolicy()
+        config.defaultPortConfig.inShapingPolicy.peakBandwidth = vim.LongPolicy()
+        config.defaultPortConfig.inShapingPolicy.enabled = vim.BoolPolicy()
+
+        in_traffic_shaping = self.module.params['in_traffic_shaping']
+        if in_traffic_shaping is not None:
+            if in_traffic_shaping['inherited'] is False:
+                config.defaultPortConfig.inShapingPolicy.inherited = False
+
+                # enabled
+                config.defaultPortConfig.inShapingPolicy.enabled.inherited = False
+                config.defaultPortConfig.inShapingPolicy.enabled.value = in_traffic_shaping['enabled']
+
+                # average bandwidth (module parameter is kbps, API wants bps)
+                config.defaultPortConfig.inShapingPolicy.averageBandwidth.inherited = False
+                config.defaultPortConfig.inShapingPolicy.averageBandwidth.value = in_traffic_shaping['average_bandwidth'] * 1000
+
+                # burst size (module parameter is KB, API wants bytes)
+                config.defaultPortConfig.inShapingPolicy.burstSize.inherited = False
+                config.defaultPortConfig.inShapingPolicy.burstSize.value = in_traffic_shaping['burst_size'] * 1024
+
+                # peak bandwidth (kbps -> bps)
+                config.defaultPortConfig.inShapingPolicy.peakBandwidth.inherited = False
+                config.defaultPortConfig.inShapingPolicy.peakBandwidth.value = in_traffic_shaping['peak_bandwidth'] * 1000
+            else:
+                config.defaultPortConfig.inShapingPolicy.inherited = True
+                config.defaultPortConfig.inShapingPolicy.enabled.inherited = True
+                config.defaultPortConfig.inShapingPolicy.averageBandwidth.inherited = True
+                config.defaultPortConfig.inShapingPolicy.burstSize.inherited = True
+                config.defaultPortConfig.inShapingPolicy.peakBandwidth.inherited = True
+
+        # Egress traffic shaping (mirrors the ingress handling above)
+        config.defaultPortConfig.outShapingPolicy = vim.dvs.DistributedVirtualPort.TrafficShapingPolicy()
+        config.defaultPortConfig.outShapingPolicy.averageBandwidth = vim.LongPolicy()
+        config.defaultPortConfig.outShapingPolicy.burstSize = vim.LongPolicy()
+        config.defaultPortConfig.outShapingPolicy.peakBandwidth = vim.LongPolicy()
+        config.defaultPortConfig.outShapingPolicy.enabled = vim.BoolPolicy()
+
+        out_traffic_shaping = self.module.params['out_traffic_shaping']
+        if out_traffic_shaping is not None:
+            if out_traffic_shaping['inherited'] is False:
+                config.defaultPortConfig.outShapingPolicy.inherited = False
+
+                # enabled
+                config.defaultPortConfig.outShapingPolicy.enabled.inherited = False
+                config.defaultPortConfig.outShapingPolicy.enabled.value = out_traffic_shaping['enabled']
+
+                # average bandwidth (kbps -> bps)
+                config.defaultPortConfig.outShapingPolicy.averageBandwidth.inherited = False
+                config.defaultPortConfig.outShapingPolicy.averageBandwidth.value = out_traffic_shaping['average_bandwidth'] * 1000
+
+                # burst size (KB -> bytes)
+                config.defaultPortConfig.outShapingPolicy.burstSize.inherited = False
+                config.defaultPortConfig.outShapingPolicy.burstSize.value = out_traffic_shaping['burst_size'] * 1024
+
+                # peak bandwidth (kbps -> bps)
+                config.defaultPortConfig.outShapingPolicy.peakBandwidth.inherited = False
+                config.defaultPortConfig.outShapingPolicy.peakBandwidth.value = out_traffic_shaping['peak_bandwidth'] * 1000
+            else:
+                config.defaultPortConfig.outShapingPolicy.inherited = True
+                config.defaultPortConfig.outShapingPolicy.enabled.inherited = True
+                config.defaultPortConfig.outShapingPolicy.averageBandwidth.inherited = True
+                config.defaultPortConfig.outShapingPolicy.burstSize.inherited = True
+                config.defaultPortConfig.outShapingPolicy.peakBandwidth.inherited = True
+
+        # PG Type
+        if self.module.params['port_binding'] == 'ephemeral':
+            config.type = 'ephemeral'
+        else:
+            config.type = 'earlyBinding'
+
+        # autoExpand implements 'elastic' port allocation.
+        if self.port_allocation == 'elastic':
+            config.autoExpand = True
+        else:
+            config.autoExpand = False
+
+        return config
+
+ def process_state(self):
+ dvspg_states = {
+ 'absent': {
+ 'update': self.state_destroy_dvspg,
+ 'present': self.state_destroy_dvspg,
+ 'absent': self.state_exit_unchanged,
+ },
+ 'present': {
+ 'update': self.state_update_dvspg,
+ 'present': self.state_exit_unchanged,
+ 'absent': self.state_create_dvspg,
+ }
+ }
+ try:
+ dvspg_states[self.module.params['state']][self.check_dvspg_state()]()
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=method_fault.msg)
+ except Exception as e:
+ self.module.fail_json(msg=str(e))
+
+ def update_port_group(self):
+ config = self.build_config()
+ config.configVersion = self.dvs_portgroup.config.configVersion
+ task = self.dvs_portgroup.ReconfigureDVPortgroup_Task(config)
+ changed, result = wait_for_task(task)
+ return changed, result
+
+ def create_port_group(self):
+ config = self.build_config()
+ task = self.dv_switch.AddDVPortgroup_Task([config])
+ changed, result = wait_for_task(task)
+ return changed, result
+
+ def state_destroy_dvspg(self):
+ changed = True
+ result = None
+
+ if not self.module.check_mode:
+ task = self.dvs_portgroup.Destroy_Task()
+ changed, result = wait_for_task(task)
+ self.module.exit_json(changed=changed, result=str(result))
+
+    def state_exit_unchanged(self):
+        """Exit the module reporting no changes."""
+        self.module.exit_json(changed=False)
+
+ def state_update_dvspg(self):
+ changed = True
+ result = None
+
+ if not self.module.check_mode:
+ changed, result = self.update_port_group()
+ self.module.exit_json(changed=changed, result=str(result))
+
+ def state_create_dvspg(self):
+ changed = True
+ result = None
+
+ if not self.module.check_mode:
+ changed, result = self.create_port_group()
+ self.module.exit_json(changed=changed, result=str(result))
+
+ def check_dvs_pvlan(self):
+ for pvlan in self.dv_switch.config.pvlanConfig:
+ if pvlan.primaryVlanId == int(self.module.params['vlan_id']):
+ return True
+ if pvlan.secondaryVlanId == int(self.module.params['vlan_id']):
+ return True
+ return False
+
+ def check_dvspg_state(self):
+ self.dv_switch = find_dvs_by_name(self.content, self.module.params['switch_name'])
+
+ if self.dv_switch is None:
+ self.module.fail_json(msg="A distributed virtual switch with name %s does not exist" % self.module.params['switch_name'])
+ self.dvs_portgroup = find_dvspg_by_name(self.dv_switch, self.module.params['portgroup_name'])
+
+ if self.dvs_portgroup is None:
+ return 'absent'
+
+ # Check config
+ if self.module.params['port_allocation'] != 'elastic' and self.module.params['port_binding'] != 'ephemeral':
+ if self.dvs_portgroup.config.numPorts != self.module.params['num_ports']:
+ return 'update'
+
+ # Default port config
+ defaultPortConfig = self.dvs_portgroup.config.defaultPortConfig
+ if self.module.params['vlan_trunk']:
+ if not isinstance(defaultPortConfig.vlan, vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec):
+ return 'update'
+ if list(map(lambda x: (x.start, x.end), defaultPortConfig.vlan.vlanId)) != self.create_vlan_list():
+ return 'update'
+ elif self.module.params['vlan_private']:
+ if not isinstance(defaultPortConfig.vlan, vim.dvs.VmwareDistributedVirtualSwitch.PvlanSpec):
+ return 'update'
+ if defaultPortConfig.vlan.pvlanId != int(self.module.params['vlan_id']):
+ return 'update'
+ else:
+ if not isinstance(defaultPortConfig.vlan, vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec):
+ return 'update'
+ if defaultPortConfig.vlan.vlanId != int(self.module.params['vlan_id']):
+ return 'update'
+
+ if self.module.params['network_policy'] is not None:
+ if defaultPortConfig.macManagementPolicy.inherited != self.module.params['network_policy']['inherited']:
+ return 'update'
+ if not self.module.params['network_policy']['inherited']:
+ if defaultPortConfig.macManagementPolicy.allowPromiscuous != self.module.params['network_policy']['promiscuous'] or \
+ defaultPortConfig.macManagementPolicy.forgedTransmits != self.module.params['network_policy']['forged_transmits'] or \
+ defaultPortConfig.macManagementPolicy.macChanges != self.module.params['network_policy']['mac_changes']:
+ return 'update'
+
+ macLearning = self.module.params['mac_learning']
+ if macLearning:
+ macLearningPolicy = defaultPortConfig.macManagementPolicy.macLearningPolicy
+ if macLearning['allow_unicast_flooding'] is not None and macLearningPolicy.allowUnicastFlooding != macLearning['allow_unicast_flooding']:
+ return 'update'
+ if macLearning['enabled'] is not None and macLearningPolicy.enabled != macLearning['enabled']:
+ return 'update'
+ if macLearning['limit'] is not None and macLearningPolicy.limit != macLearning['limit']:
+ return 'update'
+ if macLearning['limit_policy'] and macLearningPolicy.limitPolicy != macLearning['limit_policy']:
+ return 'update'
+
+ # Teaming Policy
+ teamingPolicy = self.dvs_portgroup.config.defaultPortConfig.uplinkTeamingPolicy
+
+ if self.module.params['teaming_policy']['inbound_policy'] is not None and \
+ teamingPolicy.reversePolicy.value != self.module.params['teaming_policy']['inbound_policy']:
+ return 'update'
+
+ if teamingPolicy.policy.value != self.module.params['teaming_policy']['load_balance_policy'] or \
+ teamingPolicy.notifySwitches.value != self.module.params['teaming_policy']['notify_switches'] or \
+ teamingPolicy.rollingOrder.value != self.module.params['teaming_policy']['rolling_order']:
+ return 'update'
+
+ if self.module.params['teaming_policy']['active_uplinks'] and \
+ teamingPolicy.uplinkPortOrder.activeUplinkPort != self.module.params['teaming_policy']['active_uplinks']:
+ return 'update'
+
+ if self.module.params['teaming_policy']['standby_uplinks'] and \
+ teamingPolicy.uplinkPortOrder.standbyUplinkPort != self.module.params['teaming_policy']['standby_uplinks']:
+ return 'update'
+
+ # NetFlow
+ net_flow = self.module.params['net_flow']
+ if net_flow is not None:
+ if is_boolean(net_flow) and \
+ (self.dvs_portgroup.config.defaultPortConfig.ipfixEnabled.inherited is not False
+ or self.dvs_portgroup.config.defaultPortConfig.ipfixEnabled.value != is_truthy(net_flow)):
+ return 'update'
+ elif self.dvs_portgroup.config.defaultPortConfig.ipfixEnabled.inherited is not True:
+ return 'update'
+
+ # Ingress traffic shaping
+ in_traffic_shaping = self.module.params['in_traffic_shaping']
+ if in_traffic_shaping is not None:
+ if in_traffic_shaping['inherited'] is False and \
+ self.dvs_portgroup.config.defaultPortConfig.inShapingPolicy.inherited is not False and \
+ self.dvs_portgroup.config.defaultPortConfig.inShapingPolicy.enabled.inherited is not False and \
+ self.dvs_portgroup.config.defaultPortConfig.inShapingPolicy.enabled.value != in_traffic_shaping['enabled'] and \
+ self.dvs_portgroup.config.defaultPortConfig.inShapingPolicy.averageBandwidth.inherited is not False and \
+ self.dvs_portgroup.config.defaultPortConfig.inShapingPolicy.averageBandwidth.value != (in_traffic_shaping['average_bandwidth'] * 1000) and \
+ self.dvs_portgroup.config.defaultPortConfig.inShapingPolicy.burstSize.inherited is not False and \
+ self.dvs_portgroup.config.defaultPortConfig.inShapingPolicy.burstSize.value != (in_traffic_shaping['burst_size'] * 1024) and \
+ self.dvs_portgroup.config.defaultPortConfig.inShapingPolicy.peakBandwidth.inherited is not False and \
+ self.dvs_portgroup.config.defaultPortConfig.inShapingPolicy.peakBandwidth.value != (in_traffic_shaping['peak_bandwidth'] * 1000):
+ return 'update'
+ elif in_traffic_shaping['inherited'] is True and \
+ self.dvs_portgroup.config.defaultPortConfig.inShapingPolicy.inherited is not True and \
+ self.dvs_portgroup.config.defaultPortConfig.inShapingPolicy.enabled.inherited is not True and \
+ self.dvs_portgroup.config.defaultPortConfig.inShapingPolicy.averageBandwidth.inherited is not True and \
+ self.dvs_portgroup.config.defaultPortConfig.inShapingPolicy.burstSize.inherited is not True and \
+ self.dvs_portgroup.config.defaultPortConfig.inShapingPolicy.peakBandwidth.inherited is not True:
+ return 'update'
+
+ # Egress traffic shaping
+ out_traffic_shaping = self.module.params['out_traffic_shaping']
+ if out_traffic_shaping is not None:
+ if out_traffic_shaping['inherited'] is False and \
+ self.dvs_portgroup.config.defaultPortConfig.outShapingPolicy.inherited is not False and \
+ self.dvs_portgroup.config.defaultPortConfig.outShapingPolicy.enabled.inherited is not False and \
+ self.dvs_portgroup.config.defaultPortConfig.outShapingPolicy.enabled.value != out_traffic_shaping['enabled'] and \
+ self.dvs_portgroup.config.defaultPortConfig.outShapingPolicy.averageBandwidth.inherited is not False and \
+ self.dvs_portgroup.config.defaultPortConfig.outShapingPolicy.averageBandwidth.value != \
+ (out_traffic_shaping['average_bandwidth'] * 1000) and \
+ self.dvs_portgroup.config.defaultPortConfig.outShapingPolicy.burstSize.inherited is not False and \
+ self.dvs_portgroup.config.defaultPortConfig.outShapingPolicy.burstSize.value != (out_traffic_shaping['burst_size'] * 1024) and \
+ self.dvs_portgroup.config.defaultPortConfig.outShapingPolicy.peakBandwidth.inherited is not False and \
+ self.dvs_portgroup.config.defaultPortConfig.outShapingPolicy.peakBandwidth.value != (out_traffic_shaping['peak_bandwidth'] * 1000):
+ return 'update'
+ elif self.module.params['out_traffic_shaping'] is None and \
+ self.dvs_portgroup.config.defaultPortConfig.outShapingPolicy.inherited is not True and \
+ self.dvs_portgroup.config.defaultPortConfig.outShapingPolicy.enabled.inherited is not True and \
+ self.dvs_portgroup.config.defaultPortConfig.outShapingPolicy.averageBandwidth.inherited is not True and \
+ self.dvs_portgroup.config.defaultPortConfig.outShapingPolicy.burstSize.inherited is not True and \
+ self.dvs_portgroup.config.defaultPortConfig.outShapingPolicy.peakBandwidth.inherited is not True:
+ return 'update'
+
+ # PG policy (advanced_policy)
+ policy = self.dvs_portgroup.config.policy
+ if policy.blockOverrideAllowed != self.module.params['port_policy']['block_override'] or \
+ policy.ipfixOverrideAllowed != self.module.params['port_policy']['ipfix_override'] or \
+ policy.livePortMovingAllowed != self.module.params['port_policy']['live_port_move'] or \
+ policy.macManagementOverrideAllowed != self.module.params['port_policy']['mac_management_override'] or \
+ policy.networkResourcePoolOverrideAllowed != self.module.params['port_policy']['network_rp_override'] or \
+ policy.portConfigResetAtDisconnect != self.module.params['port_policy']['port_config_reset_at_disconnect'] or \
+ policy.securityPolicyOverrideAllowed != self.module.params['port_policy']['mac_management_override'] or \
+ policy.shapingOverrideAllowed != self.module.params['port_policy']['shaping_override'] or \
+ policy.trafficFilterOverrideAllowed != self.module.params['port_policy']['traffic_filter_override'] or \
+ policy.uplinkTeamingOverrideAllowed != self.module.params['port_policy']['uplink_teaming_override'] or \
+ policy.vendorConfigOverrideAllowed != self.module.params['port_policy']['vendor_config_override'] or \
+ policy.vlanOverrideAllowed != self.module.params['port_policy']['vlan_override']:
+ return 'update'
+
+ # PG Type
+ if self.module.params['port_binding'] == 'ephemeral':
+ if self.dvs_portgroup.config.type != 'ephemeral':
+ return 'update'
+ elif self.port_allocation == 'fixed' and self.dvs_portgroup.config.type != 'earlyBinding':
+ return 'update'
+
+ # Check port allocation
+ if self.port_allocation == 'elastic' and self.dvs_portgroup.config.autoExpand is False:
+ return 'update'
+ elif self.port_allocation == 'fixed' and self.dvs_portgroup.config.autoExpand is True:
+ return 'update'
+
+ return 'present'
+
+
+def main():
+    """Module entry point: build the argument spec, then create/update/delete
+    the distributed vSwitch portgroup according to ``state``."""
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(
+        dict(
+            portgroup_name=dict(required=True, type='str'),
+            switch_name=dict(required=True, type='str'),
+            # vlan_id is a string so it can carry trunk ranges like '1-10,20'.
+            vlan_id=dict(required=True, type='str'),
+            num_ports=dict(type='int'),
+            port_binding=dict(required=True, type='str', choices=['static', 'ephemeral']),
+            # When omitted, a default is derived in VMwareDvsPortgroup.__init__.
+            port_allocation=dict(type='str', choices=['fixed', 'elastic']),
+            state=dict(required=True, choices=['present', 'absent'], type='str'),
+            vlan_trunk=dict(type='bool', default=False),
+            vlan_private=dict(type='bool', default=False),
+            network_policy=dict(
+                type='dict',
+                options=dict(
+                    inherited=dict(type='bool', required=True),
+                    promiscuous=dict(type='bool'),
+                    forged_transmits=dict(type='bool'),
+                    mac_changes=dict(type='bool')
+                ),
+                required_if=[
+                    ('inherited', False, ('promiscuous', 'forged_transmits', 'mac_changes'))
+                ],
+            ),
+            # Bandwidth values are kbps, burst_size is KB (converted in build_config).
+            in_traffic_shaping=dict(
+                type='dict',
+                options=dict(
+                    inherited=dict(type='bool', required=True),
+                    enabled=dict(type='bool'),
+                    average_bandwidth=dict(type='int'),
+                    peak_bandwidth=dict(type='int'),
+                    burst_size=dict(type='int'),
+                ),
+                required_if=[
+                    ('inherited', False, ('average_bandwidth', 'peak_bandwidth', 'burst_size'))
+                ],
+            ),
+            out_traffic_shaping=dict(
+                type='dict',
+                options=dict(
+                    inherited=dict(type='bool', required=True),
+                    enabled=dict(type='bool'),
+                    average_bandwidth=dict(type='int'),
+                    peak_bandwidth=dict(type='int'),
+                    burst_size=dict(type='int'),
+                ),
+                required_if=[
+                    ('inherited', False, ('average_bandwidth', 'peak_bandwidth', 'burst_size'))
+                ],
+            ),
+            # Tri-state: truthy/falsy strings or 'inherited' (see is_boolean/is_truthy).
+            net_flow=dict(
+                type='str',
+                choices=[
+                    'true',
+                    'on',
+                    'yes',
+                    'false',
+                    'off',
+                    'no',
+                    'inherited',
+                ],
+            ),
+            teaming_policy=dict(
+                type='dict',
+                options=dict(
+                    inbound_policy=dict(type='bool'),
+                    notify_switches=dict(type='bool', default=True),
+                    rolling_order=dict(type='bool', default=False),
+                    load_balance_policy=dict(type='str',
+                                             default='loadbalance_srcid',
+                                             choices=[
+                                                 'loadbalance_ip',
+                                                 'loadbalance_srcmac',
+                                                 'loadbalance_srcid',
+                                                 'loadbalance_loadbased',
+                                                 'failover_explicit',
+                                             ],
+                                             ),
+                    active_uplinks=dict(type='list', elements='str'),
+                    standby_uplinks=dict(type='list', elements='str'),
+                ),
+                default=dict(
+                    notify_switches=True,
+                    rolling_order=False,
+                    load_balance_policy='loadbalance_srcid',
+                ),
+            ),
+            port_policy=dict(
+                type='dict',
+                options=dict(
+                    block_override=dict(type='bool', default=True),
+                    ipfix_override=dict(type='bool', default=False),
+                    live_port_move=dict(type='bool', default=False),
+                    network_rp_override=dict(type='bool', default=False),
+                    port_config_reset_at_disconnect=dict(type='bool', default=True),
+                    mac_management_override=dict(type='bool', default=False, aliases=['security_override']),
+                    shaping_override=dict(type='bool', default=False),
+                    traffic_filter_override=dict(type='bool', default=False),
+                    uplink_teaming_override=dict(type='bool', default=False),
+                    vendor_config_override=dict(type='bool', default=False),
+                    vlan_override=dict(type='bool', default=False)
+                ),
+                default=dict(
+                    block_override=True,
+                    ipfix_override=False,
+                    live_port_move=False,
+                    network_rp_override=False,
+                    port_config_reset_at_disconnect=True,
+                    mac_management_override=False,
+                    shaping_override=False,
+                    traffic_filter_override=False,
+                    uplink_teaming_override=False,
+                    vendor_config_override=False,
+                    vlan_override=False
+                ),
+            ),
+            mac_learning=dict(
+                type='dict',
+                options=dict(
+                    allow_unicast_flooding=dict(type='bool'),
+                    enabled=dict(type='bool'),
+                    limit=dict(type='int'),
+                    limit_policy=dict(type='str', choices=['allow', 'drop']),
+                ),
+            )
+        )
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           mutually_exclusive=[
+                               # A portgroup carries either a trunk range or a private VLAN, not both.
+                               ['vlan_trunk', 'vlan_private'],
+                           ],
+                           supports_check_mode=True)
+
+    vmware_dvs_portgroup = VMwareDvsPortgroup(module)
+    vmware_dvs_portgroup.process_state()
+
+
+# Invoke the module entry point when executed directly by Ansible.
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_dvs_portgroup_find.py b/ansible_collections/community/vmware/plugins/modules/vmware_dvs_portgroup_find.py
new file mode 100644
index 000000000..f47edb0de
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_dvs_portgroup_find.py
@@ -0,0 +1,205 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_dvs_portgroup_find
+short_description: Find portgroup(s) in a VMware environment
+description:
+- Find portgroup(s) based on different criteria such as distributed vSwitch, VLAN id or a string in the name.
+author:
+- David Martinez (@dx0xm)
+options:
+ dvswitch:
+ description:
+ - Name of a distributed vSwitch to look for.
+ type: str
+ vlanid:
+ description:
+ - VLAN id can be any number between 1 and 4094.
+ - This search criterion looks into VLAN ranges to find possible matches.
+ required: false
+ type: int
+ name:
+ description:
+ - String to check inside the name of the portgroup.
+ - Basic containment check using python C(in) operation.
+ type: str
+ show_uplink:
+ description:
+ - Show or hide uplink portgroups.
+ - Only relevant when C(vlanid) is supplied.
+ type: bool
+ default: false
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Get all portgroups in dvswitch vDS
+ community.vmware.vmware_dvs_portgroup_find:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ dvswitch: 'vDS'
+ delegate_to: localhost
+
+- name: Confirm if vlan 15 is present
+ community.vmware.vmware_dvs_portgroup_find:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ vlanid: '15'
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+dvs_portgroups:
+ description: basic details of portgroups found
+ returned: on success
+ type: list
+ sample: [
+ {
+ "dvswitch": "vDS",
+ "name": "N-51",
+ "pvlan": true,
+ "trunk": true,
+ "vlan_id": "0"
+ }
+ ]
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ vmware_argument_spec,
+ PyVmomi)
+from ansible.module_utils.six.moves.urllib.parse import unquote
+
+
+class DVSPortgroupFindManager(PyVmomi):
+    def __init__(self, module):
+        """Collect candidate portgroups, pre-filtered by name and dvswitch.
+
+        NOTE(review): ``vlanid: 0`` is falsy and therefore disables VLAN
+        matching exactly like omitting the parameter — confirm this is
+        intended (the documentation states valid ids are 1-4094).
+        """
+        super(DVSPortgroupFindManager, self).__init__(module)
+        self.dvs_name = self.params['dvswitch']
+        self.vlan = self.params['vlanid']
+        # Only compare VLANs when a (non-zero) vlanid was supplied.
+        self.cmp_vlans = True if self.vlan else False
+        self.pgs = self.find_portgroups_by_name(self.content, self.module.params['name'])
+
+        if self.dvs_name:
+            self.pgs = self.find_portgroups_by_dvs(self.pgs, self.dvs_name)
+
+ def find_portgroups_by_name(self, content, name=None):
+ vimtype = [vim.dvs.DistributedVirtualPortgroup]
+ container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
+ if not name:
+ obj = container.view
+ else:
+ obj = []
+ for c in container.view:
+ if name in c.name:
+ obj.append(c)
+
+ return obj
+
+ def find_portgroups_by_dvs(self, pgl, dvs):
+ obj = []
+ for c in pgl:
+ if dvs in c.config.distributedVirtualSwitch.name:
+ obj.append(c)
+
+ return obj
+
+ def vlan_match(self, pgup, userup, vlanlst):
+ res = False
+ if pgup and userup:
+ return True
+
+ for ln in vlanlst:
+ if '-' in ln:
+ arr = ln.split('-')
+ if int(arr[0]) < self.vlan and self.vlan < int(arr[1]):
+ res = True
+ elif ln == str(self.vlan):
+ res = True
+
+ return res
+
+ def get_dvs_portgroup(self):
+ pgroups = self.pgs
+
+ pglist = []
+ for pg in pgroups:
+ trunk = False
+ pvlan = False
+ vlanInfo = pg.config.defaultPortConfig.vlan
+ cl1 = vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec
+ cl2 = vim.dvs.VmwareDistributedVirtualSwitch.PvlanSpec
+ vlan_id_list = []
+ if isinstance(vlanInfo, cl1):
+ trunk = True
+ for item in vlanInfo.vlanId:
+ if item.start == item.end:
+ vlan_id_list.append(str(item.start))
+ else:
+ vlan_id_list.append(str(item.start) + '-' + str(item.end))
+ elif isinstance(vlanInfo, cl2):
+ pvlan = True
+ vlan_id_list.append(str(vlanInfo.pvlanId))
+ else:
+ vlan_id_list.append(str(vlanInfo.vlanId))
+
+ if self.cmp_vlans:
+ if self.vlan_match(pg.config.uplink, self.module.params['show_uplink'], vlan_id_list):
+ pglist.append(dict(
+ name=unquote(pg.name),
+ trunk=trunk,
+ pvlan=pvlan,
+ vlan_id=','.join(vlan_id_list),
+ dvswitch=pg.config.distributedVirtualSwitch.name))
+ else:
+ pglist.append(dict(
+ name=unquote(pg.name),
+ trunk=trunk,
+ pvlan=pvlan,
+ vlan_id=','.join(vlan_id_list),
+ dvswitch=pg.config.distributedVirtualSwitch.name))
+
+ return pglist
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ dvswitch=dict(type='str', required=False),
+ vlanid=dict(type='int', required=False),
+ name=dict(type='str', required=False),
+ show_uplink=dict(type='bool', default=False),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[
+ ['show_uplink', 'True', ['vlanid']]
+ ]
+ )
+
+ dvs_pg_mgr = DVSPortgroupFindManager(module)
+ module.exit_json(changed=False,
+ dvs_portgroups=dvs_pg_mgr.get_dvs_portgroup())
+
+
+# Invoke the module entry point when executed directly by Ansible.
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_dvs_portgroup_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_dvs_portgroup_info.py
new file mode 100644
index 000000000..d80d4ec17
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_dvs_portgroup_info.py
@@ -0,0 +1,310 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_dvs_portgroup_info
+short_description: Gathers info about DVS portgroup configurations
+description:
+- This module can be used to gather information about DVS portgroup configurations.
+author:
+- Abhijeet Kasurde (@Akasurde)
+options:
+ datacenter:
+ description:
+ - Name of the datacenter.
+ required: true
+ type: str
+ dvswitch:
+ description:
+ - Name of a dvswitch to look for.
+ required: false
+ type: str
+ show_mac_learning:
+ description:
+ - Show or hide MAC learning information of the DVS portgroup.
+ type: bool
+ default: true
+ show_network_policy:
+ description:
+ - Show or hide network policies of DVS portgroup.
+ type: bool
+ default: true
+ show_port_policy:
+ description:
+ - Show or hide port policies of DVS portgroup.
+ type: bool
+ default: true
+ show_teaming_policy:
+ description:
+ - Show or hide teaming policies of DVS portgroup.
+ type: bool
+ default: true
+ show_uplinks:
+ description:
+ - Show or hide uplinks of DVS portgroup.
+ type: bool
+ default: true
+ show_vlan_info:
+ description:
+ - Show or hide vlan information of the DVS portgroup.
+ type: bool
+ default: false
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Get info about DVPG
+ community.vmware.vmware_dvs_portgroup_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ register: dvpg_info
+
+- name: Get number of ports for portgroup 'dvpg_001' in 'dvs_001'
+ debug:
+ msg: "{{ item.num_ports }}"
+ with_items:
+ - "{{ dvpg_info.dvs_portgroup_info['dvs_001'] | json_query(query) }}"
+ vars:
+ query: "[?portgroup_name=='dvpg_001']"
+'''
+
+RETURN = r'''
+dvs_portgroup_info:
+ description: metadata about DVS portgroup configuration
+ returned: on success
+ type: dict
+ sample: {
+ "dvs_0":[
+ {
+ "description": null,
+ "dvswitch_name": "dvs_001",
+ "network_policy": {
+ "forged_transmits": false,
+ "mac_changes": false,
+ "promiscuous": false
+ },
+ "num_ports": 8,
+ "port_policy": {
+ "block_override": true,
+ "ipfix_override": false,
+ "live_port_move": false,
+ "network_rp_override": false,
+ "port_config_reset_at_disconnect": true,
+ "security_override": false,
+ "shaping_override": false,
+ "traffic_filter_override": false,
+ "uplink_teaming_override": false,
+ "vendor_config_override": false,
+ "vlan_override": false
+ },
+ "portgroup_name": "dvpg_001",
+ "teaming_policy": {
+ "inbound_policy": true,
+ "notify_switches": true,
+ "policy": "loadbalance_srcid",
+ "rolling_order": false
+ },
+ "vlan_info": {
+ "trunk": false,
+ "pvlan": false,
+ "vlan_id": 0
+ },
+ "type": "earlyBinding"
+ },
+ ]
+ }
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ vmware_argument_spec,
+ PyVmomi,
+ get_all_objs,
+ find_dvs_by_name)
+from ansible.module_utils.six.moves.urllib.parse import unquote
+
+
class DVSPortgroupInfoManager(PyVmomi):
    """Collect configuration details of DVS portgroups.

    Gathers information from a single user-specified dvswitch, or, by
    default, from all dvswitches in the given datacenter.
    """

    def __init__(self, module):
        super(DVSPortgroupInfoManager, self).__init__(module)
        self.dc_name = self.params['datacenter']
        self.dvs_name = self.params['dvswitch']

        datacenter = self.find_datacenter_by_name(self.dc_name)
        if datacenter is None:
            self.module.fail_json(msg="Failed to find the datacenter %s" % self.dc_name)
        if self.dvs_name:
            # User specified specific dvswitch name to gather information
            dvsn = find_dvs_by_name(self.content, self.dvs_name)
            if dvsn is None:
                self.module.fail_json(msg="Failed to find the dvswitch %s" % self.dvs_name)

            self.dvsls = [dvsn]
        else:
            # default behaviour, gather information about all dvswitches
            self.dvsls = get_all_objs(self.content, [vim.DistributedVirtualSwitch], folder=datacenter.networkFolder)

    def get_vlan_info(self, vlan_obj=None):
        """
        Return vlan information from given object
        Args:
            vlan_obj: vlan managed object
        Returns: Dict of vlan details of the specific object
        """

        vdret = dict()
        if not vlan_obj:
            return vdret

        if isinstance(vlan_obj, vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec):
            # Trunk portgroup: vlanId is a list of numeric ranges; collapse
            # single-element ranges to "N" and wider ones to "start-end".
            vlan_id_list = []
            for vli in vlan_obj.vlanId:
                if vli.start == vli.end:
                    vlan_id_list.append(str(vli.start))
                else:
                    vlan_id_list.append(str(vli.start) + "-" + str(vli.end))
            vdret = dict(trunk=True, pvlan=False, vlan_id=vlan_id_list)
        elif isinstance(vlan_obj, vim.dvs.VmwareDistributedVirtualSwitch.PvlanSpec):
            vdret = dict(trunk=False, pvlan=True, vlan_id=str(vlan_obj.pvlanId))
        else:
            vdret = dict(trunk=False, pvlan=False, vlan_id=str(vlan_obj.vlanId))

        return vdret

    def gather_dvs_portgroup_info(self):
        """Return a dict mapping each dvswitch name to a list of portgroup detail dicts.

        Each optional section (mac learning, network/port/teaming policy,
        uplinks, vlan info) is included only when the corresponding show_*
        parameter is true; otherwise the key holds an empty value.
        """
        # NOTE: parameter access is normalized to self.params (provided by the
        # PyVmomi base class) instead of mixing it with self.module.params.
        result = dict()
        for dvs in self.dvsls:
            result[dvs.name] = list()
            for dvs_pg in dvs.portgroup:
                mac_learning = dict()
                network_policy = dict()
                teaming_policy = dict()
                port_policy = dict()
                vlan_info = dict()
                active_uplinks = list()
                standby_uplinks = list()

                if dvs_pg.config.type == 'ephemeral':
                    port_binding = 'ephemeral'
                else:
                    port_binding = 'static'

                if dvs_pg.config.autoExpand is True:
                    port_allocation = 'elastic'
                else:
                    port_allocation = 'fixed'

                if self.params['show_network_policy']:
                    network_policy = dict(
                        forged_transmits=dvs_pg.config.defaultPortConfig.macManagementPolicy.forgedTransmits,
                        promiscuous=dvs_pg.config.defaultPortConfig.macManagementPolicy.allowPromiscuous,
                        mac_changes=dvs_pg.config.defaultPortConfig.macManagementPolicy.macChanges
                    )

                if self.params['show_mac_learning']:
                    macLearningPolicy = dvs_pg.config.defaultPortConfig.macManagementPolicy.macLearningPolicy
                    mac_learning = dict(
                        allow_unicast_flooding=macLearningPolicy.allowUnicastFlooding,
                        enabled=macLearningPolicy.enabled,
                        limit=macLearningPolicy.limit,
                        limit_policy=macLearningPolicy.limitPolicy
                    )

                if self.params['show_teaming_policy']:
                    teaming_policy = dict(
                        policy=dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy.policy.value,
                        inbound_policy=dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy.reversePolicy.value,
                        notify_switches=dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy.notifySwitches.value,
                        rolling_order=dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy.rollingOrder.value,
                    )

                # Uplink port order may be absent; guard both levels before use
                if self.params['show_uplinks'] and \
                        dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy and \
                        dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy.uplinkPortOrder:
                    active_uplinks = dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy.uplinkPortOrder.activeUplinkPort
                    standby_uplinks = dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy.uplinkPortOrder.standbyUplinkPort

                if self.params['show_port_policy']:
                    port_policy = dict(
                        block_override=dvs_pg.config.policy.blockOverrideAllowed,
                        ipfix_override=dvs_pg.config.policy.ipfixOverrideAllowed,
                        live_port_move=dvs_pg.config.policy.livePortMovingAllowed,
                        network_rp_override=dvs_pg.config.policy.networkResourcePoolOverrideAllowed,
                        port_config_reset_at_disconnect=dvs_pg.config.policy.portConfigResetAtDisconnect,
                        security_override=dvs_pg.config.policy.macManagementOverrideAllowed,
                        shaping_override=dvs_pg.config.policy.shapingOverrideAllowed,
                        traffic_filter_override=dvs_pg.config.policy.trafficFilterOverrideAllowed,
                        uplink_teaming_override=dvs_pg.config.policy.uplinkTeamingOverrideAllowed,
                        vendor_config_override=dvs_pg.config.policy.vendorConfigOverrideAllowed,
                        vlan_override=dvs_pg.config.policy.vlanOverrideAllowed
                    )

                if self.params['show_vlan_info']:
                    vlan_info = self.get_vlan_info(dvs_pg.config.defaultPortConfig.vlan)

                dvpg_details = dict(
                    portgroup_name=unquote(dvs_pg.name),
                    num_ports=dvs_pg.config.numPorts,
                    dvswitch_name=dvs_pg.config.distributedVirtualSwitch.name,
                    description=dvs_pg.config.description,
                    type=dvs_pg.config.type,
                    port_binding=port_binding,
                    port_allocation=port_allocation,
                    teaming_policy=teaming_policy,
                    port_policy=port_policy,
                    mac_learning=mac_learning,
                    network_policy=network_policy,
                    vlan_info=vlan_info,
                    key=dvs_pg.key,
                    active_uplinks=active_uplinks,
                    standby_uplinks=standby_uplinks,
                )
                result[dvs.name].append(dvpg_details)

        return result
+
+
def main():
    """Entry point for the vmware_dvs_portgroup_info module.

    Builds the argument spec and returns gathered portgroup information via
    exit_json. The module only reads state, so check mode is trivially
    supported and changed is always False.
    """
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        datacenter=dict(type='str', required=True),
        show_mac_learning=dict(type='bool', default=True),
        show_network_policy=dict(type='bool', default=True),
        show_teaming_policy=dict(type='bool', default=True),
        show_uplinks=dict(type='bool', default=True),
        show_port_policy=dict(type='bool', default=True),
        # The DOCUMENTATION block declares dvswitch as type str; declare it
        # explicitly here instead of relying on the implicit default.
        dvswitch=dict(type='str'),
        show_vlan_info=dict(type='bool', default=False),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    dvs_pg_mgr = DVSPortgroupInfoManager(module)
    module.exit_json(changed=False,
                     dvs_portgroup_info=dvs_pg_mgr.gather_dvs_portgroup_info())
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_dvswitch.py b/ansible_collections/community/vmware/plugins/modules/vmware_dvswitch.py
new file mode 100644
index 000000000..1d184b2dc
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_dvswitch.py
@@ -0,0 +1,1064 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_dvswitch
+short_description: Create or remove a Distributed Switch
+description:
+ - This module can be used to create, remove a Distributed Switch.
+author:
+- Joseph Callen (@jcpowermac)
+- Abhijeet Kasurde (@Akasurde)
+- Christian Kotte (@ckotte)
+options:
+ datacenter_name:
+ description:
+ - The name of the datacenter that will contain the Distributed Switch.
+ - This parameter is optional, if C(folder) is provided.
+ - Mutually exclusive with C(folder) parameter.
+ required: false
+ aliases: ['datacenter']
+ type: str
+ switch_name:
+ description:
+ - The name of the distribute vSwitch to create or remove.
+ required: true
+ aliases: ['switch', 'dvswitch']
+ type: str
+ switch_version:
+ description:
+ - The version of the Distributed Switch to create.
+ - The version must match the version of the ESXi hosts you want to connect.
+ - The version of the vCenter server is used if not specified.
+ - Required only if C(state) is set to C(present).
+ aliases: ['version']
+ type: str
+ mtu:
+ description:
+ - The switch maximum transmission unit.
+ - Required parameter for C(state) both C(present) and C(absent), before Ansible 2.6 version.
+ - Required only if C(state) is set to C(present), for Ansible 2.6 and onwards.
+ - Accepts value between 1280 to 9000 (both inclusive).
+ type: int
+ default: 1500
+ multicast_filtering_mode:
+ description:
+ - The multicast filtering mode.
+ - 'C(basic) mode: multicast traffic for virtual machines is forwarded according to the destination MAC address of the multicast group.'
+ - 'C(snooping) mode: the Distributed Switch provides IGMP and MLD snooping according to RFC 4541.'
+ type: str
+ choices: ['basic', 'snooping']
+ default: 'basic'
+ uplink_quantity:
+ description:
+ - Quantity of uplink per ESXi host added to the Distributed Switch.
+      - The uplink quantity can be increased or decreased, but a decrease will only be successful if the uplink isn't used by a portgroup.
+ - Required parameter for C(state) both C(present) and C(absent), before Ansible 2.6 version.
+ - Required only if C(state) is set to C(present), for Ansible 2.6 and onwards.
+ type: int
+ uplink_prefix:
+ description:
+ - The prefix used for the naming of the uplinks.
+ - Only valid if the Distributed Switch will be created. Not used if the Distributed Switch is already present.
+ - Uplinks are created as Uplink 1, Uplink 2, etc. pp. by default.
+ default: 'Uplink '
+ type: str
+ discovery_proto:
+ description:
+ - Link discovery protocol between Cisco and Link Layer discovery.
+ - Required parameter for C(state) both C(present) and C(absent), before Ansible 2.6 version.
+ - Required only if C(state) is set to C(present), for Ansible 2.6 and onwards.
+ - 'C(cdp): Use Cisco Discovery Protocol (CDP).'
+ - 'C(lldp): Use Link Layer Discovery Protocol (LLDP).'
+ - 'C(disabled): Do not use a discovery protocol.'
+ choices: ['cdp', 'lldp', 'disabled']
+ default: 'cdp'
+ aliases: [ 'discovery_protocol' ]
+ type: str
+ discovery_operation:
+ description:
+ - Select the discovery operation.
+ - Required parameter for C(state) both C(present) and C(absent), before Ansible 2.6 version.
+ - Required only if C(state) is set to C(present), for Ansible 2.6 and onwards.
+ choices: ['both', 'advertise', 'listen']
+ default: 'listen'
+ type: str
+ contact:
+ description:
+ - Dictionary which configures administrator contact name and description for the Distributed Switch.
+ suboptions:
+ name:
+ type: str
+ description: Administrator name.
+ description:
+ type: str
+ description: Description or other details.
+ type: dict
+ description:
+ description:
+ - Description of the Distributed Switch.
+ type: str
+ health_check:
+ description:
+ - Dictionary which configures Health Check for the Distributed Switch.
+ suboptions:
+ vlan_mtu:
+ type: bool
+ description: VLAN and MTU health check.
+ default: false
+ teaming_failover:
+ type: bool
+ description: Teaming and failover health check.
+ default: false
+ vlan_mtu_interval:
+ type: int
+ description:
+ - VLAN and MTU health check interval (minutes).
+ - The default value is 1 in the vSphere Client if the VLAN and MTU health check is enabled.
+ default: 0
+ teaming_failover_interval:
+ type: int
+ description:
+ - Teaming and failover health check interval (minutes).
+ - The default value is 1 in the vSphere Client if the Teaming and failover health check is enabled.
+ default: 0
+ type: dict
+ default: {
+ vlan_mtu: false,
+ teaming_failover: false,
+ vlan_mtu_interval: 0,
+ teaming_failover_interval: 0,
+ }
+ network_policy:
+ description:
+ - Dictionary which configures the different default security values for portgroups.
+ - If set, these options are inherited by the portgroups of the DVS.
+ suboptions:
+ promiscuous:
+ type: bool
+ description: Indicates whether promiscuous mode is allowed.
+ default: false
+ forged_transmits:
+ type: bool
+ description: Indicates whether forged transmits are allowed.
+ default: false
+ mac_changes:
+ type: bool
+ description: Indicates whether mac changes are allowed.
+ default: false
+ required: false
+ type: dict
+ state:
+ description:
+ - If set to C(present) and the Distributed Switch does not exist, the Distributed Switch will be created.
+ - If set to C(absent) and the Distributed Switch exists, the Distributed Switch will be deleted.
+ default: 'present'
+ choices: ['present', 'absent']
+ type: str
+ folder:
+ description:
+ - Destination folder, absolute path to place dvswitch in.
+ - The folder should include the datacenter.
+ - This parameter is case sensitive.
+ - This parameter is optional, if C(datacenter) is provided.
+ - 'Examples:'
+ - ' folder: /datacenter1/network'
+ - ' folder: datacenter1/network'
+ - ' folder: /datacenter1/network/folder1'
+ - ' folder: datacenter1/network/folder1'
+ - ' folder: /folder1/datacenter1/network'
+ - ' folder: folder1/datacenter1/network'
+ - ' folder: /folder1/datacenter1/network/folder2'
+ required: false
+ type: str
+ net_flow:
+ version_added: '2.7.0'
+ description:
+ - Dictionary which configures the Net Flow for the Distributed Switch.
+ suboptions:
+ collector_ip:
+ type: str
+ description: The IP Address (IPv4 or IPv6) of the NetFlow collector.
+ collector_port:
+ type: int
+ description: The Port of the NetFlow collector.
+ default: 0
+ observation_domain_id:
+ type: int
+ description: Identifies the information related to the switch.
+ default: 0
+ active_flow_timeout:
+ type: int
+ description: The time, in seconds, to wait before sending information after the flow is initiated.
+ default: 60
+ idle_flow_timeout:
+ type: int
+        description: The time, in seconds, to wait before sending information after the flow becomes idle.
+ default: 15
+ sampling_rate:
+ type: int
+ description:
+ - The portion of data that the switch collects.
+ - The sampling rate represents the number of packets that NetFlow drops after every collected packet.
+ - If the rate is 0, NetFlow samples every packet, that is, collect one packet and drop none.
+ - If the rate is 1, NetFlow samples a packet and drops the next one, and so on.
+ default: 4096
+ internal_flows_only:
+ type: bool
+ description: If True, data on network activity between vms on the same host will be collected only.
+ default: false
+ type: dict
+ default: {
+ 'collector_port': 0,
+ 'observation_domain_id': 0,
+ 'active_flow_timeout': 60,
+ 'idle_flow_timeout': 15,
+ 'sampling_rate': 4096,
+ 'internal_flows_only': false
+ }
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Create dvSwitch
+ community.vmware.vmware_dvswitch:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter: '{{ datacenter }}'
+ switch: dvSwitch
+ version: 6.0.0
+ mtu: 9000
+ uplink_quantity: 2
+ discovery_protocol: lldp
+ discovery_operation: both
+ state: present
+ delegate_to: localhost
+
+- name: Create dvSwitch with all options
+ community.vmware.vmware_dvswitch:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter: '{{ datacenter }}'
+ switch: dvSwitch
+ version: 6.5.0
+ mtu: 9000
+ uplink_quantity: 2
+ uplink_prefix: 'Uplink_'
+ discovery_protocol: cdp
+ discovery_operation: both
+ multicast_filtering_mode: snooping
+ health_check:
+ vlan_mtu: true
+ vlan_mtu_interval: 1
+ teaming_failover: true
+ teaming_failover_interval: 1
+ net_flow:
+ collector_ip: 192.168.10.50
+ collector_port: 50034
+ observation_domain_id: 0
+ active_flow_timeout: 60
+ idle_flow_timeout: 15
+ sampling_rate: 4096
+ internal_flows_only: false
+ state: present
+ delegate_to: localhost
+
+- name: Delete dvSwitch
+ community.vmware.vmware_dvswitch:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter: '{{ datacenter }}'
+ switch: dvSwitch
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+result:
+ description: information about performed operation
+ returned: always
+ type: str
+ sample: {
+ "changed": false,
+ "contact": null,
+ "contact_details": null,
+ "description": null,
+ "discovery_operation": "both",
+ "discovery_protocol": "cdp",
+ "dvswitch": "test",
+ "health_check_teaming": false,
+ "health_check_teaming_interval": 0,
+ "health_check_vlan": false,
+ "health_check_vlan_interval": 0,
+ "net_flow_collector_ip": "192.168.10.50",
+ "net_flow_collector_port": 50034,
+ "net_flow_observation_domain_id": 0,
+ "net_flow_active_flow_timeout": 60,
+ "net_flow_idle_flow_timeout": 15,
+ "net_flow_sampling_rate": 4096,
+ "net_flow_internal_flows_only": false,
+ "mtu": 9000,
+ "multicast_filtering_mode": "basic",
+ "result": "DVS already configured properly",
+ "uplink_quantity": 2,
+ "uplinks": [
+ "Uplink_1",
+ "Uplink_2"
+ ],
+ "version": "6.6.0"
+ }
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi, TaskError, find_dvs_by_name, vmware_argument_spec, wait_for_task
+)
+
+
+class VMwareDvSwitch(PyVmomi):
+ """Class to manage a Distributed Virtual Switch"""
+
    def __init__(self, module):
        """Read and validate module parameters and locate the target folder.

        Fails the module early on an unsupported switch version, a missing
        folder/datacenter, or an out-of-range MTU.
        """
        super(VMwareDvSwitch, self).__init__(module)
        # Populated later by check_dvs() with the switch's managed object
        self.dvs = None

        self.switch_name = self.module.params['switch_name']
        self.switch_version = self.module.params['switch_version']

        # Validate the requested version against what this vCenter supports
        if self.switch_version is not None:
            available_dvs_versions = self.available_dvs_versions()
            if self.switch_version not in available_dvs_versions:
                self.module.fail_json(msg="Unsupported version '%s'. Supported versions are: %s." % (self.switch_version, ', '.join(available_dvs_versions)))

        # The switch lives either in an explicitly given folder or in the
        # datacenter's network folder (folder and datacenter_name are
        # mutually exclusive per the module documentation).
        folder = self.params['folder']
        if folder:
            self.folder_obj = self.content.searchIndex.FindByInventoryPath(folder)
            if not self.folder_obj:
                self.module.fail_json(msg="Failed to find the folder specified by %(folder)s" % self.params)
        else:
            datacenter_name = self.params.get('datacenter_name')
            datacenter_obj = self.find_datacenter_by_name(datacenter_name)
            if not datacenter_obj:
                self.module.fail_json(msg="Failed to find datacenter '%s' required"
                                          " for managing distributed vSwitch." % datacenter_name)
            self.folder_obj = datacenter_obj.networkFolder

        self.mtu = self.module.params['mtu']
        # MTU sanity check
        if not 1280 <= self.mtu <= 9000:
            self.module.fail_json(
                msg="MTU value should be between 1280 and 9000 (both inclusive), provided %d." % self.mtu
            )
        self.multicast_filtering_mode = self.module.params['multicast_filtering_mode']
        self.uplink_quantity = self.module.params['uplink_quantity']
        self.uplink_prefix = self.module.params['uplink_prefix']
        self.discovery_protocol = self.module.params['discovery_proto']
        self.discovery_operation = self.module.params['discovery_operation']
        # TODO: add port mirroring
        # Health check suboptions always exist: the argument spec supplies a
        # full default dict for health_check.
        self.health_check_vlan = self.params['health_check'].get('vlan_mtu')
        self.health_check_vlan_interval = self.params['health_check'].get('vlan_mtu_interval')
        self.health_check_teaming = self.params['health_check'].get('teaming_failover')
        self.health_check_teaming_interval = self.params['health_check'].get('teaming_failover_interval')
        if self.params['contact']:
            self.contact_name = self.params['contact'].get('name')
            self.contact_details = self.params['contact'].get('description')
        else:
            self.contact_name = None
            self.contact_details = None
        self.description = self.module.params['description']

        # Treat an omitted network_policy dict as "no overrides requested"
        self.network_policy = self.module.params['network_policy']
        if self.network_policy is None:
            self.network_policy = {}

        # NetFlow settings; an empty collector_ip is normalized to None
        self.netFlow_collector_ip = self.module.params['net_flow'].get('collector_ip') or None
        self.netFlow_collector_port = self.module.params['net_flow'].get('collector_port')
        self.netFlow_observation_domain_id = self.module.params['net_flow'].get('observation_domain_id')
        self.netFlow_active_flow_timeout = self.module.params['net_flow'].get('active_flow_timeout')
        self.netFlow_idle_flow_timeout = self.module.params['net_flow'].get('idle_flow_timeout')
        self.netFlow_sampling_rate = self.module.params['net_flow'].get('sampling_rate')
        self.netFlow_internal_flows_only = self.module.params['net_flow'].get('internal_flows_only')

        self.state = self.module.params['state']
+
+ def available_dvs_versions(self):
+ """Get the DVS version supported by the vCenter"""
+ dvs_mng = self.content.dvSwitchManager
+ available_dvs_specs = dvs_mng.QueryAvailableDvsSpec(recommended=True)
+
+ available_dvs_versions = []
+ for available_dvs_spec in available_dvs_specs:
+ available_dvs_versions.append(available_dvs_spec.version)
+
+ return available_dvs_versions
+
+ def process_state(self):
+ """Process the current state of the DVS"""
+ dvs_states = {
+ 'absent': {
+ 'present': self.destroy_dvswitch,
+ 'absent': self.exit_unchanged,
+ },
+ 'present': {
+ 'present': self.update_dvswitch,
+ 'absent': self.create_dvswitch,
+ }
+ }
+
+ try:
+ dvs_states[self.state][self.check_dvs()]()
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=to_native(runtime_fault.msg))
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=to_native(method_fault.msg))
+ except Exception as e:
+ self.module.fail_json(msg=to_native(e))
+
+ def check_dvs(self):
+ """Check if DVS is present"""
+ self.dvs = find_dvs_by_name(self.content, self.switch_name, folder=self.folder_obj)
+ if self.dvs is None:
+ return 'absent'
+ return 'present'
+
+ def create_dvswitch(self):
+ """Create a DVS"""
+ changed = True
+ results = dict(changed=changed)
+
+ spec = vim.DistributedVirtualSwitch.CreateSpec()
+ spec.configSpec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
+ # Name
+ results['dvswitch'] = self.switch_name
+ spec.configSpec.name = self.switch_name
+ # MTU
+ results['mtu'] = self.mtu
+ spec.configSpec.maxMtu = self.mtu
+ # Discovery Protocol type and operation
+ results['discovery_protocol'] = self.discovery_protocol
+ results['discovery_operation'] = self.discovery_operation
+ spec.configSpec.linkDiscoveryProtocolConfig = self.create_ldp_spec()
+ # Administrator contact
+ results['contact'] = self.contact_name
+ results['contact_details'] = self.contact_details
+ if self.contact_name or self.contact_details:
+ spec.configSpec.contact = self.create_contact_spec()
+ # Description
+ results['description'] = self.description
+ if self.description:
+ spec.description = self.description
+ # Uplinks
+ results['uplink_quantity'] = self.uplink_quantity
+ spec.configSpec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy()
+ for count in range(1, self.uplink_quantity + 1):
+ spec.configSpec.uplinkPortPolicy.uplinkPortName.append("%s%d" % (self.uplink_prefix, count))
+ results['uplinks'] = spec.configSpec.uplinkPortPolicy.uplinkPortName
+ # Version
+ results['version'] = self.switch_version
+ if self.switch_version:
+ spec.productInfo = self.create_product_spec(self.switch_version)
+
+ if self.module.check_mode:
+ result = "DVS would be created"
+ else:
+ # Create DVS
+ network_folder = self.folder_obj
+ task = network_folder.CreateDVS_Task(spec)
+ try:
+ wait_for_task(task)
+ except TaskError as invalid_argument:
+ self.module.fail_json(
+ msg="Failed to create DVS : %s" % to_native(invalid_argument)
+ )
+ # Find new DVS
+ self.dvs = find_dvs_by_name(self.content, self.switch_name)
+ changed_multicast = changed_network_policy = False
+ spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
+ # Use the same version in the new spec; The version will be increased by one by the API automatically
+ spec.configVersion = self.dvs.config.configVersion
+ # Set multicast filtering mode
+ results['multicast_filtering_mode'] = self.multicast_filtering_mode
+ multicast_filtering_mode = self.get_api_mc_filtering_mode(self.multicast_filtering_mode)
+ if self.dvs.config.multicastFilteringMode != multicast_filtering_mode:
+ changed_multicast = True
+ spec.multicastFilteringMode = multicast_filtering_mode
+ spec.multicastFilteringMode = self.get_api_mc_filtering_mode(self.multicast_filtering_mode)
+ # Set default network policy
+ network_policy = self.network_policy
+ if 'promiscuous' in network_policy or 'forged_transmits' in network_policy or 'mac_changes' in network_policy:
+ results['network_policy'] = {}
+ if 'promiscuous' in network_policy:
+ results['network_policy']['promiscuous'] = network_policy['promiscuous']
+ if 'forged_transmits' in network_policy:
+ results['network_policy']['forged_transmits'] = network_policy['forged_transmits']
+ if 'mac_changes' in network_policy:
+ results['network_policy']['mac_changes'] = network_policy['mac_changes']
+
+ result = self.check_network_policy_config()
+ changed_network_policy = result[1]
+ if changed_network_policy:
+ if spec.defaultPortConfig is None:
+ spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+ spec.defaultPortConfig.macManagementPolicy = result[0]
+
+ # Set NetFlow config
+ if self.netFlow_collector_ip is not None:
+ results['net_flow_collector_ip'] = self.netFlow_collector_ip
+ results['net_flow_collector_port'] = self.netFlow_collector_port
+ results['net_flow_observation_domain_id'] = self.netFlow_observation_domain_id
+ results['net_flow_active_flow_timeout'] = self.netFlow_active_flow_timeout
+ results['net_flow_idle_flow_timeout'] = self.netFlow_idle_flow_timeout
+ results['net_flow_sampling_rate'] = self.netFlow_sampling_rate
+ results['net_flow_internal_flows_only'] = self.netFlow_internal_flows_only
+ result = self.check_netFlow_config()
+
+ changed_netFlow = result[1]
+ if changed_netFlow:
+ spec.ipfixConfig = result[0]
+
+ if changed_multicast or changed_network_policy or changed_netFlow:
+ self.update_dvs_config(self.dvs, spec)
+
+ # Set Health Check config
+ results['health_check_vlan'] = self.health_check_vlan
+ results['health_check_teaming'] = self.health_check_teaming
+ results['uuid'] = self.dvs.uuid
+ result = self.check_health_check_config(self.dvs.config.healthCheckConfig)
+ changed_health_check = result[1]
+ if changed_health_check:
+ self.update_health_check_config(self.dvs, result[0])
+
+ result = "DVS created"
+
+ self.module.exit_json(changed=changed, result=to_native(result))
+
+ def create_ldp_spec(self):
+ """Create Link Discovery Protocol config spec"""
+ ldp_config_spec = vim.host.LinkDiscoveryProtocolConfig()
+ if self.discovery_protocol == 'disabled':
+ ldp_config_spec.protocol = 'cdp'
+ ldp_config_spec.operation = 'none'
+ else:
+ ldp_config_spec.protocol = self.discovery_protocol
+ ldp_config_spec.operation = self.discovery_operation
+ return ldp_config_spec
+
+ def create_product_spec(self, switch_version):
+ """Create product info spec"""
+ product_info_spec = vim.dvs.ProductSpec()
+ product_info_spec.version = switch_version
+ return product_info_spec
+
+ @staticmethod
+ def get_api_mc_filtering_mode(mode):
+ """Get Multicast filtering mode"""
+ if mode == 'basic':
+ return 'legacyFiltering'
+ return 'snooping'
+
+ def create_contact_spec(self):
+ """Create contact info spec"""
+ contact_info_spec = vim.DistributedVirtualSwitch.ContactInfo()
+ contact_info_spec.name = self.contact_name
+ contact_info_spec.contact = self.contact_details
+ return contact_info_spec
+
+ def update_dvs_config(self, switch_object, spec):
+ """Update DVS config"""
+ try:
+ task = switch_object.ReconfigureDvs_Task(spec)
+ wait_for_task(task)
+ except TaskError as invalid_argument:
+ self.module.fail_json(
+ msg="Failed to update DVS : %s" % to_native(invalid_argument)
+ )
+
+ def check_network_policy_config(self):
+ """Compare the switch's default-port MacManagementPolicy with the requested network_policy.
+
+ Builds a new MacManagementPolicy containing only the values that were
+ requested AND differ from the current config. Returns a tuple:
+ (policy, changed, changed_promiscuous, promiscuous_previous,
+ changed_forged_transmits, forged_transmits_previous,
+ changed_mac_changes, mac_changes_previous)
+ where the *_previous values are None when the corresponding key was
+ either absent from self.network_policy or already matched.
+ """
+ changed_promiscuous = changed_forged_transmits = changed_mac_changes = False
+ promiscuous_previous = forged_transmits_previous = mac_changes_previous = None
+ current_config = self.dvs.config.defaultPortConfig
+
+ policy = vim.dvs.VmwareDistributedVirtualSwitch.MacManagementPolicy()
+
+ # Each setting is only compared (and carried into the new policy) when the
+ # user explicitly supplied it in network_policy.
+ if 'promiscuous' in self.network_policy and current_config.macManagementPolicy.allowPromiscuous != self.network_policy['promiscuous']:
+ changed_promiscuous = True
+ promiscuous_previous = current_config.macManagementPolicy.allowPromiscuous
+ policy.allowPromiscuous = self.network_policy['promiscuous']
+
+ if 'forged_transmits' in self.network_policy and current_config.macManagementPolicy.forgedTransmits != self.network_policy['forged_transmits']:
+ changed_forged_transmits = True
+ forged_transmits_previous = current_config.macManagementPolicy.forgedTransmits
+ policy.forgedTransmits = self.network_policy['forged_transmits']
+
+ if 'mac_changes' in self.network_policy and current_config.macManagementPolicy.macChanges != self.network_policy['mac_changes']:
+ changed_mac_changes = True
+ mac_changes_previous = current_config.macManagementPolicy.macChanges
+ policy.macChanges = self.network_policy['mac_changes']
+
+ changed = changed_promiscuous or changed_forged_transmits or changed_mac_changes
+ return (policy, changed, changed_promiscuous, promiscuous_previous, changed_forged_transmits,
+ forged_transmits_previous, changed_mac_changes, mac_changes_previous)
+
+ def check_health_check_config(self, health_check_config):
+ """Compare (and mutate in place) the switch's health check config against requested values.
+
+ Iterates the existing VlanMtu / Teaming health check entries, updates
+ their enable/interval fields to the requested settings, and returns:
+ (health_check_config, changed, changed_vlan, vlan_previous,
+ changed_vlan_interval, vlan_interval_previous,
+ changed_teaming, teaming_previous,
+ changed_teaming_interval, teaming_interval_previous)
+ Intervals are only compared when the corresponding check ends up enabled.
+ """
+ changed = changed_vlan = changed_vlan_interval = changed_teaming = changed_teaming_interval = False
+ vlan_previous = teaming_previous = None
+ vlan_interval_previous = teaming_interval_previous = 0
+ for config in health_check_config:
+ if isinstance(config, vim.dvs.VmwareDistributedVirtualSwitch.VlanMtuHealthCheckConfig):
+ if config.enable != self.health_check_vlan:
+ changed = changed_vlan = True
+ vlan_previous = config.enable
+ config.enable = self.health_check_vlan
+ if config.enable and config.interval != self.health_check_vlan_interval:
+ changed = changed_vlan_interval = True
+ vlan_interval_previous = config.interval
+ config.interval = self.health_check_vlan_interval
+ if isinstance(config, vim.dvs.VmwareDistributedVirtualSwitch.TeamingHealthCheckConfig):
+ if config.enable != self.health_check_teaming:
+ changed = changed_teaming = True
+ teaming_previous = config.enable
+ config.enable = self.health_check_teaming
+ if config.enable and config.interval != self.health_check_teaming_interval:
+ changed = changed_teaming_interval = True
+ teaming_interval_previous = config.interval
+ config.interval = self.health_check_teaming_interval
+ return (health_check_config, changed, changed_vlan, vlan_previous, changed_vlan_interval, vlan_interval_previous,
+ changed_teaming, teaming_previous, changed_teaming_interval, teaming_interval_previous)
+
+ def update_health_check_config(self, switch_object, health_check_config):
+ """Apply the given health check config to the switch and wait for the task.
+
+ Fails the module on DvsFault, NotSupported (health check unavailable on
+ this switch), or TaskError raised while submitting or awaiting the task.
+ """
+ try:
+ task = switch_object.UpdateDVSHealthCheckConfig_Task(healthCheckConfig=health_check_config)
+ except vim.fault.DvsFault as dvs_fault:
+ self.module.fail_json(msg="Update failed due to DVS fault : %s" % to_native(dvs_fault))
+ except vmodl.fault.NotSupported as not_supported:
+ self.module.fail_json(msg="Health check not supported on the switch : %s" % to_native(not_supported))
+ except TaskError as invalid_argument:
+ self.module.fail_json(msg="Failed to configure health check : %s" % to_native(invalid_argument))
+ # Waiting is done in a separate try so submit-time and completion-time
+ # failures produce distinct error messages.
+ try:
+ wait_for_task(task)
+ except TaskError as invalid_argument:
+ self.module.fail_json(msg="Failed to update health check config : %s" % to_native(invalid_argument))
+
+ def check_netFlow_config(self):
+ """Compare the switch's IPFIX (NetFlow) config with the requested settings.
+
+ Builds/updates an IpfixConfig and returns a flat tuple of the new config,
+ an overall 'changed' flag, and per-field (changed, previous) pairs for
+ collector IP/port, observation domain id, active/idle flow timeouts,
+ sampling rate, and internal-flows-only.
+ """
+ changed = changed_collectorIpAddress = changed_collectorPort = changed_observationDomainId = \
+ changed_activeFlowTimeout = changed_idleFlowTimeout = changed_samplingRate = changed_internalFlowsOnly = False
+ collectorIpAddress_previous = collectorPort_previous = observationDomainId_previous = activeFlowTimeout_previous = \
+ idleFlowTimeout_previous = samplingRate_previous = internalFlowsOnly_previous = None
+
+ current_config = self.dvs.config.ipfixConfig
+ # NOTE(review): only the collectorIpAddress comparison is guarded by the
+ # netFlow_collector_ip check; the comparisons below read current_config
+ # unconditionally, so if ipfixConfig were actually None this would raise
+ # AttributeError despite the fallback object created here — confirm
+ # whether ipfixConfig can be None in practice.
+ if current_config is None:
+ new_config = vim.dvs.VmwareDistributedVirtualSwitch.IpfixConfig()
+ else:
+ new_config = current_config
+
+ if self.netFlow_collector_ip is not None:
+ if current_config.collectorIpAddress != self.netFlow_collector_ip:
+ changed = changed_collectorIpAddress = True
+ collectorIpAddress_previous = current_config.collectorIpAddress
+ new_config.collectorIpAddress = self.netFlow_collector_ip
+ if current_config.collectorPort != self.netFlow_collector_port:
+ changed = changed_collectorPort = True
+ collectorPort_previous = current_config.collectorPort
+ new_config.collectorPort = self.netFlow_collector_port
+ if current_config.observationDomainId != self.netFlow_observation_domain_id:
+ changed = changed_observationDomainId = True
+ observationDomainId_previous = current_config.observationDomainId
+ new_config.observationDomainId = self.netFlow_observation_domain_id
+ if current_config.activeFlowTimeout != self.netFlow_active_flow_timeout:
+ changed = changed_activeFlowTimeout = True
+ activeFlowTimeout_previous = current_config.activeFlowTimeout
+ new_config.activeFlowTimeout = self.netFlow_active_flow_timeout
+ if current_config.idleFlowTimeout != self.netFlow_idle_flow_timeout:
+ changed = changed_idleFlowTimeout = True
+ idleFlowTimeout_previous = current_config.idleFlowTimeout
+ new_config.idleFlowTimeout = self.netFlow_idle_flow_timeout
+ if current_config.samplingRate != self.netFlow_sampling_rate:
+ changed = changed_samplingRate = True
+ samplingRate_previous = current_config.samplingRate
+ new_config.samplingRate = self.netFlow_sampling_rate
+ if current_config.internalFlowsOnly != self.netFlow_internal_flows_only:
+ changed = changed_internalFlowsOnly = True
+ internalFlowsOnly_previous = current_config.internalFlowsOnly
+ new_config.internalFlowsOnly = self.netFlow_internal_flows_only
+
+ return (new_config, changed, changed_collectorIpAddress, collectorIpAddress_previous,
+ changed_collectorPort, collectorPort_previous, changed_observationDomainId, observationDomainId_previous,
+ changed_activeFlowTimeout, activeFlowTimeout_previous, changed_idleFlowTimeout, idleFlowTimeout_previous,
+ changed_samplingRate, samplingRate_previous, changed_internalFlowsOnly, internalFlowsOnly_previous)
+
+ def exit_unchanged(self):
+ """Exit the module unchanged, reporting that the DVS is not present."""
+ changed = False
+ results = dict(changed=changed)
+ results['dvswitch'] = self.switch_name
+ results['result'] = "DVS not present"
+ self.module.exit_json(**results)
+
+ def destroy_dvswitch(self):
+ """Delete the DVS (honoring check mode) and exit the module."""
+ changed = True
+ results = dict(changed=changed)
+ results['dvswitch'] = self.switch_name
+
+ if self.module.check_mode:
+ results['result'] = "DVS would be deleted"
+ else:
+ # NOTE(review): wait_for_task() is outside the try, so a TaskError
+ # raised while the destroy task runs is not converted to fail_json;
+ # also the error message reads "Failed to deleted" (typo) — both
+ # would need a code change to fix.
+ try:
+ task = self.dvs.Destroy_Task()
+ except vim.fault.VimFault as vim_fault:
+ self.module.fail_json(msg="Failed to deleted DVS : %s" % to_native(vim_fault))
+ wait_for_task(task)
+ results['result'] = "DVS deleted"
+ self.module.exit_json(**results)
+
+ def update_dvswitch(self):
+ """Check every managed DVS setting, apply the needed updates, and exit.
+
+ Compares MTU, discovery protocol, multicast filtering, contact,
+ description, uplinks, health check, network policy, switch version, and
+ NetFlow against the requested values; collects previous values into the
+ results dict; then (outside check mode) issues up to three operations:
+ a ReconfigureDvs_Task, an UpdateDVSHealthCheckConfig_Task, and a
+ version-upgrade PerformDvsProductSpecOperation_Task. Always terminates
+ via module.exit_json().
+ """
+ changed = changed_settings = changed_ldp = changed_version = changed_health_check = changed_network_policy = changed_netFlow = False
+ results = dict(changed=changed)
+ results['dvswitch'] = self.switch_name
+ changed_list = []
+ message = ''
+
+ config_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
+ # Use the same version in the new spec; The version will be increased by one by the API automatically
+ config_spec.configVersion = self.dvs.config.configVersion
+
+ # Check MTU
+ results['mtu'] = self.mtu
+ if self.dvs.config.maxMtu != self.mtu:
+ changed = changed_settings = True
+ changed_list.append("mtu")
+ # NOTE(review): config_spec.maxMtu has not been assigned yet at this
+ # point, so 'mtu_previous' records the spec default rather than the
+ # switch's current MTU — likely should read self.dvs.config.maxMtu.
+ results['mtu_previous'] = config_spec.maxMtu
+ config_spec.maxMtu = self.mtu
+
+ # Check Discovery Protocol type and operation
+ ldp_protocol = self.dvs.config.linkDiscoveryProtocolConfig.protocol
+ ldp_operation = self.dvs.config.linkDiscoveryProtocolConfig.operation
+ if self.discovery_protocol == 'disabled':
+ results['discovery_protocol'] = self.discovery_protocol
+ results['discovery_operation'] = 'n/a'
+ # 'disabled' is represented in the API as protocol 'cdp' with
+ # operation 'none'.
+ if ldp_protocol != 'cdp' or ldp_operation != 'none':
+ changed_ldp = True
+ results['discovery_protocol_previous'] = ldp_protocol
+ results['discovery_operation_previous'] = ldp_operation
+ else:
+ results['discovery_protocol'] = self.discovery_protocol
+ results['discovery_operation'] = self.discovery_operation
+ if ldp_protocol != self.discovery_protocol or ldp_operation != self.discovery_operation:
+ changed_ldp = True
+ if ldp_protocol != self.discovery_protocol:
+ results['discovery_protocol_previous'] = ldp_protocol
+ if ldp_operation != self.discovery_operation:
+ results['discovery_operation_previous'] = ldp_operation
+ if changed_ldp:
+ changed = changed_settings = True
+ changed_list.append("discovery protocol")
+ config_spec.linkDiscoveryProtocolConfig = self.create_ldp_spec()
+
+ # Check Multicast filtering mode
+ results['multicast_filtering_mode'] = self.multicast_filtering_mode
+ multicast_filtering_mode = self.get_api_mc_filtering_mode(self.multicast_filtering_mode)
+ if self.dvs.config.multicastFilteringMode != multicast_filtering_mode:
+ changed = changed_settings = True
+ changed_list.append("multicast filtering")
+ results['multicast_filtering_mode_previous'] = self.dvs.config.multicastFilteringMode
+ config_spec.multicastFilteringMode = multicast_filtering_mode
+
+ # Check administrator contact
+ results['contact'] = self.contact_name
+ results['contact_details'] = self.contact_details
+ if self.dvs.config.contact.name != self.contact_name or self.dvs.config.contact.contact != self.contact_details:
+ changed = changed_settings = True
+ changed_list.append("contact")
+ results['contact_previous'] = self.dvs.config.contact.name
+ results['contact_details_previous'] = self.dvs.config.contact.contact
+ config_spec.contact = self.create_contact_spec()
+
+ # Check description
+ results['description'] = self.description
+ if self.dvs.config.description != self.description:
+ changed = changed_settings = True
+ changed_list.append("description")
+ results['description_previous'] = self.dvs.config.description
+ if self.description is None:
+ # need to use empty string; will be set to None by API
+ config_spec.description = ''
+ else:
+ config_spec.description = self.description
+
+ # Check uplinks
+ results['uplink_quantity'] = self.uplink_quantity
+ if len(self.dvs.config.uplinkPortPolicy.uplinkPortName) != self.uplink_quantity:
+ changed = changed_settings = True
+ changed_list.append("uplink quantity")
+ results['uplink_quantity_previous'] = len(self.dvs.config.uplinkPortPolicy.uplinkPortName)
+ config_spec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy()
+ # NOTE(review): the add and remove branches below execute identical
+ # code (both rebuild the uplink name list from scratch); they could
+ # be collapsed into one unconditional rebuild.
+ # just replace the uplink array if uplinks need to be added
+ if len(self.dvs.config.uplinkPortPolicy.uplinkPortName) < self.uplink_quantity:
+ for count in range(1, self.uplink_quantity + 1):
+ config_spec.uplinkPortPolicy.uplinkPortName.append("%s%d" % (self.uplink_prefix, count))
+ # just replace the uplink array if uplinks need to be removed
+ if len(self.dvs.config.uplinkPortPolicy.uplinkPortName) > self.uplink_quantity:
+ for count in range(1, self.uplink_quantity + 1):
+ config_spec.uplinkPortPolicy.uplinkPortName.append("%s%d" % (self.uplink_prefix, count))
+ results['uplinks'] = config_spec.uplinkPortPolicy.uplinkPortName
+ results['uplinks_previous'] = self.dvs.config.uplinkPortPolicy.uplinkPortName
+ else:
+ # No uplink name check; uplink names can't be changed easily if they are used by a portgroup
+ results['uplinks'] = self.dvs.config.uplinkPortPolicy.uplinkPortName
+
+ # Check Health Check
+ results['health_check_vlan'] = self.health_check_vlan
+ results['health_check_teaming'] = self.health_check_teaming
+ results['health_check_vlan_interval'] = self.health_check_vlan_interval
+ results['health_check_teaming_interval'] = self.health_check_teaming_interval
+ (health_check_config, changed_health_check, changed_vlan, vlan_previous,
+ changed_vlan_interval, vlan_interval_previous, changed_teaming, teaming_previous,
+ changed_teaming_interval, teaming_interval_previous) = \
+ self.check_health_check_config(self.dvs.config.healthCheckConfig)
+ if changed_health_check:
+ # Health check uses its own API call, so it flips 'changed' but not
+ # 'changed_settings'.
+ changed = True
+ changed_list.append("health check")
+ if changed_vlan:
+ results['health_check_vlan_previous'] = vlan_previous
+ if changed_vlan_interval:
+ results['health_check_vlan_interval_previous'] = vlan_interval_previous
+ if changed_teaming:
+ results['health_check_teaming_previous'] = teaming_previous
+ if changed_teaming_interval:
+ results['health_check_teaming_interval_previous'] = teaming_interval_previous
+
+ # Check Network Policy
+ if 'promiscuous' in self.network_policy or 'forged_transmits' in self.network_policy or 'mac_changes' in self.network_policy:
+ results['network_policy'] = {}
+ if 'promiscuous' in self.network_policy:
+ results['network_policy']['promiscuous'] = self.network_policy['promiscuous']
+ if 'forged_transmits' in self.network_policy:
+ results['network_policy']['forged_transmits'] = self.network_policy['forged_transmits']
+ if 'mac_changes' in self.network_policy:
+ results['network_policy']['mac_changes'] = self.network_policy['mac_changes']
+
+ (policy, changed_network_policy, changed_promiscuous, promiscuous_previous, changed_forged_transmits,
+ forged_transmits_previous, changed_mac_changes, mac_changes_previous) = \
+ self.check_network_policy_config()
+
+ if changed_network_policy:
+ changed = changed_settings = True
+ changed_list.append("network policy")
+ results['network_policy_previous'] = {}
+ if changed_promiscuous:
+ results['network_policy_previous']['promiscuous'] = promiscuous_previous
+
+ if changed_forged_transmits:
+ results['network_policy_previous']['forged_transmits'] = forged_transmits_previous
+
+ if changed_mac_changes:
+ results['network_policy_previous']['mac_changes'] = mac_changes_previous
+
+ if config_spec.defaultPortConfig is None:
+ config_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+
+ config_spec.defaultPortConfig.macManagementPolicy = policy
+
+ # Check switch version
+ if self.switch_version:
+ results['version'] = self.switch_version
+ if self.dvs.config.productInfo.version != self.switch_version:
+ changed_version = True
+ spec_product = self.create_product_spec(self.switch_version)
+ else:
+ results['version'] = self.dvs.config.productInfo.version
+ changed_version = False
+ if changed_version:
+ changed = True
+ changed_list.append("switch version")
+ results['version_previous'] = self.dvs.config.productInfo.version
+
+ # Check NetFlow Config
+ if self.netFlow_collector_ip is not None:
+ results['net_flow_collector_ip'] = self.netFlow_collector_ip
+ results['net_flow_collector_port'] = self.netFlow_collector_port
+ results['net_flow_observation_domain_id'] = self.netFlow_observation_domain_id
+ results['net_flow_active_flow_timeout'] = self.netFlow_active_flow_timeout
+ results['net_flow_idle_flow_timeout'] = self.netFlow_idle_flow_timeout
+ results['net_flow_sampling_rate'] = self.netFlow_sampling_rate
+ results['net_flow_internal_flows_only'] = self.netFlow_internal_flows_only
+ (ipfixConfig, changed_netFlow, changed_collectorIpAddress, collectorIpAddress_previous,
+ changed_collectorPort, collectorPort_previous, changed_observationDomainId, observationDomainId_previous,
+ changed_activeFlowTimeout, activeFlowTimeout_previous, changed_idleFlowTimeout, idleFlowTimeout_previous,
+ changed_samplingRate, samplingRate_previous, changed_internalFlowsOnly, internalFlowsOnly_previous) = self.check_netFlow_config()
+ if changed_netFlow:
+ changed = changed_settings = True
+ changed_list.append("netFlow")
+ if changed_collectorIpAddress:
+ results['net_flow_collector_ip_previous'] = collectorIpAddress_previous
+ if changed_collectorPort:
+ results['net_flow_collector_port_previous'] = collectorPort_previous
+ if changed_observationDomainId:
+ results['net_flow_observation_domain_id_previous'] = observationDomainId_previous
+ if changed_activeFlowTimeout:
+ results['net_flow_active_flow_timeout_previous'] = activeFlowTimeout_previous
+ if changed_idleFlowTimeout:
+ results['net_flow_idle_flow_timeout_previous'] = idleFlowTimeout_previous
+ if changed_samplingRate:
+ results['net_flow_sampling_rate_previous'] = samplingRate_previous
+ if changed_internalFlowsOnly:
+ results['net_flow_internal_flows_only_previous'] = internalFlowsOnly_previous
+
+ config_spec.ipfixConfig = ipfixConfig
+
+ if changed:
+ if self.module.check_mode:
+ changed_suffix = ' would be changed'
+ else:
+ changed_suffix = ' changed'
+ # Build a human-readable "a, b, and c changed" style message.
+ if len(changed_list) > 2:
+ message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1])
+ elif len(changed_list) == 2:
+ message = ' and '.join(changed_list)
+ elif len(changed_list) == 1:
+ message = changed_list[0]
+ message += changed_suffix
+ if not self.module.check_mode:
+ if changed_settings:
+ self.update_dvs_config(self.dvs, config_spec)
+ if changed_health_check:
+ self.update_health_check_config(self.dvs, health_check_config)
+ if changed_version:
+ task = self.dvs.PerformDvsProductSpecOperation_Task("upgrade", spec_product)
+ try:
+ wait_for_task(task)
+ except TaskError as invalid_argument:
+ self.module.fail_json(msg="Failed to update DVS version : %s" % to_native(invalid_argument))
+ else:
+ message = "DVS already configured properly"
+ results['uuid'] = self.dvs.uuid
+ results['changed'] = changed
+ results['result'] = message
+
+ self.module.exit_json(**results)
+
+
+def main():
+ """Module entry point: build the argument spec, instantiate VMwareDvSwitch, and run."""
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ dict(
+ datacenter_name=dict(aliases=['datacenter']),
+ folder=dict(),
+ switch_name=dict(required=True, aliases=['switch', 'dvswitch']),
+ mtu=dict(type='int', default=1500),
+ multicast_filtering_mode=dict(type='str', default='basic', choices=['basic', 'snooping']),
+ switch_version=dict(
+ type='str',
+ aliases=['version'],
+ default=None
+ ),
+ uplink_quantity=dict(type='int'),
+ uplink_prefix=dict(type='str', default='Uplink '),
+ discovery_proto=dict(
+ type='str', choices=['cdp', 'lldp', 'disabled'], default='cdp', aliases=['discovery_protocol']
+ ),
+ discovery_operation=dict(type='str', choices=['both', 'advertise', 'listen'], default='listen'),
+ health_check=dict(
+ type='dict',
+ options=dict(
+ vlan_mtu=dict(type='bool', default=False),
+ teaming_failover=dict(type='bool', default=False),
+ vlan_mtu_interval=dict(type='int', default=0),
+ teaming_failover_interval=dict(type='int', default=0),
+ ),
+ default=dict(
+ vlan_mtu=False,
+ teaming_failover=False,
+ vlan_mtu_interval=0,
+ teaming_failover_interval=0,
+ ),
+ ),
+ contact=dict(
+ type='dict',
+ options=dict(
+ name=dict(type='str'),
+ description=dict(type='str'),
+ ),
+ ),
+ description=dict(type='str'),
+ state=dict(default='present', choices=['present', 'absent']),
+ network_policy=dict(
+ type='dict',
+ options=dict(
+ promiscuous=dict(type='bool', default=False),
+ forged_transmits=dict(type='bool', default=False),
+ mac_changes=dict(type='bool', default=False)
+ ),
+ ),
+ net_flow=dict(
+ type='dict',
+ options=dict(
+ collector_ip=dict(type='str'),
+ collector_port=dict(type='int', default=0),
+ observation_domain_id=dict(type='int', default=0),
+ active_flow_timeout=dict(type='int', default=60),
+ idle_flow_timeout=dict(type='int', default=15),
+ sampling_rate=dict(type='int', default=4096),
+ internal_flows_only=dict(type='bool', default=False),
+ ),
+ default=dict(
+ collector_port=0,
+ observation_domain_id=0,
+ active_flow_timeout=60,
+ idle_flow_timeout=15,
+ sampling_rate=4096,
+ internal_flows_only=False,
+ ),
+ ),
+ )
+ )
+
+ # uplink_quantity is only required when creating/updating (state=present);
+ # exactly one of folder/datacenter_name must be given to locate the switch.
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present',
+ ['uplink_quantity']),
+ ],
+ required_one_of=[
+ ['folder', 'datacenter_name'],
+ ],
+ mutually_exclusive=[
+ ['folder', 'datacenter_name'],
+ ],
+ supports_check_mode=True,
+ )
+
+ vmware_dvswitch = VMwareDvSwitch(module)
+ vmware_dvswitch.process_state()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_dvswitch_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_dvswitch_info.py
new file mode 100644
index 000000000..5cba227c8
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_dvswitch_info.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, sky-joker
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_dvswitch_info
+short_description: Gathers info dvswitch configurations
+description:
+ - This module can be used to gather information about dvswitch configurations.
+author:
+ - sky-joker (@sky-joker)
+options:
+ folder:
+ description:
+ - Specify a folder location of dvswitch to gather information from.
+ - 'Examples:'
+ - ' folder: /datacenter1/network'
+ - ' folder: datacenter1/network'
+ - ' folder: /datacenter1/network/folder1'
+ - ' folder: datacenter1/network/folder1'
+ - ' folder: /folder1/datacenter1/network'
+ - ' folder: folder1/datacenter1/network'
+ - ' folder: /folder1/datacenter1/network/folder2'
+ required: false
+ type: str
+ switch_name:
+ description:
+ - Name of a dvswitch to look for.
+ - If C(switch_name) not specified gather all dvswitch information.
+ aliases: ['switch', 'dvswitch']
+ required: false
+ type: str
+ schema:
+ description:
+ - Specify the output schema desired.
+ - The 'summary' output schema is the legacy output from the module
+ - The 'vsphere' output schema is the vSphere API class definition
+ which requires pyvmomi>6.7.1
+ choices: ['summary', 'vsphere']
+ default: 'summary'
+ type: str
+ properties:
+ description:
+ - Specify the properties to retrieve.
+ - If not specified, all properties are retrieved (deeply).
+ - Results are returned in a structure identical to the vsphere API.
+ - 'Example:'
+ - ' properties: ['
+ - ' "summary.name",'
+ - ' "summary.numPorts",'
+ - ' "config.maxMtu",'
+ - ' "overallStatus"'
+ - ' ]'
+ - Only valid when C(schema) is C(vsphere).
+ type: list
+ elements: str
+ required: false
+extends_documentation_fragment:
+ - community.vmware.vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: Gather all registered dvswitch
+ community.vmware.vmware_dvswitch_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ delegate_to: localhost
+ register: dvswitch_info
+
+- name: Gather info about specific dvswitch
+ community.vmware.vmware_dvswitch_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ switch_name: DVSwitch01
+ delegate_to: localhost
+ register: dvswitch_info
+
+- name: Gather info from folder about specific dvswitch
+ community.vmware.vmware_dvswitch_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: /datacenter1/network/F01
+ switch_name: DVSwitch02
+ delegate_to: localhost
+ register: dvswitch_info
+
+- name: Gather some info from a dvswitch using the vSphere API output schema
+ community.vmware.vmware_dvswitch_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ schema: vsphere
+ properties:
+ - summary.name
+ - summary.numPorts
+ - config.maxMtu
+ - overallStatus
+ switch_name: DVSwitch01
+ register: dvswitch_info
+'''
+
+RETURN = r'''
+distributed_virtual_switches:
+ description: list of dictionary of dvswitch and their information
+ returned: always
+ type: list
+ sample:
+ [
+ {
+ "configure": {
+ "folder": "network",
+ "hosts": [
+ "esxi-test-02.local",
+ "esxi-test-01.local"
+ ],
+ "settings": {
+ "healthCheck": {
+ "TeamingHealthCheckConfig": false,
+ "VlanMtuHealthCheckConfig": false
+ },
+ "netflow": {
+ "activeFlowTimeout": 60,
+ "collectorIpAddress": "",
+ "collectorPort": 0,
+ "idleFlowTimeout": 15,
+ "internalFlowsOnly": false,
+ "observationDomainId": 0,
+ "samplingRate": 0,
+ "switchIpAddress": null
+ },
+ "properties": {
+ "administratorContact": {
+ "contact": null,
+ "name": null
+ },
+ "advanced": {
+ "maxMtu": 1500,
+ "multicastFilteringMode": "legacyFiltering"
+ },
+ "discoveryProtocol": {
+ "operation": "listen",
+ "protocol": "cdp"
+ },
+ "general": {
+ "ioControl": true,
+ "name": "DVSwitch01",
+ "numPorts": 10,
+ "numUplinks": 1,
+ "vendor": "VMware, Inc.",
+ "version": "6.6.0"
+ }
+ },
+ "privateVlan": []
+ }
+ },
+ "uuid": "50 30 99 9c a7 60 8a 4f-05 9f e7 b5 da df 8f 17"
+ }
+ ]
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, find_obj, find_object_by_name
+from ansible.module_utils.basic import AnsibleModule
+
+
+class VMwareDvSwitchInfoManager(PyVmomi):
+ """Gathers info about distributed virtual switches, optionally scoped to a folder/name."""
+
+ def __init__(self, module):
+ """Resolve the optional folder and build self.switch_objs.
+
+ switch_objs is a list of DistributedVirtualSwitch objects, or None when
+ a specific switch_name was requested but not found.
+ """
+ super(VMwareDvSwitchInfoManager, self).__init__(module)
+ self.folder = self.params['folder']
+ self.switch_name = self.params['switch_name']
+
+ folder_obj = None
+ if self.folder:
+ folder_obj = self.content.searchIndex.FindByInventoryPath(self.folder)
+ if not folder_obj:
+ self.module.fail_json(msg="Failed to find folder specified by %s" % self.folder)
+
+ if self.switch_name:
+ self.switch_objs = [find_object_by_name(self.content, self.switch_name, vim.DistributedVirtualSwitch, folder_obj)]
+ if None in self.switch_objs:
+ self.switch_objs = None
+ else:
+ # No name given: collect every DVS in the inventory.
+ self.switch_objs = find_obj(self.content, [vim.DistributedVirtualSwitch], '', first=False)
+
+ def all_info(self):
+ """Exit the module with the legacy 'summary' schema for each found switch."""
+ distributed_virtual_switches = []
+ if not self.switch_objs:
+ self.module.exit_json(changed=False, distributed_virtual_switches=distributed_virtual_switches)
+
+ for switch_obj in self.switch_objs:
+ # Private VLAN entries, if any are configured on the switch.
+ pvlans = []
+ if switch_obj.config.pvlanConfig:
+ for vlan in switch_obj.config.pvlanConfig:
+ pvlans.append({
+ 'primaryVlanId': vlan.primaryVlanId,
+ 'secondaryVlanId': vlan.secondaryVlanId,
+ 'pvlanType': vlan.pvlanType
+ })
+
+ # Names of hosts attached to the switch.
+ host_members = []
+ if switch_obj.summary.hostMember:
+ for host in switch_obj.summary.hostMember:
+ host_members.append(host.name)
+
+ # Enable flags per health check type, keyed by the config class name.
+ health_check = {}
+ for health_config in switch_obj.config.healthCheckConfig:
+ if isinstance(health_config, vim.dvs.VmwareDistributedVirtualSwitch.VlanMtuHealthCheckConfig):
+ health_check['VlanMtuHealthCheckConfig'] = health_config.enable
+ elif isinstance(health_config, vim.dvs.VmwareDistributedVirtualSwitch.TeamingHealthCheckConfig):
+ health_check['TeamingHealthCheckConfig'] = health_config.enable
+
+ distributed_virtual_switches.append({
+ 'configure': {
+ 'settings': {
+ 'properties': {
+ 'general': {
+ 'name': switch_obj.name,
+ 'vendor': switch_obj.config.productInfo.vendor,
+ 'version': switch_obj.config.productInfo.version,
+ 'numUplinks': len(switch_obj.config.uplinkPortPolicy.uplinkPortName),
+ 'numPorts': switch_obj.summary.numPorts,
+ 'ioControl': switch_obj.config.networkResourceManagementEnabled,
+ },
+ 'advanced': {
+ 'maxMtu': switch_obj.config.maxMtu,
+ 'multicastFilteringMode': switch_obj.config.multicastFilteringMode,
+ },
+ 'discoveryProtocol': {
+ 'protocol': switch_obj.config.linkDiscoveryProtocolConfig.protocol,
+ 'operation': switch_obj.config.linkDiscoveryProtocolConfig.operation,
+ },
+ 'administratorContact': {
+ 'name': switch_obj.config.contact.name,
+ 'contact': switch_obj.config.contact.contact
+ }
+ },
+ 'privateVlan': pvlans,
+ 'netflow': {
+ 'switchIpAddress': switch_obj.config.switchIpAddress,
+ 'collectorIpAddress': switch_obj.config.ipfixConfig.collectorIpAddress,
+ 'collectorPort': switch_obj.config.ipfixConfig.collectorPort,
+ 'observationDomainId': switch_obj.config.ipfixConfig.observationDomainId,
+ 'activeFlowTimeout': switch_obj.config.ipfixConfig.activeFlowTimeout,
+ 'idleFlowTimeout': switch_obj.config.ipfixConfig.idleFlowTimeout,
+ 'samplingRate': switch_obj.config.ipfixConfig.samplingRate,
+ 'internalFlowsOnly': switch_obj.config.ipfixConfig.internalFlowsOnly
+ },
+ 'healthCheck': health_check
+ },
+ 'hosts': host_members,
+ 'folder': switch_obj.parent.name,
+ 'name': switch_obj.name,
+ },
+ 'uuid': switch_obj.uuid,
+ })
+
+ self.module.exit_json(changed=False, distributed_virtual_switches=distributed_virtual_switches)
+
+ def properties_facts(self):
+ """Exit the module with the 'vsphere' schema (raw API properties via to_json).
+
+ NOTE(review): unlike all_info(), this does not guard against
+ self.switch_objs being None (switch_name not found) — confirm whether
+ that path can be reached with schema=vsphere.
+ """
+ distributed_virtual_switches = []
+ for switch_obj in self.switch_objs:
+ distributed_virtual_switches.append(self.to_json(switch_obj, self.params.get('properties')))
+
+ self.module.exit_json(changed=False, distributed_virtual_switches=distributed_virtual_switches)
+
+
+def main():
+ """Module entry point: parse arguments and dispatch on the requested output schema."""
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ folder=dict(type='str', required=False),
+ switch_name=dict(type='str', required=False, aliases=['switch', 'dvswitch']),
+ schema=dict(type='str', choices=['summary', 'vsphere'], default='summary'),
+ properties=dict(type='list', required=False, elements='str')
+ )
+
+ # Info-only module: check mode is always safe.
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ vmware_dvswitch_info_mgr = VMwareDvSwitchInfoManager(module)
+
+ if module.params['schema'] == 'summary':
+ vmware_dvswitch_info_mgr.all_info()
+ else:
+ vmware_dvswitch_info_mgr.properties_facts()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_dvswitch_lacp.py b/ansible_collections/community/vmware/plugins/modules/vmware_dvswitch_lacp.py
new file mode 100644
index 000000000..7e86b96a6
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_dvswitch_lacp.py
@@ -0,0 +1,431 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_dvswitch_lacp
+short_description: Manage LACP configuration on a Distributed Switch
+description:
+ - This module can be used to configure Link Aggregation Control Protocol (LACP) support mode and Link Aggregation Groups (LAGs).
+author:
+- Christian Kotte (@ckotte)
+notes:
+  - You need to run the task two times if you want to remove all LAGs and change the support mode to 'basic'.
+options:
+ switch:
+ description:
+ - The name of the Distributed Switch to manage.
+ required: true
+ aliases: ['dvswitch']
+ type: str
+ support_mode:
+ description:
+ - The LACP support mode.
+ - 'C(basic): One Link Aggregation Control Protocol group in the switch (singleLag).'
+ - 'C(enhanced): Multiple Link Aggregation Control Protocol groups in the switch (multipleLag).'
+ type: str
+ default: 'basic'
+ choices: ['basic', 'enhanced']
+ link_aggregation_groups:
+ description:
+      - Can only be used if C(support_mode) is set to C(enhanced).
+ suboptions:
+ name:
+ type: str
+ description: Name of the LAG.
+ uplink_number:
+ type: int
+ description:
+ - Number of uplinks.
+          - Can be 1 to 30.
+ mode:
+ type: str
+ description:
+ - The negotiating state of the uplinks/ports.
+ choices: [ active, passive ]
+ load_balancing_mode:
+ type: str
+ description:
+ - Load balancing algorithm.
+          - 'Valid values are as follows:'
+ - '- srcTcpUdpPort: Source TCP/UDP port number.'
+ - '- srcDestIpTcpUdpPortVlan: Source and destination IP, source and destination TCP/UDP port number and VLAN.'
+ - '- srcIpVlan: Source IP and VLAN.'
+ - '- srcDestTcpUdpPort: Source and destination TCP/UDP port number.'
+ - '- srcMac: Source MAC address.'
+ - '- destIp: Destination IP.'
+ - '- destMac: Destination MAC address.'
+ - '- vlan: VLAN only.'
+ - '- srcDestIp: Source and Destination IP.'
+ - '- srcIpTcpUdpPortVlan: Source IP, TCP/UDP port number and VLAN.'
+ - '- srcDestIpTcpUdpPort: Source and destination IP and TCP/UDP port number.'
+ - '- srcDestMac: Source and destination MAC address.'
+ - '- destIpTcpUdpPort: Destination IP and TCP/UDP port number.'
+ - '- srcPortId: Source Virtual Port Id.'
+ - '- srcIp: Source IP.'
+ - '- srcIpTcpUdpPort: Source IP and TCP/UDP port number.'
+ - '- destIpTcpUdpPortVlan: Destination IP, TCP/UDP port number and VLAN.'
+ - '- destTcpUdpPort: Destination TCP/UDP port number.'
+ - '- destIpVlan: Destination IP and VLAN.'
+ - '- srcDestIpVlan: Source and destination IP and VLAN.'
+ - Please see examples for more information.
+ default: 'srcDestIpTcpUdpPortVlan'
+ elements: dict
+ type: list
+ default: []
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Enable enhanced mode on a Distributed Switch
+ community.vmware.vmware_dvswitch_lacp:
+ hostname: '{{ inventory_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ switch: dvSwitch
+ support_mode: enhanced
+ validate_certs: "{{ validate_vcenter_certs }}"
+ delegate_to: localhost
+ loop_control:
+ label: "{{ item.name }}"
+ with_items: "{{ vcenter_distributed_switches }}"
+
+- name: Enable enhanced mode and create two LAGs on a Distributed Switch
+ community.vmware.vmware_dvswitch_lacp:
+ hostname: '{{ inventory_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ switch: dvSwitch
+ support_mode: enhanced
+ link_aggregation_groups:
+ - name: lag1
+ uplink_number: 2
+ mode: active
+ load_balancing_mode: srcDestIpTcpUdpPortVlan
+ - name: lag2
+ uplink_number: 2
+ mode: passive
+ load_balancing_mode: srcDestIp
+ validate_certs: "{{ validate_vcenter_certs }}"
+ delegate_to: localhost
+ loop_control:
+ label: "{{ item.name }}"
+ with_items: "{{ vcenter_distributed_switches }}"
+'''
+
+RETURN = r'''
+result:
+    description: Information about the performed operation.
+ returned: always
+ type: str
+ sample: {
+ "changed": true,
+ "dvswitch": "dvSwitch",
+ "link_aggregation_groups": [
+ {"load_balancing_mode": "srcDestIpTcpUdpPortVlan", "mode": "active", "name": "lag1", "uplink_number": 2},
+ {"load_balancing_mode": "srcDestIp", "mode": "active", "name": "lag2", "uplink_number": 2}
+ ],
+ "link_aggregation_groups_previous": [],
+ "support_mode": "enhanced",
+ "result": "lacp lags changed"
+ }
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi, TaskError, find_dvs_by_name, vmware_argument_spec, wait_for_task
+)
+
+
+class VMwareDvSwitchLacp(PyVmomi):
+ """Class to manage a LACP on a Distributed Virtual Switch"""
+
+ def __init__(self, module):
+ super(VMwareDvSwitchLacp, self).__init__(module)
+ self.switch_name = self.module.params['switch']
+ self.support_mode = self.module.params['support_mode']
+ self.link_aggregation_groups = self.module.params['link_aggregation_groups']
+ if self.support_mode == 'basic' and (
+ self.link_aggregation_groups and not (
+ len(self.link_aggregation_groups) == 1 and self.link_aggregation_groups[0] == '')):
+ self.module.fail_json(
+ msg="LAGs can only be configured if 'support_mode' is set to 'enhanced'!"
+ )
+ self.dvs = find_dvs_by_name(self.content, self.switch_name)
+ if self.dvs is None:
+ self.module.fail_json(msg="Failed to find DVS %s" % self.switch_name)
+
+ def ensure(self):
+ """Manage LACP configuration"""
+ changed = changed_support_mode = changed_lags = False
+ results = dict(changed=changed)
+ results['dvswitch'] = self.switch_name
+ changed_list = []
+ message = ''
+
+ spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
+ spec.configVersion = self.dvs.config.configVersion
+
+ # Check support mode
+ results['support_mode'] = self.support_mode
+ lacp_support_mode = self.get_lacp_support_mode(self.support_mode)
+ if self.dvs.config.lacpApiVersion != lacp_support_mode:
+ changed = changed_support_mode = True
+ changed_list.append("support mode")
+ results['support_mode_previous'] = self.get_lacp_support_mode(self.dvs.config.lacpApiVersion)
+ spec.lacpApiVersion = lacp_support_mode
+
+ # Check LAGs
+ results["link_aggregation_groups"] = self.link_aggregation_groups
+ if self.link_aggregation_groups and not (
+ len(self.link_aggregation_groups) == 1
+ and self.link_aggregation_groups[0] == ""
+ ):
+ if self.dvs.config.lacpGroupConfig:
+ lacp_lag_list = []
+ # Check if desired LAGs are configured
+ for lag in self.link_aggregation_groups:
+ (
+ lag_name,
+ lag_mode,
+ lag_uplink_number,
+ lag_load_balancing_mode,
+ ) = self.get_lacp_lag_options(lag)
+ lag_found = False
+ for lacp_group in self.dvs.config.lacpGroupConfig:
+ if lacp_group.name == lag_name:
+ lag_found = True
+ if (
+ lag_mode != lacp_group.mode
+ or lag_uplink_number != lacp_group.uplinkNum
+ or lag_load_balancing_mode
+ != lacp_group.loadbalanceAlgorithm
+ ):
+ changed = changed_lags = True
+ lacp_lag_list.append(
+ self.create_lacp_group_spec(
+ "edit",
+ lacp_group.key,
+ lag_name,
+ lag_uplink_number,
+ lag_mode,
+ lag_load_balancing_mode,
+ )
+ )
+ break
+ if lag_found is False:
+ changed = changed_lags = True
+ lacp_lag_list.append(
+ self.create_lacp_group_spec(
+ 'add', None, lag_name, lag_uplink_number, lag_mode, lag_load_balancing_mode
+ )
+ )
+ # Check if LAGs need to be removed
+ for lacp_group in self.dvs.config.lacpGroupConfig:
+ lag_found = False
+ for lag in self.link_aggregation_groups:
+ result = self.get_lacp_lag_options(lag)
+ if lacp_group.name == result[0]:
+ lag_found = True
+ break
+ if lag_found is False:
+ changed = changed_lags = True
+ lacp_lag_list.append(
+ self.create_lacp_group_spec('remove', lacp_group.key, lacp_group.name, None, None, None)
+ )
+ else:
+ changed = changed_lags = True
+ lacp_lag_list = []
+ for lag in self.link_aggregation_groups:
+ lag_name, lag_mode, lag_uplink_number, lag_load_balancing_mode = self.get_lacp_lag_options(lag)
+ lacp_lag_list.append(
+ self.create_lacp_group_spec(
+ 'add', None, lag_name, lag_uplink_number, lag_mode, lag_load_balancing_mode
+ )
+ )
+ else:
+ if self.dvs.config.lacpGroupConfig:
+ changed = changed_lags = True
+ lacp_lag_list = []
+ for lacp_group in self.dvs.config.lacpGroupConfig:
+ lacp_lag_list.append(
+ self.create_lacp_group_spec('remove', lacp_group.key, lacp_group.name, None, None, None)
+ )
+ if changed_lags:
+ changed_list.append("link aggregation groups")
+ current_lags_list = []
+ for lacp_group in self.dvs.config.lacpGroupConfig:
+ temp_lag = dict()
+ temp_lag['name'] = lacp_group.name
+ temp_lag['uplink_number'] = lacp_group.uplinkNum
+ temp_lag['mode'] = lacp_group.mode
+ temp_lag['load_balancing_mode'] = lacp_group.loadbalanceAlgorithm
+ current_lags_list.append(temp_lag)
+ results['link_aggregation_groups_previous'] = current_lags_list
+
+ if changed:
+ if self.module.check_mode:
+ changed_suffix = ' would be changed'
+ else:
+ changed_suffix = ' changed'
+ if len(changed_list) > 2:
+ message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1])
+ elif len(changed_list) == 2:
+ message = ' and '.join(changed_list)
+ elif len(changed_list) == 1:
+ message = changed_list[0]
+ message += changed_suffix
+ if not self.module.check_mode:
+ if changed_support_mode and self.support_mode == 'basic' and changed_lags:
+ self.update_lacp_group_config(self.dvs, lacp_lag_list)
+ # NOTE: You need to run the task again to change the support mode to 'basic' as well
+ # No matter how long you sleep, you will always get the following error in vCenter:
+ # 'Cannot complete operation due to concurrent modification by another operation.'
+ # self.update_dvs_config(self.dvs, spec)
+ else:
+ if changed_support_mode:
+ self.update_dvs_config(self.dvs, spec)
+ if changed_lags:
+ self.update_lacp_group_config(self.dvs, lacp_lag_list)
+ else:
+ message = "LACP already configured properly"
+ results['changed'] = changed
+ results['result'] = message
+
+ self.module.exit_json(**results)
+
+ @staticmethod
+ def get_lacp_support_mode(mode):
+ """Get LACP support mode"""
+ return_mode = None
+ if mode == 'basic':
+ return_mode = 'singleLag'
+ elif mode == 'enhanced':
+ return_mode = 'multipleLag'
+ elif mode == 'singleLag':
+ return_mode = 'basic'
+ elif mode == 'multipleLag':
+ return_mode = 'enhanced'
+ return return_mode
+
+ def get_lacp_lag_options(self, lag):
+ """Get and check LACP LAG options"""
+ lag_name = lag.get('name', None)
+ if lag_name is None:
+ self.module.fail_json(msg="Please specify name in lag options as it's a required parameter")
+ lag_mode = lag.get('mode', None)
+ if lag_mode is None:
+ self.module.fail_json(msg="Please specify mode in lag options as it's a required parameter")
+ lag_uplink_number = lag.get('uplink_number', None)
+ if lag_uplink_number is None:
+ self.module.fail_json(msg="Please specify uplink_number in lag options as it's a required parameter")
+ try:
+ lag_uplink_number = int(lag_uplink_number)
+ except ValueError:
+ self.module.fail_json(msg="Failed to parse uplink_number in lag options")
+
+ if lag_uplink_number > 30:
+ self.module.fail_json(msg="More than 30 uplinks are not supported in a single LAG!")
+ lag_load_balancing_mode = lag.get('load_balancing_mode', None)
+ supported_lb_modes = ['srcTcpUdpPort', 'srcDestIpTcpUdpPortVlan', 'srcIpVlan', 'srcDestTcpUdpPort',
+ 'srcMac', 'destIp', 'destMac', 'vlan', 'srcDestIp', 'srcIpTcpUdpPortVlan',
+ 'srcDestIpTcpUdpPort', 'srcDestMac', 'destIpTcpUdpPort', 'srcPortId', 'srcIp',
+ 'srcIpTcpUdpPort', 'destIpTcpUdpPortVlan', 'destTcpUdpPort', 'destIpVlan', 'srcDestIpVlan']
+ if lag_load_balancing_mode is None:
+ self.module.fail_json(msg="Please specify load_balancing_mode in lag options as it's a required parameter")
+ elif lag_load_balancing_mode not in supported_lb_modes:
+ self.module.fail_json(msg="The specified load balancing mode '%s' isn't supported!" % lag_load_balancing_mode)
+ return lag_name, lag_mode, lag_uplink_number, lag_load_balancing_mode
+
+ @staticmethod
+ def create_lacp_group_spec(operation, key, name, uplink_number, mode, load_balancing_mode):
+ """
+ Create LACP group spec
+ operation: add, edit, or remove
+ Returns: LACP group spec
+ """
+ lacp_spec = vim.dvs.VmwareDistributedVirtualSwitch.LacpGroupSpec()
+ lacp_spec.operation = operation
+ lacp_spec.lacpGroupConfig = vim.dvs.VmwareDistributedVirtualSwitch.LacpGroupConfig()
+ lacp_spec.lacpGroupConfig.name = name
+ if operation in ('edit', 'remove'):
+ lacp_spec.lacpGroupConfig.key = key
+ if not operation == 'remove':
+ lacp_spec.lacpGroupConfig.uplinkNum = uplink_number
+ lacp_spec.lacpGroupConfig.mode = mode
+ lacp_spec.lacpGroupConfig.loadbalanceAlgorithm = load_balancing_mode
+ # greyed out in vSphere Client!?
+ # lacp_spec.vlan = vim.dvs.VmwareDistributedVirtualSwitch.LagVlanConfig()
+ # lacp_spec.vlan.vlanId = [vim.NumericRange(...)]
+ # lacp_spec.ipfix = vim.dvs.VmwareDistributedVirtualSwitch.LagIpfixConfig()
+ # lacp_spec.ipfix.ipfixEnabled = True/False
+ return lacp_spec
+
+ def update_dvs_config(self, switch_object, spec):
+ """Update DVS config"""
+ try:
+ task = switch_object.ReconfigureDvs_Task(spec)
+ result = wait_for_task(task)
+ except TaskError as invalid_argument:
+ self.module.fail_json(
+ msg="Failed to update DVS : %s" % to_native(invalid_argument)
+ )
+ return result
+
+ def update_lacp_group_config(self, switch_object, lacp_group_spec):
+ """Update LACP group config"""
+ try:
+ task = switch_object.UpdateDVSLacpGroupConfig_Task(lacpGroupSpec=lacp_group_spec)
+ result = wait_for_task(task)
+ except vim.fault.DvsFault as dvs_fault:
+ self.module.fail_json(msg="Update failed due to DVS fault : %s" % to_native(dvs_fault))
+ except vmodl.fault.NotSupported as not_supported:
+ self.module.fail_json(
+ msg="Multiple Link Aggregation Control Protocol groups not supported on the switch : %s" %
+ to_native(not_supported)
+ )
+ except TaskError as invalid_argument:
+ self.module.fail_json(
+ msg="Failed to update Link Aggregation Group : %s" % to_native(invalid_argument)
+ )
+ return result
+
+
+def main():
+ """Main"""
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ dict(
+ switch=dict(required=True, aliases=['dvswitch']),
+ support_mode=dict(default='basic', choices=['basic', 'enhanced']),
+ link_aggregation_groups=dict(default=[], type='list', elements='dict'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ vmware_dvswitch_lacp = VMwareDvSwitchLacp(module)
+ vmware_dvswitch_lacp.ensure()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_dvswitch_nioc.py b/ansible_collections/community/vmware/plugins/modules/vmware_dvswitch_nioc.py
new file mode 100644
index 000000000..b0ee71abc
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_dvswitch_nioc.py
@@ -0,0 +1,422 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, VMware, Inc.
+# Copyright: (c) 2019, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_dvswitch_nioc
+short_description: Manage distributed switch Network IO Control
+description:
+ - This module can be used to manage distributed switch Network IO Control configurations.
+author:
+ - Joseph Andreatta (@vmwjoseph)
+options:
+ switch:
+ description:
+ - The name of the distributed switch.
+ required: true
+ aliases: ['dvswitch']
+ type: str
+ version:
+ description:
+ - Network IO control version.
+ choices:
+ - 'version2'
+ - 'version3'
+ required: false
+ type: str
+ state:
+ description:
+ - Enable or disable NIOC on the distributed switch.
+ default: 'present'
+ choices: ['present', 'absent']
+ required: false
+ type: str
+ resources:
+ description:
+      - List of dicts containing resource configurations.
+ suboptions:
+ name:
+ description:
+ - Resource name.
+ choices: ["faultTolerance", "hbr", "iSCSI", "management", "nfs", "vdp", "virtualMachine", "vmotion", "vsan", "backupNfc", "nvmetcp"]
+ required: true
+ type: str
+ limit:
+ description:
+ - The maximum allowed usage for a traffic class belonging to this resource pool per host physical NIC.
+ default: -1
+ type: int
+ reservation:
+ description:
+ - Ignored if NIOC version is set to version2
+ - Amount of bandwidth resource that is guaranteed available to the host infrastructure traffic class.
+ - If the utilization is less than the reservation, the extra bandwidth is used for other host infrastructure traffic class types.
+ - Reservation is not allowed to exceed the value of limit, if limit is set.
+ - Unit is Mbits/sec.
+          - Only honored when C(version) is set to C(version3).
+          - The default of 0 does not reserve any bandwidth.
+ type: int
+ default: 0
+
+ shares_level:
+ description:
+      - The allocation level.
+ - The level is a simplified view of shares.
+ - Levels map to a pre-determined set of numeric values for shares.
+ choices: [ "low", "normal", "high", "custom" ]
+ type: str
+ shares:
+ description:
+ - The number of shares allocated.
+ - Ignored unless C(shares_level) is "custom".
+ type: int
+ required: false
+ type: list
+ default: []
+ elements: dict
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+RETURN = r'''
+dvswitch_nioc_status:
+ description:
+ - result of the changes
+ returned: success
+ type: str
+resources_changed:
+ description:
+ - list of resources which were changed
+ returned: success
+ type: list
+ sample: [ "vmotion", "vsan" ]
+'''
+
+EXAMPLES = r'''
+- name: Enable NIOC
+ community.vmware.vmware_dvswitch_nioc:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ switch: dvSwitch
+ version: version3
+ resources:
+ - name: vmotion
+ limit: -1
+ reservation: 128
+ shares_level: normal
+ - name: vsan
+ limit: -1
+ shares_level: custom
+ shares: 99
+ reservation: 256
+ state: present
+ delegate_to: localhost
+
+- name: Disable NIOC
+ community.vmware.vmware_dvswitch_nioc:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ switch: dvSwitch
+ state: absent
+ delegate_to: localhost
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi,
+ find_dvs_by_name,
+ vmware_argument_spec,
+ wait_for_task)
+
+
+class VMwareDVSwitchNIOC(PyVmomi):
+
+ def __init__(self, module):
+ super(VMwareDVSwitchNIOC, self).__init__(module)
+ self.dvs = None
+ self.resource_changes = list()
+ self.switch = module.params['switch']
+ self.version = module.params.get('version')
+ self.state = module.params['state']
+ self.resources = module.params.get('resources')
+ self.result = {
+ 'changed': False,
+ 'dvswitch_nioc_status': 'Unchanged',
+ 'resources_changed': list(),
+ }
+
+ def process_state(self):
+ nioc_states = {
+ 'absent': {
+ 'present': self.state_disable_nioc,
+ 'absent': self.state_exit,
+ },
+ 'present': {
+ 'version': self.state_update_nioc_version,
+ 'update': self.state_update_nioc_resources,
+ 'present': self.state_exit,
+ 'absent': self.state_enable_nioc,
+ }
+ }
+ nioc_states[self.state][self.check_nioc_state()]()
+ self.state_exit()
+
+ def state_exit(self):
+ self.module.exit_json(**self.result)
+
+ def state_disable_nioc(self):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.set_nioc_enabled(False)
+ self.result['dvswitch_nioc_status'] = 'Disabled NIOC'
+
+ def state_enable_nioc(self):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.set_nioc_enabled(True)
+ self.set_nioc_version()
+ self.result['dvswitch_nioc_status'] = "Enabled NIOC with version %s" % self.version
+
+ # Check resource state and apply all required changes
+ if self.check_resources() == 'update':
+ self.set_nioc_resources(self.resource_changes)
+
+ def state_update_nioc_version(self):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.set_nioc_version()
+ self.result['dvswitch_nioc_status'] = "Set NIOC to version %s" % self.version
+
+ # Check resource state and apply all required changes
+ if self.check_resources() == 'update':
+ self.set_nioc_resources(self.resource_changes)
+
+ def state_update_nioc_resources(self):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.result['dvswitch_nioc_status'] = "Resource configuration modified"
+ self.set_nioc_resources(self.resource_changes)
+
+ def set_nioc_enabled(self, state):
+ try:
+ self.dvs.EnableNetworkResourceManagement(enable=state)
+ except vim.fault.DvsFault as dvs_fault:
+ self.module.fail_json(msg='DvsFault while setting NIOC enabled=%r: %s' % (state, to_native(dvs_fault.msg)))
+ except vim.fault.DvsNotAuthorized as auth_fault:
+ self.module.fail_json(msg='Not authorized to set NIOC enabled=%r: %s' % (state, to_native(auth_fault.msg)))
+ except vmodl.fault.NotSupported as support_fault:
+ self.module.fail_json(msg='NIOC not supported by DVS: %s' % to_native(support_fault.msg))
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg='RuntimeFault while setting NIOC enabled=%r: %s' % (state, to_native(runtime_fault.msg)))
+
+ def set_nioc_version(self):
+ upgrade_spec = vim.DistributedVirtualSwitch.ConfigSpec()
+ upgrade_spec.configVersion = self.dvs.config.configVersion
+ if not self.version:
+ self.version = 'version2'
+ upgrade_spec.networkResourceControlVersion = self.version
+
+ try:
+ task = self.dvs.ReconfigureDvs_Task(spec=upgrade_spec)
+ wait_for_task(task)
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg="RuntimeFault when setting NIOC version: %s " % to_native(runtime_fault.msg))
+
+ def check_nioc_state(self):
+ self.dvs = find_dvs_by_name(self.content, self.switch)
+
+ if self.dvs is None:
+ self.module.fail_json(msg='DVS %s was not found.' % self.switch)
+ else:
+ if not self.dvs.config.networkResourceManagementEnabled:
+ return 'absent'
+ if self.version and self.dvs.config.networkResourceControlVersion != self.version:
+ return 'version'
+
+ # NIOC is enabled and the correct version, so return the state of the resources
+ return self.check_resources()
+
+ def check_resources(self):
+ self.dvs = find_dvs_by_name(self.content, self.switch)
+ if self.dvs is None:
+ self.module.fail_json(msg="DVS named '%s' was not found" % self.switch)
+
+ for resource in self.resources:
+ if self.check_resource_state(resource) == 'update':
+ self.resource_changes.append(resource)
+ self.result['resources_changed'].append(resource['name'])
+
+ if len(self.resource_changes) > 0:
+ return 'update'
+ return 'present'
+
+ def check_resource_state(self, resource):
+ resource_cfg = self.find_netioc_by_key(resource['name'])
+ if resource_cfg is None:
+ self.module.fail_json(msg="NetIOC resource named '%s' was not found" % resource['name'])
+
+ rc = {
+ "limit": resource_cfg.allocationInfo.limit,
+ "shares_level": resource_cfg.allocationInfo.shares.level
+ }
+ if resource_cfg.allocationInfo.shares.level == 'custom':
+ rc["shares"] = resource_cfg.allocationInfo.shares.shares
+ if self.dvs.config.networkResourceControlVersion == "version3":
+ rc["reservation"] = resource_cfg.allocationInfo.reservation
+
+ for k, v in rc.items():
+ if k in resource and v != resource[k]:
+ return 'update'
+ return 'valid'
+
+ def set_nioc_resources(self, resources):
+ if self.dvs.config.networkResourceControlVersion == 'version3':
+ self._update_version3_resources(resources)
+ elif self.dvs.config.networkResourceControlVersion == 'version2':
+ self._update_version2_resources(resources)
+
+ def _update_version3_resources(self, resources):
+ allocations = list()
+
+ for resource in resources:
+ allocation = vim.DistributedVirtualSwitch.HostInfrastructureTrafficResource()
+ allocation.allocationInfo = vim.DistributedVirtualSwitch.HostInfrastructureTrafficResource.ResourceAllocation()
+ allocation.key = resource['name']
+ if 'limit' in resource:
+ allocation.allocationInfo.limit = resource['limit']
+ if 'reservation' in resource:
+ allocation.allocationInfo.reservation = resource['reservation']
+ if 'shares_level' in resource:
+ allocation.allocationInfo.shares = vim.SharesInfo()
+ allocation.allocationInfo.shares.level = resource['shares_level']
+ if 'shares' in resource and resource['shares_level'] == 'custom':
+ allocation.allocationInfo.shares.shares = resource['shares']
+ elif resource['shares_level'] == 'custom':
+ self.module.fail_json(
+ msg="Resource %s, shares_level set to custom but shares not specified" % resource['name']
+ )
+
+ allocations.append(allocation)
+
+ spec = vim.DistributedVirtualSwitch.ConfigSpec()
+ spec.configVersion = self.dvs.config.configVersion
+ spec.infrastructureTrafficResourceConfig = allocations
+
+ task = self.dvs.ReconfigureDvs_Task(spec)
+ wait_for_task(task)
+
+ def _update_version2_resources(self, resources):
+ allocations = list()
+
+ for resource in resources:
+ resource_cfg = self.find_netioc_by_key(resource['name'])
+ allocation = vim.DVSNetworkResourcePoolConfigSpec()
+ allocation.allocationInfo = vim.DVSNetworkResourcePoolAllocationInfo()
+ allocation.key = resource['name']
+ allocation.configVersion = resource_cfg.configVersion
+ if 'limit' in resource:
+ allocation.allocationInfo.limit = resource['limit']
+ if 'shares_level' in resource:
+ allocation.allocationInfo.shares = vim.SharesInfo()
+ allocation.allocationInfo.shares.level = resource['shares_level']
+ if 'shares' in resource and resource['shares_level'] == 'custom':
+ allocation.allocationInfo.shares.shares = resource['shares']
+
+ allocations.append(allocation)
+
+ self.dvs.UpdateNetworkResourcePool(allocations)
+
+ def find_netioc_by_key(self, resource_name):
+ config = None
+ if self.dvs.config.networkResourceControlVersion == "version3":
+ config = self.dvs.config.infrastructureTrafficResourceConfig
+ elif self.dvs.config.networkResourceControlVersion == "version2":
+ config = self.dvs.networkResourcePool
+
+ for obj in config:
+ if obj.key == resource_name:
+ return obj
+ return None
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+
+ argument_spec.update(
+ dict(
+ switch=dict(required=True, type='str', aliases=['dvswitch']),
+ version=dict(type='str', choices=['version2', 'version3']),
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
+ resources=dict(
+ type='list',
+ default=list(),
+ elements='dict',
+ options=dict(
+ name=dict(
+ type='str',
+ required=True,
+ choices=[
+ 'faultTolerance',
+ 'hbr',
+ 'iSCSI',
+ 'management',
+ 'nfs',
+ 'vdp',
+ 'virtualMachine',
+ 'vmotion',
+ 'vsan',
+ 'backupNfc',
+ 'nvmetcp'
+ ]
+ ),
+ limit=dict(type='int', default=-1),
+ shares_level=dict(
+ type='str',
+ required=False,
+ choices=[
+ 'low',
+ 'normal',
+ 'high',
+ 'custom'
+ ]
+ ),
+ shares=dict(type='int', required=False),
+ reservation=dict(type='int', default=0)
+ )
+ ),
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ try:
+ vmware_dvswitch_nioc = VMwareDVSwitchNIOC(module)
+ vmware_dvswitch_nioc.process_state()
+ except vmodl.RuntimeFault as runtime_fault:
+ module.fail_json(msg=to_native(runtime_fault.msg))
+ except vmodl.MethodFault as method_fault:
+ module.fail_json(msg=to_native(method_fault.msg))
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_dvswitch_pvlans.py b/ansible_collections/community/vmware/plugins/modules/vmware_dvswitch_pvlans.py
new file mode 100644
index 000000000..ed84a2116
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_dvswitch_pvlans.py
@@ -0,0 +1,526 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_dvswitch_pvlans
+short_description: Manage Private VLAN configuration of a Distributed Switch
+description:
+ - This module can be used to configure Private VLANs (PVLANs) on a Distributed Switch.
+author:
+- Christian Kotte (@ckotte)
+options:
+ switch:
+ description:
+ - The name of the Distributed Switch.
+ type: str
+ required: true
+ aliases: ['dvswitch']
+ primary_pvlans:
+ description:
+ - A list of VLAN IDs that should be configured as Primary PVLANs.
+ - If C(primary_pvlans) isn't specified, all PVLANs will be deleted if present.
+ - Each member of the list requires primary_pvlan_id (int) set.
+ - The secondary promiscuous PVLAN will be created automatically.
+ - If C(secondary_pvlans) isn't specified, the primary PVLANs and each secondary promiscuous PVLAN will be created.
+ - Please see examples for more information.
+ type: list
+ default: []
+ elements: dict
+ secondary_pvlans:
+ description:
+ - A list of VLAN IDs that should be configured as Secondary PVLANs.
+ - 'C(primary_pvlans) need to be specified to create any Secondary PVLAN.'
+ - If C(primary_pvlans) isn't specified, all PVLANs will be deleted if present.
+ - Each member of the list requires primary_pvlan_id (int), secondary_pvlan_id (int), and pvlan_type (str) to be set.
+ - The type of the secondary PVLAN can be isolated or community. The secondary promiscuous PVLAN will be created automatically.
+ - Please see examples for more information.
+ type: list
+ default: []
+ elements: dict
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Create PVLANs on a Distributed Switch
+ community.vmware.vmware_dvswitch_pvlans:
+ hostname: '{{ inventory_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ switch: dvSwitch
+ primary_pvlans:
+ - primary_pvlan_id: 1
+ - primary_pvlan_id: 4
+ secondary_pvlans:
+ - primary_pvlan_id: 1
+ secondary_pvlan_id: 2
+ pvlan_type: isolated
+ - primary_pvlan_id: 1
+ secondary_pvlan_id: 3
+ pvlan_type: community
+ - primary_pvlan_id: 4
+ secondary_pvlan_id: 5
+ pvlan_type: community
+ delegate_to: localhost
+
+- name: Create primary PVLAN and secondary promiscuous PVLAN on a Distributed Switch
+ community.vmware.vmware_dvswitch_pvlans:
+ hostname: '{{ inventory_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ switch: dvSwitch
+ primary_pvlans:
+ - primary_pvlan_id: 1
+ delegate_to: localhost
+
+- name: Remove all PVLANs from a Distributed Switch
+ community.vmware.vmware_dvswitch_pvlans:
+ hostname: '{{ inventory_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ switch: dvSwitch
+ primary_pvlans: []
+ secondary_pvlans: []
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+result:
+ description: information about performed operation
+ returned: always
+ type: str
+ sample: {
+ "changed": true,
+ "dvswitch": "dvSwitch",
+ "private_vlans": [
+ {
+ "primary_pvlan_id": 1,
+ "pvlan_type": "promiscuous",
+ "secondary_pvlan_id": 1
+ },
+ {
+ "primary_pvlan_id": 1,
+ "pvlan_type": "isolated",
+ "secondary_pvlan_id": 2
+ },
+ {
+ "primary_pvlan_id": 1,
+ "pvlan_type": "community",
+ "secondary_pvlan_id": 3
+ }
+ ],
+ "private_vlans_previous": [],
+ "result": "All private VLANs added"
+ }
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi, TaskError, find_dvs_by_name, vmware_argument_spec, wait_for_task
+)
+
+
class VMwareDvSwitchPvlans(PyVmomi):
    """Manage Private VLAN (PVLAN) map entries on a Distributed Virtual Switch."""

    def __init__(self, module):
        """Collect parameters, run sanity checks, and locate the target DVS.

        An empty (or omitted) C(primary_pvlans) list means "remove all PVLANs".
        C(secondary_pvlans) is only honored when primary PVLANs are given.

        Raises (via fail_json): when the switch cannot be found or a
        sanity check on the requested PVLANs fails.
        """
        super(VMwareDvSwitchPvlans, self).__init__(module)
        self.switch_name = self.module.params['switch']
        if self.module.params['primary_pvlans']:
            self.primary_pvlans = self.module.params['primary_pvlans']
            if self.module.params['secondary_pvlans']:
                self.secondary_pvlans = self.module.params['secondary_pvlans']
            else:
                self.secondary_pvlans = None
            self.do_pvlan_sanity_checks()
        else:
            # No primary PVLANs requested -> delete-all mode
            self.primary_pvlans = None
            self.secondary_pvlans = None
        self.dvs = find_dvs_by_name(self.content, self.switch_name)
        if self.dvs is None:
            self.module.fail_json(msg="Failed to find DVS %s" % self.switch_name)

    def do_pvlan_sanity_checks(self):
        """Do sanity checks for primary and secondary PVLANs.

        Fails the module when: a primary PVLAN ID is duplicated, a secondary
        PVLAN ID is duplicated, a secondary PVLAN ID collides with a primary
        PVLAN ID, or a secondary PVLAN references a primary PVLAN that is
        not in C(primary_pvlans).
        """
        # Check if primary PVLANs are unique
        for primary_vlan in self.primary_pvlans:
            count = 0
            primary_pvlan_id = self.get_primary_pvlan_option(primary_vlan)
            for primary_vlan_2 in self.primary_pvlans:
                primary_pvlan_id_2 = self.get_primary_pvlan_option(primary_vlan_2)
                if primary_pvlan_id == primary_pvlan_id_2:
                    count += 1
            if count > 1:
                self.module.fail_json(
                    msg="The primary PVLAN ID '%s' must be unique!" % primary_pvlan_id
                )
        if self.secondary_pvlans:
            # Check if secondary PVLANs are unique
            for secondary_pvlan in self.secondary_pvlans:
                count = 0
                result = self.get_secondary_pvlan_options(secondary_pvlan)
                for secondary_pvlan_2 in self.secondary_pvlans:
                    result_2 = self.get_secondary_pvlan_options(secondary_pvlan_2)
                    if result[0] == result_2[0]:
                        count += 1
                if count > 1:
                    self.module.fail_json(
                        msg="The secondary PVLAN ID '%s' must be unique!" % result[0]
                    )
            # Check if secondary PVLANs are already used as primary PVLANs
            for primary_vlan in self.primary_pvlans:
                primary_pvlan_id = self.get_primary_pvlan_option(primary_vlan)
                for secondary_pvlan in self.secondary_pvlans:
                    result = self.get_secondary_pvlan_options(secondary_pvlan)
                    if primary_pvlan_id == result[0]:
                        self.module.fail_json(
                            msg="The secondary PVLAN ID '%s' is already used as a primary PVLAN!" %
                            result[0]
                        )
            # Check if a primary PVLAN is present for every secondary PVLANs
            for secondary_pvlan in self.secondary_pvlans:
                primary_pvlan_found = False
                result = self.get_secondary_pvlan_options(secondary_pvlan)
                for primary_vlan in self.primary_pvlans:
                    primary_pvlan_id = self.get_primary_pvlan_option(primary_vlan)
                    if result[1] == primary_pvlan_id:
                        primary_pvlan_found = True
                        break
                if not primary_pvlan_found:
                    self.module.fail_json(
                        msg="The primary PVLAN ID '%s' isn't defined for the secondary PVLAN ID '%s'!" %
                        (result[1], result[0])
                    )

    def ensure(self):
        """Converge the PVLAN configuration of the switch.

        Builds a DVS ConfigSpec with only the PVLAN map entries that need to
        be added or removed, records desired and previous state in the
        results, and reconfigures the switch unless running in check mode.
        Exits the module via exit_json().
        """
        changed = False
        results = dict(changed=changed)
        results['dvswitch'] = self.switch_name
        changed_list_add = []
        changed_list_remove = []

        config_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
        # Use the same version in the new spec; The version will be increased by one by the API automatically
        config_spec.configVersion = self.dvs.config.configVersion

        # Check Private VLANs
        results['private_vlans'] = None
        if self.primary_pvlans:
            # Build the full desired PVLAN map for reporting: one implicit
            # promiscuous entry per primary PVLAN plus all secondary PVLANs.
            desired_pvlan_list = []
            for primary_vlan in self.primary_pvlans:
                primary_pvlan_id = self.get_primary_pvlan_option(primary_vlan)
                temp_pvlan = dict()
                temp_pvlan['primary_pvlan_id'] = primary_pvlan_id
                temp_pvlan['secondary_pvlan_id'] = primary_pvlan_id
                temp_pvlan['pvlan_type'] = 'promiscuous'
                desired_pvlan_list.append(temp_pvlan)
            if self.secondary_pvlans:
                for secondary_pvlan in self.secondary_pvlans:
                    (secondary_pvlan_id,
                     secondary_vlan_primary_vlan_id,
                     pvlan_type) = self.get_secondary_pvlan_options(secondary_pvlan)
                    temp_pvlan = dict()
                    temp_pvlan['primary_pvlan_id'] = secondary_vlan_primary_vlan_id
                    temp_pvlan['secondary_pvlan_id'] = secondary_pvlan_id
                    temp_pvlan['pvlan_type'] = pvlan_type
                    desired_pvlan_list.append(temp_pvlan)
            results['private_vlans'] = desired_pvlan_list
            if self.dvs.config.pvlanConfig:
                pvlan_spec_list = []
                # Check if desired PVLANs are configured
                for primary_vlan in self.primary_pvlans:
                    primary_pvlan_id = self.get_primary_pvlan_option(primary_vlan)
                    promiscuous_found = False
                    for pvlan_object in self.dvs.config.pvlanConfig:
                        if pvlan_object.primaryVlanId == primary_pvlan_id and pvlan_object.pvlanType == 'promiscuous':
                            promiscuous_found = True
                            break
                    if not promiscuous_found:
                        changed = True
                        changed_list_add.append('promiscuous (%s, %s)' % (primary_pvlan_id, primary_pvlan_id))
                        pvlan_spec_list.append(
                            self.create_pvlan_config_spec(
                                operation='add',
                                primary_pvlan_id=primary_pvlan_id,
                                secondary_pvlan_id=primary_pvlan_id,
                                pvlan_type='promiscuous'
                            )
                        )
                    if self.secondary_pvlans:
                        for secondary_pvlan in self.secondary_pvlans:
                            (secondary_pvlan_id,
                             secondary_vlan_primary_vlan_id,
                             pvlan_type) = self.get_secondary_pvlan_options(secondary_pvlan)
                            if primary_pvlan_id == secondary_vlan_primary_vlan_id:
                                # Bug fix: reset the flag for every secondary PVLAN.
                                # Previously it was initialized only once per primary
                                # PVLAN, so a single already-configured secondary
                                # PVLAN left it True and suppressed adding all later
                                # missing secondary PVLANs of the same primary.
                                other_found = False
                                for pvlan_object_2 in self.dvs.config.pvlanConfig:
                                    if (pvlan_object_2.primaryVlanId == secondary_vlan_primary_vlan_id
                                            and pvlan_object_2.secondaryVlanId == secondary_pvlan_id
                                            and pvlan_object_2.pvlanType == pvlan_type):
                                        other_found = True
                                        break
                                if not other_found:
                                    changed = True
                                    changed_list_add.append(
                                        '%s (%s, %s)' % (pvlan_type, primary_pvlan_id, secondary_pvlan_id)
                                    )
                                    pvlan_spec_list.append(
                                        self.create_pvlan_config_spec(
                                            operation='add',
                                            primary_pvlan_id=primary_pvlan_id,
                                            secondary_pvlan_id=secondary_pvlan_id,
                                            pvlan_type=pvlan_type
                                        )
                                    )
                # Check if a PVLAN needs to be removed
                for pvlan_object in self.dvs.config.pvlanConfig:
                    promiscuous_found = other_found = False
                    if (pvlan_object.primaryVlanId == pvlan_object.secondaryVlanId
                            and pvlan_object.pvlanType == 'promiscuous'):
                        # Promiscuous entry: keep it only if its primary PVLAN is still desired
                        for primary_vlan in self.primary_pvlans:
                            primary_pvlan_id = self.get_primary_pvlan_option(primary_vlan)
                            if pvlan_object.primaryVlanId == primary_pvlan_id and pvlan_object.pvlanType == 'promiscuous':
                                promiscuous_found = True
                                break
                        if not promiscuous_found:
                            changed = True
                            changed_list_remove.append(
                                'promiscuous (%s, %s)' % (pvlan_object.primaryVlanId, pvlan_object.secondaryVlanId)
                            )
                            pvlan_spec_list.append(
                                self.create_pvlan_config_spec(
                                    operation='remove',
                                    primary_pvlan_id=pvlan_object.primaryVlanId,
                                    secondary_pvlan_id=pvlan_object.secondaryVlanId,
                                    pvlan_type='promiscuous'
                                )
                            )
                    elif self.secondary_pvlans:
                        # Secondary entry: keep it only if it is still in the desired list
                        for secondary_pvlan in self.secondary_pvlans:
                            (secondary_pvlan_id,
                             secondary_vlan_primary_vlan_id,
                             pvlan_type) = self.get_secondary_pvlan_options(secondary_pvlan)
                            if (pvlan_object.primaryVlanId == secondary_vlan_primary_vlan_id
                                    and pvlan_object.secondaryVlanId == secondary_pvlan_id
                                    and pvlan_object.pvlanType == pvlan_type):
                                other_found = True
                                break
                        if not other_found:
                            changed = True
                            changed_list_remove.append(
                                '%s (%s, %s)' % (
                                    pvlan_object.pvlanType, pvlan_object.primaryVlanId, pvlan_object.secondaryVlanId
                                )
                            )
                            pvlan_spec_list.append(
                                self.create_pvlan_config_spec(
                                    operation='remove',
                                    primary_pvlan_id=pvlan_object.primaryVlanId,
                                    secondary_pvlan_id=pvlan_object.secondaryVlanId,
                                    pvlan_type=pvlan_object.pvlanType
                                )
                            )
                    else:
                        # No secondary PVLANs desired at all: remove this secondary entry
                        changed = True
                        changed_list_remove.append(
                            '%s (%s, %s)' % (
                                pvlan_object.pvlanType, pvlan_object.primaryVlanId, pvlan_object.secondaryVlanId
                            )
                        )
                        pvlan_spec_list.append(
                            self.create_pvlan_config_spec(
                                operation='remove',
                                primary_pvlan_id=pvlan_object.primaryVlanId,
                                secondary_pvlan_id=pvlan_object.secondaryVlanId,
                                pvlan_type=pvlan_object.pvlanType
                            )
                        )
            else:
                # No PVLANs configured yet: add everything that was requested
                changed = True
                changed_list_add.append('All private VLANs')
                pvlan_spec_list = []
                for primary_vlan in self.primary_pvlans:
                    # the first secondary VLAN's type is always promiscuous
                    primary_pvlan_id = self.get_primary_pvlan_option(primary_vlan)
                    pvlan_spec_list.append(
                        self.create_pvlan_config_spec(
                            operation='add',
                            primary_pvlan_id=primary_pvlan_id,
                            secondary_pvlan_id=primary_pvlan_id,
                            pvlan_type='promiscuous'
                        )
                    )
                    if self.secondary_pvlans:
                        for secondary_pvlan in self.secondary_pvlans:
                            (secondary_pvlan_id,
                             secondary_vlan_primary_vlan_id,
                             pvlan_type) = self.get_secondary_pvlan_options(secondary_pvlan)
                            if primary_pvlan_id == secondary_vlan_primary_vlan_id:
                                pvlan_spec_list.append(
                                    self.create_pvlan_config_spec(
                                        operation='add',
                                        primary_pvlan_id=primary_pvlan_id,
                                        secondary_pvlan_id=secondary_pvlan_id,
                                        pvlan_type=pvlan_type
                                    )
                                )
        else:
            # Remove PVLAN configuration if present
            if self.dvs.config.pvlanConfig:
                changed = True
                changed_list_remove.append('All private VLANs')
                pvlan_spec_list = []
                for pvlan_object in self.dvs.config.pvlanConfig:
                    pvlan_spec_list.append(
                        self.create_pvlan_config_spec(
                            operation='remove',
                            primary_pvlan_id=pvlan_object.primaryVlanId,
                            secondary_pvlan_id=pvlan_object.secondaryVlanId,
                            pvlan_type=pvlan_object.pvlanType
                        )
                    )

        if changed:
            message_add = message_remove = None
            if changed_list_add:
                message_add = self.build_change_message('add', changed_list_add)
            if changed_list_remove:
                message_remove = self.build_change_message('remove', changed_list_remove)
            if message_add and message_remove:
                message = message_add + '. ' + message_remove + '.'
            elif message_add:
                message = message_add
            elif message_remove:
                message = message_remove
            # Record the configuration as it was before the change
            current_pvlan_list = []
            for pvlan_object in self.dvs.config.pvlanConfig:
                temp_pvlan = dict()
                temp_pvlan['primary_pvlan_id'] = pvlan_object.primaryVlanId
                temp_pvlan['secondary_pvlan_id'] = pvlan_object.secondaryVlanId
                temp_pvlan['pvlan_type'] = pvlan_object.pvlanType
                current_pvlan_list.append(temp_pvlan)
            results['private_vlans_previous'] = current_pvlan_list
            config_spec.pvlanConfigSpec = pvlan_spec_list
            if not self.module.check_mode:
                try:
                    task = self.dvs.ReconfigureDvs_Task(config_spec)
                    wait_for_task(task)
                except TaskError as invalid_argument:
                    self.module.fail_json(
                        msg="Failed to update DVS : %s" % to_native(invalid_argument)
                    )
        else:
            message = "PVLANs already configured properly"
        results['changed'] = changed
        results['result'] = message

        self.module.exit_json(**results)

    def get_primary_pvlan_option(self, primary_vlan):
        """Extract and validate the primary PVLAN ID from one list entry.

        Fails the module when the ID is missing or one of the reserved
        VLAN IDs 0 and 4095.
        """
        primary_pvlan_id = primary_vlan.get('primary_pvlan_id', None)
        if primary_pvlan_id is None:
            self.module.fail_json(
                msg="Please specify primary_pvlan_id in primary_pvlans options as it's a required parameter"
            )
        if primary_pvlan_id in (0, 4095):
            self.module.fail_json(msg="The VLAN IDs of 0 and 4095 are reserved and cannot be used as a primary PVLAN.")
        return primary_pvlan_id

    def get_secondary_pvlan_options(self, secondary_pvlan):
        """Extract and validate one secondary PVLAN list entry.

        Returns:
            Tuple of (secondary_pvlan_id, primary_pvlan_id, pvlan_type).
            pvlan_type is one of 'isolated' or 'community'.
        """
        secondary_pvlan_id = secondary_pvlan.get('secondary_pvlan_id', None)
        if secondary_pvlan_id is None:
            self.module.fail_json(
                msg="Please specify secondary_pvlan_id in secondary_pvlans options as it's a required parameter"
            )
        primary_pvlan_id = secondary_pvlan.get('primary_pvlan_id', None)
        if primary_pvlan_id is None:
            self.module.fail_json(
                msg="Please specify primary_pvlan_id in secondary_pvlans options as it's a required parameter"
            )
        if secondary_pvlan_id in (0, 4095) or primary_pvlan_id in (0, 4095):
            self.module.fail_json(
                msg="The VLAN IDs of 0 and 4095 are reserved and cannot be used as a primary or secondary PVLAN."
            )
        pvlan_type = secondary_pvlan.get('pvlan_type', None)
        supported_pvlan_types = ['isolated', 'community']
        if pvlan_type is None:
            self.module.fail_json(msg="Please specify pvlan_type in secondary_pvlans options as it's a required parameter")
        elif pvlan_type not in supported_pvlan_types:
            self.module.fail_json(msg="The specified PVLAN type '%s' isn't supported!" % pvlan_type)
        return secondary_pvlan_id, primary_pvlan_id, pvlan_type

    @staticmethod
    def create_pvlan_config_spec(operation, primary_pvlan_id, secondary_pvlan_id, pvlan_type):
        """
        Create PVLAN config spec
        operation: add, edit, or remove
        Returns: PVLAN config spec
        """
        pvlan_spec = vim.dvs.VmwareDistributedVirtualSwitch.PvlanConfigSpec()
        pvlan_spec.operation = operation
        pvlan_spec.pvlanEntry = vim.dvs.VmwareDistributedVirtualSwitch.PvlanMapEntry()
        pvlan_spec.pvlanEntry.primaryVlanId = primary_pvlan_id
        pvlan_spec.pvlanEntry.secondaryVlanId = secondary_pvlan_id
        pvlan_spec.pvlanEntry.pvlanType = pvlan_type
        return pvlan_spec

    def build_change_message(self, operation, changed_list):
        """Build the changed message.

        Joins the changed items into an English list and appends the
        (check-mode aware) operation suffix. Callers always pass a
        non-empty changed_list.
        """
        if operation == 'add':
            changed_operation = 'added'
        elif operation == 'remove':
            changed_operation = 'removed'
        if self.module.check_mode:
            changed_suffix = ' would be %s' % changed_operation
        else:
            changed_suffix = ' %s' % changed_operation
        if len(changed_list) > 2:
            message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1])
        elif len(changed_list) == 2:
            message = ' and '.join(changed_list)
        elif len(changed_list) == 1:
            message = changed_list[0]
        message += changed_suffix
        return message
+
+
def main():
    """Entry point: build the argument spec and apply the PVLAN configuration."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        switch=dict(required=True, aliases=['dvswitch']),
        primary_pvlans=dict(type='list', default=list(), elements='dict'),
        secondary_pvlans=dict(type='list', default=list(), elements='dict'),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    VMwareDvSwitchPvlans(module).ensure()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_dvswitch_uplink_pg.py b/ansible_collections/community/vmware/plugins/modules/vmware_dvswitch_uplink_pg.py
new file mode 100644
index 000000000..e291237c4
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_dvswitch_uplink_pg.py
@@ -0,0 +1,505 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_dvswitch_uplink_pg
+short_description: Manage uplink portgroup configuration of a Distributed Switch
+description:
+ - This module can be used to configure the uplink portgroup of a Distributed Switch.
+author:
+- Christian Kotte (@ckotte)
+options:
+ switch:
+ description:
+ - The name of the Distributed Switch.
+ type: str
+ required: true
+ aliases: ['dvswitch']
+ name:
+ description:
+ - The name of the uplink portgroup.
+ - The current name will be used if not specified.
+ type: str
+ description:
+ description:
+ - The description of the uplink portgroup.
+ type: str
+ advanced:
+ description:
+ - Dictionary which configures the advanced policy settings for the uplink portgroup.
+ suboptions:
+ port_config_reset_at_disconnect:
+ description:
+ - Indicates if the configuration of a port is reset automatically after disconnect.
+ type: bool
+ default: true
+ block_override:
+ description:
+ - Indicates if the block policy can be changed per port.
+ type: bool
+ default: true
+ netflow_override:
+ type: bool
+ description:
+ - Indicates if the NetFlow policy can be changed per port.
+ default: false
+ traffic_filter_override:
+ description:
+ - Indicates if the traffic filter can be changed per port.
+ type: bool
+ default: false
+ vendor_config_override:
+ type: bool
+ description:
+ - Indicates if the vendor config can be changed per port.
+ default: false
+ vlan_override:
+ type: bool
+ description:
+ - Indicates if the vlan can be changed per port.
+ default: false
+ required: false
+ default: {
+ port_config_reset_at_disconnect: true,
+ block_override: true,
+ vendor_config_override: false,
+ vlan_override: false,
+ netflow_override: false,
+ traffic_filter_override: false,
+ }
+ aliases: ['port_policy']
+ type: dict
+ vlan_trunk_range:
+ description:
+ - The VLAN trunk range that should be configured with the uplink portgroup.
+ - 'This can be a combination of multiple ranges and numbers, example: [ 2-3967, 4049-4092 ].'
+ type: list
+ elements: str
+ default: [ '0-4094' ]
+ lacp:
+ description:
+ - Dictionary which configures the LACP settings for the uplink portgroup.
+ - The options are only used if the LACP support mode is set to 'basic'.
+ suboptions:
+ status:
+ description: Indicates if LACP is enabled.
+ default: 'disabled'
+ type: str
+ choices: [ 'enabled', 'disabled' ]
+ mode:
+ description: The negotiating state of the uplinks/ports.
+ default: 'passive'
+ type: str
+ choices: [ 'active', 'passive' ]
+ required: false
+ default: {
+ status: 'disabled',
+ mode: 'passive',
+ }
+ type: dict
+ netflow_enabled:
+ description:
+ - Indicates if NetFlow is enabled on the uplink portgroup.
+ type: bool
+ default: false
+ block_all_ports:
+ description:
+ - Indicates if all ports are blocked on the uplink portgroup.
+ type: bool
+ default: false
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Configure Uplink portgroup
+ community.vmware.vmware_dvswitch_uplink_pg:
+ hostname: '{{ inventory_hostname }}'
+ username: '{{ vcsa_username }}'
+ password: '{{ vcsa_password }}'
+ switch: dvSwitch
+ name: dvSwitch-DVUplinks
+ advanced:
+ port_config_reset_at_disconnect: true
+ block_override: true
+ vendor_config_override: false
+ vlan_override: false
+ netflow_override: false
+ traffic_filter_override: false
+ vlan_trunk_range:
+ - '0-4094'
+ netflow_enabled: false
+ block_all_ports: false
+ delegate_to: localhost
+
+- name: Enabled LACP on Uplink portgroup
+ community.vmware.vmware_dvswitch_uplink_pg:
+ hostname: '{{ inventory_hostname }}'
+ username: '{{ vcsa_username }}'
+ password: '{{ vcsa_password }}'
+ switch: dvSwitch
+ lacp:
+ status: enabled
+ mode: active
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+result:
+ description: information about performed operation
+ returned: always
+ type: str
+ sample: {
+ "adv_block_ports": true,
+ "adv_netflow": false,
+ "adv_reset_at_disconnect": true,
+ "adv_traffic_filtering": false,
+ "adv_vendor_conf": false,
+ "adv_vlan": false,
+ "block_all_ports": false,
+ "changed": false,
+ "description": null,
+ "dvswitch": "dvSwitch",
+ "lacp_status": "disabled",
+ "lacp_status_previous": "enabled",
+ "name": "dvSwitch-DVUplinks",
+ "netflow_enabled": false,
+ "result": "Uplink portgroup already configured properly",
+ "vlan_trunk_range": [
+ "2-3967",
+ "4049-4092"
+ ]
+ }
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi, TaskError, find_dvs_by_name, vmware_argument_spec, wait_for_task
+)
+
+
+class VMwareDvSwitchUplinkPortgroup(PyVmomi):
+ """Class to manage a uplink portgroup on a Distributed Virtual Switch"""
+
+ def __init__(self, module):
+ super(VMwareDvSwitchUplinkPortgroup, self).__init__(module)
+ self.switch_name = self.module.params['switch']
+ self.uplink_pg_name = self.params['name']
+ self.uplink_pg_description = self.params['description']
+ self.uplink_pg_reset = self.params['advanced'].get('port_config_reset_at_disconnect')
+ self.uplink_pg_block_ports = self.params['advanced'].get('block_override')
+ self.uplink_pg_vendor_conf = self.params['advanced'].get('vendor_config_override')
+ self.uplink_pg_vlan = self.params['advanced'].get('vlan_override')
+ self.uplink_pg_netflow = self.params['advanced'].get('netflow_override')
+ self.uplink_pg_tf = self.params['advanced'].get('traffic_filter_override')
+ self.uplink_pg_vlan_trunk_range = self.params['vlan_trunk_range']
+ self.uplink_pg_netflow_enabled = self.params['netflow_enabled']
+ self.uplink_pg_block_all_ports = self.params['block_all_ports']
+ self.lacp_status = self.params['lacp'].get('status')
+ self.lacp_mode = self.params['lacp'].get('mode')
+ self.dvs = find_dvs_by_name(self.content, self.switch_name)
+ if self.dvs is None:
+ self.module.fail_json(msg="Failed to find DVS %s" % self.switch_name)
+ self.support_mode = self.dvs.config.lacpApiVersion
+
    def ensure(self):
        """Converge the uplink portgroup configuration of the switch.

        Compares every supported setting (name, description, advanced port
        policies, VLAN trunk ranges, LACP, NetFlow, block-all-ports) against
        the current configuration, records previous values for anything that
        differs, and reconfigures the portgroup unless running in check mode.
        Exits the module via exit_json().
        """
        changed = changed_uplink_pg_policy = changed_vlan_trunk_range = changed_lacp = False
        results = dict(changed=changed)
        results['dvswitch'] = self.switch_name
        changed_list = []
        message = ''

        uplink_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
        # Use the same version in the new spec; The version will be increased by one by the API automatically
        uplink_pg_spec.configVersion = self.dvs.config.uplinkPortgroup[0].config.configVersion
        # NOTE(review): only the first uplink portgroup is examined/managed;
        # a DVS may have more than one -- confirm this is the intended scope.
        uplink_pg_config = self.dvs.config.uplinkPortgroup[0].config

        # Check name
        if self.uplink_pg_name:
            results['name'] = self.uplink_pg_name
            if uplink_pg_config.name != self.uplink_pg_name:
                changed = True
                changed_list.append("name")
                results['name_previous'] = uplink_pg_config.name
                uplink_pg_spec.name = self.uplink_pg_name
        else:
            # No name given: keep (and report) the current name
            results['name'] = uplink_pg_config.name

        # Check description
        results['description'] = self.uplink_pg_description
        if uplink_pg_config.description != self.uplink_pg_description:
            changed = True
            changed_list.append("description")
            results['description_previous'] = uplink_pg_config.description
            uplink_pg_spec.description = self.uplink_pg_description

        # Check port policies
        results['adv_reset_at_disconnect'] = self.uplink_pg_reset
        results['adv_block_ports'] = self.uplink_pg_block_ports
        results['adv_vendor_conf'] = self.uplink_pg_vendor_conf
        results['adv_vlan'] = self.uplink_pg_vlan
        results['adv_netflow'] = self.uplink_pg_netflow
        results['adv_traffic_filtering'] = self.uplink_pg_tf
        # Build the full desired policy; it is only attached to the spec if
        # at least one setting differs from the current configuration.
        uplink_pg_policy_spec = vim.dvs.VmwareDistributedVirtualSwitch.VMwarePortgroupPolicy()
        uplink_pg_policy_spec.portConfigResetAtDisconnect = self.uplink_pg_reset
        uplink_pg_policy_spec.blockOverrideAllowed = self.uplink_pg_block_ports
        uplink_pg_policy_spec.vendorConfigOverrideAllowed = self.uplink_pg_vendor_conf
        uplink_pg_policy_spec.vlanOverrideAllowed = self.uplink_pg_vlan
        uplink_pg_policy_spec.ipfixOverrideAllowed = self.uplink_pg_netflow
        uplink_pg_policy_spec.trafficFilterOverrideAllowed = self.uplink_pg_tf
        # There's no information available if the following option are deprecated, but
        # they aren't visible in the vSphere Client
        uplink_pg_policy_spec.shapingOverrideAllowed = False
        uplink_pg_policy_spec.livePortMovingAllowed = False
        uplink_pg_policy_spec.uplinkTeamingOverrideAllowed = False
        uplink_pg_policy_spec.macManagementOverrideAllowed = False
        uplink_pg_policy_spec.networkResourcePoolOverrideAllowed = False
        # Check policies
        if uplink_pg_config.policy.portConfigResetAtDisconnect != self.uplink_pg_reset:
            changed_uplink_pg_policy = True
            results['adv_reset_at_disconnect_previous'] = uplink_pg_config.policy.portConfigResetAtDisconnect
        if uplink_pg_config.policy.blockOverrideAllowed != self.uplink_pg_block_ports:
            changed_uplink_pg_policy = True
            results['adv_block_ports_previous'] = uplink_pg_config.policy.blockOverrideAllowed
        if uplink_pg_config.policy.vendorConfigOverrideAllowed != self.uplink_pg_vendor_conf:
            changed_uplink_pg_policy = True
            results['adv_vendor_conf_previous'] = uplink_pg_config.policy.vendorConfigOverrideAllowed
        if uplink_pg_config.policy.vlanOverrideAllowed != self.uplink_pg_vlan:
            changed_uplink_pg_policy = True
            results['adv_vlan_previous'] = uplink_pg_config.policy.vlanOverrideAllowed
        if uplink_pg_config.policy.ipfixOverrideAllowed != self.uplink_pg_netflow:
            changed_uplink_pg_policy = True
            results['adv_netflow_previous'] = uplink_pg_config.policy.ipfixOverrideAllowed
        if uplink_pg_config.policy.trafficFilterOverrideAllowed != self.uplink_pg_tf:
            changed_uplink_pg_policy = True
            results['adv_traffic_filtering_previous'] = uplink_pg_config.policy.trafficFilterOverrideAllowed
        if changed_uplink_pg_policy:
            changed = True
            changed_list.append("advanced")
            uplink_pg_spec.policy = uplink_pg_policy_spec

        uplink_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()

        # Check VLAN trunk
        results['vlan_trunk_range'] = self.uplink_pg_vlan_trunk_range
        vlan_id_ranges = self.uplink_pg_vlan_trunk_range
        trunk_vlan_spec = vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec()
        vlan_id_list = []
        # Build the complete desired range list; flag a change when a desired
        # range is not currently configured.
        for vlan_id_range in vlan_id_ranges:
            vlan_id_range_found = False
            vlan_id_start, vlan_id_end = self.get_vlan_ids_from_range(vlan_id_range)
            # Check if range is already configured
            for current_vlan_id_range in uplink_pg_config.defaultPortConfig.vlan.vlanId:
                if current_vlan_id_range.start == int(vlan_id_start) and current_vlan_id_range.end == int(vlan_id_end):
                    vlan_id_range_found = True
                    break
            if vlan_id_range_found is False:
                changed_vlan_trunk_range = True
            vlan_id_list.append(
                vim.NumericRange(start=int(vlan_id_start), end=int(vlan_id_end))
            )
        # Check if range needs to be removed
        for current_vlan_id_range in uplink_pg_config.defaultPortConfig.vlan.vlanId:
            vlan_id_range_found = False
            for vlan_id_range in vlan_id_ranges:
                vlan_id_start, vlan_id_end = self.get_vlan_ids_from_range(vlan_id_range)
                if (current_vlan_id_range.start == int(vlan_id_start)
                        and current_vlan_id_range.end == int(vlan_id_end)):
                    vlan_id_range_found = True
                    break
            if vlan_id_range_found is False:
                changed_vlan_trunk_range = True
        # The spec carries the full desired list; applying it replaces the
        # current trunk configuration, which also removes stale ranges.
        trunk_vlan_spec.vlanId = vlan_id_list
        if changed_vlan_trunk_range:
            changed = True
            changed_list.append("vlan trunk range")
            # Render the previous ranges as strings ("start-end", or just
            # the ID when start == end) for the results
            current_vlan_id_list = []
            for current_vlan_id_range in uplink_pg_config.defaultPortConfig.vlan.vlanId:
                if current_vlan_id_range.start == current_vlan_id_range.end:
                    current_vlan_id_range_string = current_vlan_id_range.start
                else:
                    current_vlan_id_range_string = '-'.join(
                        [str(current_vlan_id_range.start), str(current_vlan_id_range.end)]
                    )
                current_vlan_id_list.append(current_vlan_id_range_string)
            results['vlan_trunk_range_previous'] = current_vlan_id_list
            uplink_pg_spec.defaultPortConfig.vlan = trunk_vlan_spec

        # Check LACP
        # LACP settings are only applied when the switch uses the basic
        # (singleLag) LACP support mode.
        lacp_support_mode = self.get_lacp_support_mode(self.support_mode)
        if lacp_support_mode == 'basic':
            results['lacp_status'] = self.lacp_status
            lacp_spec = vim.dvs.VmwareDistributedVirtualSwitch.UplinkLacpPolicy()
            lacp_enabled = False
            if self.lacp_status == 'enabled':
                lacp_enabled = True
            if uplink_pg_config.defaultPortConfig.lacpPolicy.enable.value != lacp_enabled:
                changed_lacp = True
                changed_list.append("lacp status")
                if uplink_pg_config.defaultPortConfig.lacpPolicy.enable.value:
                    results['lacp_status_previous'] = 'enabled'
                else:
                    results['lacp_status_previous'] = 'disabled'
                lacp_spec.enable = vim.BoolPolicy()
                lacp_spec.enable.inherited = False
                lacp_spec.enable.value = lacp_enabled
            # The mode is only compared (and changed) when LACP is enabled
            if lacp_enabled and uplink_pg_config.defaultPortConfig.lacpPolicy.mode.value != self.lacp_mode:
                results['lacp_mode'] = self.lacp_mode
                changed_lacp = True
                changed_list.append("lacp mode")
                results['lacp_mode_previous'] = uplink_pg_config.defaultPortConfig.lacpPolicy.mode.value
                lacp_spec.mode = vim.StringPolicy()
                lacp_spec.mode.inherited = False
                lacp_spec.mode.value = self.lacp_mode
            if changed_lacp:
                changed = True
                uplink_pg_spec.defaultPortConfig.lacpPolicy = lacp_spec

        # Check NetFlow
        results['netflow_enabled'] = self.uplink_pg_netflow_enabled
        netflow_enabled_spec = vim.BoolPolicy()
        netflow_enabled_spec.inherited = False
        netflow_enabled_spec.value = self.uplink_pg_netflow_enabled
        if uplink_pg_config.defaultPortConfig.ipfixEnabled.value != self.uplink_pg_netflow_enabled:
            changed = True
            results['netflow_enabled_previous'] = uplink_pg_config.defaultPortConfig.ipfixEnabled.value
            changed_list.append("netflow")
            uplink_pg_spec.defaultPortConfig.ipfixEnabled = netflow_enabled_spec

        # TODO: Check Traffic filtering and marking

        # Check Block all ports
        results['block_all_ports'] = self.uplink_pg_block_all_ports
        block_all_ports_spec = vim.BoolPolicy()
        block_all_ports_spec.inherited = False
        block_all_ports_spec.value = self.uplink_pg_block_all_ports
        if uplink_pg_config.defaultPortConfig.blocked.value != self.uplink_pg_block_all_ports:
            changed = True
            changed_list.append("block all ports")
            results['block_all_ports_previous'] = uplink_pg_config.defaultPortConfig.blocked.value
            uplink_pg_spec.defaultPortConfig.blocked = block_all_ports_spec

        if changed:
            # Build a human-readable summary of what changed
            if self.module.check_mode:
                changed_suffix = ' would be changed'
            else:
                changed_suffix = ' changed'
            if len(changed_list) > 2:
                message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1])
            elif len(changed_list) == 2:
                message = ' and '.join(changed_list)
            elif len(changed_list) == 1:
                message = changed_list[0]
            message += changed_suffix
            if not self.module.check_mode:
                try:
                    task = self.dvs.config.uplinkPortgroup[0].ReconfigureDVPortgroup_Task(uplink_pg_spec)
                    wait_for_task(task)
                except TaskError as invalid_argument:
                    self.module.fail_json(msg="Failed to update uplink portgroup : %s" % to_native(invalid_argument))
        else:
            message = "Uplink portgroup already configured properly"
        results['changed'] = changed
        results['result'] = message

        self.module.exit_json(**results)
+
+ @staticmethod
+ def get_vlan_ids_from_range(vlan_id_range):
+ """Get start and end VLAN ID from VLAN ID range"""
+ try:
+ vlan_id_start, vlan_id_end = vlan_id_range.split('-')
+ except (AttributeError, TypeError):
+ vlan_id_start = vlan_id_end = vlan_id_range
+ except ValueError:
+ vlan_id_start = vlan_id_end = vlan_id_range.strip()
+ return vlan_id_start, vlan_id_end
+
+ @staticmethod
+ def get_lacp_support_mode(mode):
+ """Get LACP support mode"""
+ return_mode = None
+ if mode == 'basic':
+ return_mode = 'singleLag'
+ elif mode == 'enhanced':
+ return_mode = 'multipleLag'
+ elif mode == 'singleLag':
+ return_mode = 'basic'
+ elif mode == 'multipleLag':
+ return_mode = 'enhanced'
+ return return_mode
+
+
def main():
    """Module entry point: build the argument spec and apply the uplink portgroup config."""
    # Sub-option specs are split out for readability; values are unchanged.
    advanced_options = dict(
        port_config_reset_at_disconnect=dict(type='bool', default=True),
        block_override=dict(type='bool', default=True),
        vendor_config_override=dict(type='bool', default=False),
        vlan_override=dict(type='bool', default=False),
        netflow_override=dict(type='bool', default=False),
        traffic_filter_override=dict(type='bool', default=False),
    )
    lacp_options = dict(
        status=dict(type='str', choices=['enabled', 'disabled'], default='disabled'),
        mode=dict(type='str', choices=['active', 'passive'], default='passive'),
    )

    argument_spec = vmware_argument_spec()
    argument_spec.update(
        switch=dict(required=True, aliases=['dvswitch']),
        name=dict(type='str'),
        description=dict(type='str'),
        advanced=dict(
            type='dict',
            options=advanced_options,
            default=dict(
                port_config_reset_at_disconnect=True,
                block_override=True,
                vendor_config_override=False,
                vlan_override=False,
                netflow_override=False,
                traffic_filter_override=False,
            ),
            aliases=['port_policy'],
        ),
        lacp=dict(
            type='dict',
            options=lacp_options,
            default=dict(status='disabled', mode='passive'),
        ),
        vlan_trunk_range=dict(type='list', default=['0-4094'], elements='str'),
        netflow_enabled=dict(type='bool', default=False),
        block_all_ports=dict(type='bool', default=False),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    VMwareDvSwitchUplinkPortgroup(module).ensure()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_evc_mode.py b/ansible_collections/community/vmware/plugins/modules/vmware_evc_mode.py
new file mode 100644
index 000000000..11f9b73ba
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_evc_mode.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Michael Tipton <mike () ibeta.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_evc_mode
+short_description: Enable/Disable EVC mode on vCenter
+description:
+ - This module can be used to enable/disable EVC mode on vCenter.
+author:
+ - Michael Tipton (@castawayegr)
+options:
+ datacenter_name:
+ description:
+ - The name of the datacenter the cluster belongs to that you want to enable or disable EVC mode on.
+ required: true
+ type: str
+ aliases:
+ - datacenter
+ cluster_name:
+ description:
+ - The name of the cluster to enable or disable EVC mode on.
+ required: true
+ type: str
+ aliases:
+ - cluster
+ evc_mode:
+ description:
+ - Required for C(state=present).
+ - The EVC mode to enable or disable on the cluster. (intel-broadwell, intel-nehalem, intel-merom, etc.).
+ type: str
+ state:
+ description:
+ - Add or remove EVC mode.
+ choices: [absent, present]
+ default: present
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+ - name: Enable EVC Mode
+ community.vmware.vmware_evc_mode:
+ hostname: "{{ groups['vcsa'][0] }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ site_password }}"
+ datacenter_name: "{{ datacenter_name }}"
+ cluster_name: "{{ cluster_name }}"
+ evc_mode: "intel-broadwell"
+ state: present
+ delegate_to: localhost
+ register: enable_evc
+
+ - name: Disable EVC Mode
+ community.vmware.vmware_evc_mode:
+ hostname: "{{ groups['vcsa'][0] }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ site_password }}"
+ datacenter_name: "{{ datacenter_name }}"
+ cluster_name: "{{ cluster_name }}"
+ state: absent
+ delegate_to: localhost
+ register: disable_evc
+'''
+
+RETURN = r'''
+result:
+ description: information about performed operation
+ returned: always
+ type: str
+ sample: "EVC Mode for 'intel-broadwell' has been enabled."
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi,
+ find_datacenter_by_name,
+ vmware_argument_spec,
+ wait_for_task,
+ TaskError)
+
+
class VMwareEVC(PyVmomi):
    """Enable, disable or update EVC (Enhanced vMotion Compatibility) mode on a cluster."""

    def __init__(self, module):
        """Read module parameters; the vSphere objects are resolved later.

        :param module: the AnsibleModule instance driving this run.
        """
        super(VMwareEVC, self).__init__(module)
        self.cluster_name = module.params['cluster_name']
        self.evc_mode = module.params['evc_mode']
        self.datacenter_name = module.params['datacenter_name']
        self.desired_state = module.params['state']
        # Resolved lazily in check_evc_configuration().
        self.datacenter = None
        self.cluster = None

    def process_state(self):
        """
        Manage internal states of evc

        Dispatches on (desired_state, current_state); the selected handler
        calls exit_json()/fail_json() itself, so this method does not return
        a value.
        """
        evc_states = {
            'absent': {
                'present': self.state_disable_evc,
                'absent': self.state_exit_unchanged,
            },
            'present': {
                'present': self.state_update_evc,
                'absent': self.state_enable_evc,
            }
        }
        current_state = self.check_evc_configuration()
        # Based on the desired_state and the current_state call
        # the appropriate method from the dictionary
        evc_states[self.desired_state][current_state]()

    def check_evc_configuration(self):
        """
        Check evc configuration
        Returns: 'present' if EVC is enabled on the cluster, else 'absent'

        Side effects: caches self.datacenter, self.cluster, self.evcm,
        self.evc_state and self.current_evc_mode for the state handlers.
        """
        try:
            self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name)
            if self.datacenter is None:
                self.module.fail_json(msg="Datacenter '%s' does not exist." % self.datacenter_name)
            # NOTE(review): the datacenter *object* is passed as datacenter_name;
            # the helper appears to accept it — confirm against module_utils.vmware.
            self.cluster = self.find_cluster_by_name(cluster_name=self.cluster_name, datacenter_name=self.datacenter)

            if self.cluster is None:
                self.module.fail_json(msg="Cluster '%s' does not exist." % self.cluster_name)
            self.evcm = self.cluster.EvcManager()

            if not self.evcm:
                self.module.fail_json(msg="Unable to get EVC manager for cluster '%s'." % self.cluster_name)
            self.evc_state = self.evcm.evcState
            # currentEVCModeKey is empty/None when EVC is disabled.
            self.current_evc_mode = self.evc_state.currentEVCModeKey

            if not self.current_evc_mode:
                return 'absent'

            return 'present'
        except Exception as generic_exc:
            self.module.fail_json(msg="Failed to check configuration"
                                      " due to generic exception %s" % to_native(generic_exc))

    def state_exit_unchanged(self):
        """
        Exit without any change

        Only reached for state=absent when EVC is already disabled.
        """
        self.module.exit_json(changed=False, msg="EVC Mode is already disabled on cluster '%s'." % self.cluster_name)

    def state_update_evc(self):
        """
        Update EVC Mode

        EVC is already enabled; reconfigure it only when the requested mode
        differs from the current one. Honors check mode.
        """
        changed, result = False, None
        try:
            if not self.module.check_mode and self.current_evc_mode != self.evc_mode:
                evc_task = self.evcm.ConfigureEvcMode_Task(self.evc_mode)
                changed, result = wait_for_task(evc_task)
            if self.module.check_mode and self.current_evc_mode != self.evc_mode:
                changed = True
            # If the mode already matches, changed is still False here.
            if self.current_evc_mode == self.evc_mode:
                self.module.exit_json(changed=changed, msg="EVC Mode is already set to '%(evc_mode)s' on '%(cluster_name)s'." % self.params)
            self.module.exit_json(changed=changed, msg="EVC Mode has been updated to '%(evc_mode)s' on '%(cluster_name)s'." % self.params)
        except TaskError as invalid_argument:
            self.module.fail_json(msg="Failed to update EVC mode: %s" % to_native(invalid_argument))

    def state_enable_evc(self):
        """
        Enable EVC Mode

        EVC is currently disabled; always reports changed. Honors check mode.
        """
        changed, result = False, None
        try:
            if not self.module.check_mode:
                evc_task = self.evcm.ConfigureEvcMode_Task(self.evc_mode)
                changed, result = wait_for_task(evc_task)
            if self.module.check_mode:
                changed = True
            self.module.exit_json(changed=changed, msg="EVC Mode for '%(evc_mode)s' has been enabled on '%(cluster_name)s'." % self.params)
        except TaskError as invalid_argument:
            self.module.fail_json(msg="Failed to enable EVC mode: %s" % to_native(invalid_argument))

    def state_disable_evc(self):
        """
        Disable EVC Mode

        EVC is currently enabled; always reports changed. Honors check mode.
        """
        changed, result = False, None
        try:
            if not self.module.check_mode:
                evc_task = self.evcm.DisableEvcMode_Task()
                changed, result = wait_for_task(evc_task)
            if self.module.check_mode:
                changed = True
            self.module.exit_json(changed=changed, msg="EVC Mode has been disabled on cluster '%s'." % self.cluster_name)
        except TaskError as invalid_argument:
            self.module.fail_json(msg="Failed to disable EVC mode: %s" % to_native(invalid_argument))
+
+
def main():
    """Module entry point: parse arguments and drive the EVC state machine."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        cluster_name=dict(type='str', required=True, aliases=['cluster']),
        datacenter_name=dict(type='str', required=True, aliases=['datacenter']),
        evc_mode=dict(type='str'),
        state=dict(type='str', default='present', choices=['absent', 'present']),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # evc_mode is only meaningful (and therefore required) when enabling EVC.
        required_if=[
            ['state', 'present', ['evc_mode']]
        ]
    )

    VMwareEVC(module).process_state()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_export_ovf.py b/ansible_collections/community/vmware/plugins/modules/vmware_export_ovf.py
new file mode 100644
index 000000000..d447ea983
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_export_ovf.py
@@ -0,0 +1,361 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Diane Wang <dianew@vmware.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_export_ovf
+short_description: Exports a VMware virtual machine to an OVF file, device files and a manifest file
+description: >
+ This module can be used to export a VMware virtual machine to OVF template from vCenter server or ESXi host.
+author:
+- Diane Wang (@Tomorrow9) <dianew@vmware.com>
+notes: []
+options:
+ name:
+ description:
+ - Name of the virtual machine to export.
+ - This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
+ type: str
+ uuid:
+ description:
+ - Uuid of the virtual machine to export.
+ - This is a required parameter, if parameter C(name) or C(moid) is not supplied.
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ type: str
+ datacenter:
+ default: ha-datacenter
+ description:
+ - Datacenter name of the virtual machine to export.
+ - This parameter is case sensitive.
+ type: str
+ folder:
+ description:
+ - Destination folder, absolute path to find the specified guest.
+ - The folder should include the datacenter. ESX datacenter is ha-datacenter.
+ - This parameter is case sensitive.
+ - 'If multiple machines are found with same name, this parameter is used to identify'
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ type: str
+ export_dir:
+ description:
+ - Absolute path to place the exported files on the server running this task, must have write permission.
+ - If folder not exist will create it, also create a folder under this path named with VM name.
+ required: true
+ type: path
+ export_with_images:
+ default: false
+ description:
+ - Export an ISO image of the media mounted on the CD/DVD Drive within the virtual machine.
+ type: bool
+ export_with_extraconfig:
+ type: bool
+ default: false
+ description:
+ - All extra configuration options are exported for a virtual machine.
+ version_added: '2.0.0'
+ download_timeout:
+ description:
+ - The user defined timeout in second of exporting file.
+ - If the vmdk file is too large, you can increase the value.
+ default: 30
+ type: int
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- community.vmware.vmware_export_ovf:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ name: '{{ vm_name }}'
+ export_with_images: true
+ export_dir: /path/to/ovf_template/
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+instance:
+ description: list of the exported files, if exported from vCenter server, device file is not named with vm name
+ returned: always
+ type: dict
+ sample: None
+'''
+
+import os
+import hashlib
+from time import sleep
+from threading import Thread
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text, to_bytes
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+try:
+ from pyVmomi import vim
+ from pyVim import connect
+except ImportError:
+ pass
+
+
class LeaseProgressUpdater(Thread):
    """Background thread that keeps an HTTP NFC lease alive during an export.

    vSphere times out an idle lease, so while files are being downloaded this
    thread reports ``progressPercent`` on the lease every ``updateInterval``
    seconds until the lease is done/errored or ``stop()`` is called.
    """

    def __init__(self, http_nfc_lease, update_interval):
        super(LeaseProgressUpdater, self).__init__()
        self._running = True
        self.httpNfcLease = http_nfc_lease
        self.updateInterval = update_interval
        self.progressPercent = 0

    def set_progress_percent(self, progress_percent):
        """Record the percentage to report on the next lease update."""
        self.progressPercent = progress_percent

    def stop(self):
        """Ask the updater thread to exit at its next wake-up."""
        self._running = False

    def run(self):
        while self._running:
            try:
                if self.httpNfcLease.state == vim.HttpNfcLease.State.done:
                    return
                self.httpNfcLease.HttpNfcLeaseProgress(self.progressPercent)
                # Sleep one second at a time so a finished or failed lease
                # is noticed quickly instead of after a full interval.
                seconds_slept = 0
                while True:
                    if self.httpNfcLease.state in (vim.HttpNfcLease.State.done, vim.HttpNfcLease.State.error):
                        return
                    seconds_slept += 1
                    sleep(1)
                    if seconds_slept == self.updateInterval:
                        break
            except Exception:
                # Lease object became unusable (e.g. aborted); just exit.
                return
+
+
class VMwareExportVmOvf(PyVmomi):
    """Export a VM's device files via an HTTP NFC lease and write the OVF descriptor and SHA256 manifest."""

    def __init__(self, module):
        super(VMwareExportVmOvf, self).__init__(module)
        # Path of the SHA256 manifest (.mf) file; set by create_export_dir().
        self.mf_file = ''
        # Per-VM target directory (export_dir/<vm name>); set by create_export_dir().
        self.ovf_dir = ''
        # set read device content chunk size to 2 MB
        self.chunk_size = 2 * 2 ** 20
        # set lease progress update interval to 15 seconds
        self.lease_interval = 15
        # Return facts; 'device_files' lists every downloaded file path.
        self.facts = {'device_files': []}
        self.download_timeout = None

    def create_export_dir(self, vm_obj):
        """Create the per-VM export directory and derive the manifest file path."""
        self.ovf_dir = os.path.join(self.params['export_dir'], vm_obj.name)
        if not os.path.exists(self.ovf_dir):
            try:
                os.makedirs(self.ovf_dir)
            except OSError as err:
                self.module.fail_json(msg='Exception caught when create folder %s, with error %s'
                                          % (self.ovf_dir, to_text(err)))
        self.mf_file = os.path.join(self.ovf_dir, vm_obj.name + '.mf')

    def download_device_files(self, headers, temp_target_disk, device_url, lease_updater, total_bytes_written,
                              total_bytes_to_write):
        """Stream one device file from its lease URL to temp_target_disk.

        Appends the file's SHA256 line to the manifest, records the path in
        self.facts and returns the number of bytes written. On any download
        error the lease is aborted and the module fails.
        """
        mf_content = 'SHA256(' + os.path.basename(temp_target_disk) + ')= '
        sha256_hash = hashlib.sha256()
        response = None

        with open(self.mf_file, 'a') as mf_handle:
            with open(temp_target_disk, 'wb') as handle:
                try:
                    response = open_url(device_url, headers=headers, validate_certs=False, timeout=self.download_timeout)
                except Exception as err:
                    lease_updater.httpNfcLease.HttpNfcLeaseAbort()
                    lease_updater.stop()
                    self.module.fail_json(msg='Exception caught when getting %s, %s' % (device_url, to_text(err)))
                if not response:
                    lease_updater.httpNfcLease.HttpNfcLeaseAbort()
                    lease_updater.stop()
                    self.module.fail_json(msg='Getting %s failed' % device_url)
                if response.getcode() >= 400:
                    lease_updater.httpNfcLease.HttpNfcLeaseAbort()
                    lease_updater.stop()
                    self.module.fail_json(msg='Getting %s return code %d' % (device_url, response.getcode()))
                current_bytes_written = 0
                block = response.read(self.chunk_size)
                while block:
                    # fsync per chunk keeps the on-disk file consistent with the hash.
                    handle.write(block)
                    sha256_hash.update(block)
                    handle.flush()
                    os.fsync(handle.fileno())
                    current_bytes_written += len(block)
                    block = response.read(self.chunk_size)
                    # Feed overall (all files) progress to the lease updater thread.
                    written_percent = ((current_bytes_written + total_bytes_written) * 100) / total_bytes_to_write
                    lease_updater.progressPercent = int(written_percent)
                mf_handle.write(mf_content + sha256_hash.hexdigest() + '\n')
        self.facts['device_files'].append(temp_target_disk)
        return current_bytes_written

    def export_to_ovf_files(self, vm_obj):
        """Drive the whole export: download device files, then write the .ovf and .mf files.

        Returns an Ansible result dict; on unexpected errors the lease is
        aborted and a failed result dict is returned instead of raising.
        """
        self.create_export_dir(vm_obj=vm_obj)
        export_with_iso = False
        if self.params['export_with_images']:
            export_with_iso = True
        self.download_timeout = self.params['download_timeout']

        ovf_files = []
        # get http nfc lease firstly
        http_nfc_lease = vm_obj.ExportVm()
        # create a thread to track file download progress
        lease_updater = LeaseProgressUpdater(http_nfc_lease, self.lease_interval)
        total_bytes_written = 0
        # total storage space occupied by the virtual machine across all datastores
        total_bytes_to_write = vm_obj.summary.storage.unshared
        # new deployed VM with no OS installed
        if total_bytes_to_write == 0:
            total_bytes_to_write = vm_obj.summary.storage.committed
        if total_bytes_to_write == 0:
            http_nfc_lease.HttpNfcLeaseAbort()
            self.module.fail_json(msg='Total storage space occupied by the VM is 0.')
        headers = {'Accept': 'application/x-vnd.vmware-streamVmdk'}
        # Reuse the pyVim session cookie so the device URLs are authenticated.
        cookies = connect.GetStub().cookie
        if cookies:
            headers['Cookie'] = cookies
        lease_updater.start()
        try:
            while True:
                if http_nfc_lease.state == vim.HttpNfcLease.State.ready:
                    for deviceUrl in http_nfc_lease.info.deviceUrl:
                        file_download = False
                        if deviceUrl.targetId and deviceUrl.disk:
                            # Virtual disk (vmdk): always exported.
                            file_download = True
                        elif deviceUrl.url.split('/')[-1].split('.')[-1] == 'iso':
                            if export_with_iso:
                                file_download = True
                        elif deviceUrl.url.split('/')[-1].split('.')[-1] == 'nvram':
                            # NVRAM export requires ESXi >= 6.7.
                            if self.host_version_at_least(version=(6, 7, 0), vm_obj=vm_obj):
                                file_download = True
                        else:
                            continue
                        device_file_name = deviceUrl.url.split('/')[-1]
                        # device file named disk-0.iso, disk-1.vmdk, disk-2.vmdk, replace 'disk' with vm name
                        if device_file_name.split('.')[0][0:5] == "disk-":
                            device_file_name = device_file_name.replace('disk', vm_obj.name)
                        temp_target_disk = os.path.join(self.ovf_dir, device_file_name)
                        device_url = deviceUrl.url
                        # if export from ESXi host, replace * with hostname in url
                        # e.g., https://*/ha-nfc/5289bf27-da99-7c0e-3978-8853555deb8c/disk-1.vmdk
                        if '*' in device_url:
                            device_url = device_url.replace('*', self.params['hostname'])
                        if file_download:
                            current_bytes_written = self.download_device_files(headers=headers,
                                                                              temp_target_disk=temp_target_disk,
                                                                              device_url=device_url,
                                                                              lease_updater=lease_updater,
                                                                              total_bytes_written=total_bytes_written,
                                                                              total_bytes_to_write=total_bytes_to_write)
                            total_bytes_written += current_bytes_written
                            ovf_file = vim.OvfManager.OvfFile()
                            ovf_file.deviceId = deviceUrl.key
                            ovf_file.path = device_file_name
                            ovf_file.size = current_bytes_written
                            ovf_files.append(ovf_file)
                    break
                if http_nfc_lease.state == vim.HttpNfcLease.State.initializing:
                    sleep(2)
                    continue
                if http_nfc_lease.state == vim.HttpNfcLease.State.error:
                    lease_updater.stop()
                    self.module.fail_json(msg='Get HTTP NFC lease error %s.' % http_nfc_lease.state.error[0].fault)

            # generate ovf file
            ovf_manager = self.content.ovfManager
            ovf_descriptor_name = vm_obj.name
            ovf_parameters = vim.OvfManager.CreateDescriptorParams()
            ovf_parameters.name = ovf_descriptor_name
            ovf_parameters.ovfFiles = ovf_files
            if self.params['export_with_extraconfig']:
                ovf_parameters.exportOption = ['extraconfig']
            if self.params['export_with_images']:
                ovf_parameters.includeImageFiles = True
            vm_descriptor_result = ovf_manager.CreateDescriptor(obj=vm_obj, cdp=ovf_parameters)
            if vm_descriptor_result.error:
                http_nfc_lease.HttpNfcLeaseAbort()
                lease_updater.stop()
                self.module.fail_json(msg='Create VM descriptor file error %s.' % vm_descriptor_result.error)
            else:
                vm_descriptor = vm_descriptor_result.ovfDescriptor
                ovf_descriptor_path = os.path.join(self.ovf_dir, ovf_descriptor_name + '.ovf')
                sha256_hash = hashlib.sha256()
                with open(self.mf_file, 'a') as mf_handle:
                    with open(ovf_descriptor_path, 'w') as handle:
                        handle.write(vm_descriptor)
                        sha256_hash.update(to_bytes(vm_descriptor))
                        mf_handle.write('SHA256(' + os.path.basename(ovf_descriptor_path) + ')= ' + sha256_hash.hexdigest() + '\n')
                http_nfc_lease.HttpNfcLeaseProgress(100)
                # self.facts = http_nfc_lease.HttpNfcLeaseGetManifest()
                http_nfc_lease.HttpNfcLeaseComplete()
                lease_updater.stop()
                self.facts.update({'manifest': self.mf_file, 'ovf_file': ovf_descriptor_path})
        except Exception as err:
            kwargs = {
                'changed': False,
                'failed': True,
                'msg': "get exception: %s" % to_text(err),
            }
            http_nfc_lease.HttpNfcLeaseAbort()
            lease_updater.stop()
            return kwargs
        return {'changed': True, 'failed': False, 'instance': self.facts}
+
+
def main():
    """Module entry point: locate the VM and export it to OVF files."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        folder=dict(type='str'),
        datacenter=dict(type='str', default='ha-datacenter'),
        export_dir=dict(type='path', required=True),
        export_with_images=dict(type='bool', default=False),
        export_with_extraconfig=dict(type='bool', default=False),
        download_timeout=dict(type='int', default=30),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=[
            ['name', 'uuid', 'moid'],
        ],
    )

    pyv = VMwareExportVmOvf(module)
    vm = pyv.get_vm()
    if not vm:
        module.fail_json(msg='The specified virtual machine not found')

    # Export is only supported for powered-off virtual machines.
    vm_facts = pyv.gather_facts(vm)
    if vm_facts['hw_power_status'].lower() != 'poweredoff':
        module.fail_json(msg='VM state should be poweredoff to export')

    results = pyv.export_to_ovf_files(vm_obj=vm)
    module.exit_json(**results)


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_first_class_disk.py b/ansible_collections/community/vmware/plugins/modules/vmware_first_class_disk.py
new file mode 100644
index 000000000..db224b7f8
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_first_class_disk.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2021, Mario Lenz <m@riolenz.de>
+# Copyright: (c) 2021, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: vmware_first_class_disk
+short_description: Manage VMware vSphere First Class Disks
+description:
+ - This module can be used to manage (create, delete, resize) VMware vSphere First Class Disks.
+author:
+- Mario Lenz (@mariolenz)
+options:
+ datacenter_name:
+ description: The name of the datacenter.
+ type: str
+ datastore_name:
+ description: Name of datastore or datastore cluster to be used for the disk.
+ required: true
+ type: str
+ disk_name:
+ description: The name of the disk.
+ required: true
+ type: str
+ size:
+ description:
+ - Disk storage size, an integer plus a unit.
+ - There is no space allowed in between size number and unit.
+ - Allowed units are MB, GB and TB.
+ - 'Examples:'
+ - ' size: 2048MB'
+ - ' size: 10GB'
+ - ' size: 1TB'
+ type: str
+ state:
+ description: If the disk should be present or absent.
+ choices: [ present, absent ]
+ default: present
+ type: str
+extends_documentation_fragment: community.vmware.vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: Create Disk
+ community.vmware.vmware_first_class_disk:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datastore_name: '{{ datastore_name }}'
+ disk_name: '1GBDisk'
+ size: '1GB'
+ state: present
+ delegate_to: localhost
+
+- name: Delete Disk
+ community.vmware.vmware_first_class_disk:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datastore_name: '{{ datastore_name }}'
+ disk_name: 'FirstClassDisk'
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+first_class_disk:
+ description: First-class disk returned when created, deleted or changed
+ returned: changed
+ type: dict
+ sample: >
+    {
+        "name": "1GBDisk",
+        "datastore_name": "DS0",
+        "size_mb": "1024",
+        "state": "present"
+    }
+'''
+
+import re
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, TaskError, vmware_argument_spec, wait_for_task
+from ansible.module_utils._text import to_native
+
+
class FirstClassDisk(PyVmomi):
    """Manage a vSphere First Class Disk: create, extend or delete it.

    state=present creates the disk (or grows it when a larger size is given);
    state=absent deletes it. Shrinking an existing disk is rejected.
    """

    def __init__(self, module):
        super(FirstClassDisk, self).__init__(module)
        self.datacenter_name = self.params['datacenter_name']
        self.datastore_name = self.params['datastore_name']
        self.disk_name = self.params['disk_name']
        self.desired_state = module.params['state']

        # Desired capacity in MB, parsed from a '<number><MB|GB|TB>' string.
        self.size_mb = None
        if self.params['size']:
            size_regex = re.compile(r'(\d+)([MGT]B)')
            disk_size_m = size_regex.match(self.params['size'])
            if disk_size_m:
                number = disk_size_m.group(1)
                unit = disk_size_m.group(2)
            else:
                self.module.fail_json(msg="Failed to parse disk size, please review value provided using documentation.")

            number = int(number)
            if unit == "GB":
                self.size_mb = 1024 * number
            elif unit == "TB":
                self.size_mb = 1048576 * number
            else:
                self.size_mb = number

        self.datastore_obj = self.find_datastore_by_name(datastore_name=self.datastore_name, datacenter_name=self.datacenter_name)
        if not self.datastore_obj:
            self.module.fail_json(msg='Failed to find datastore %s.' % self.datastore_name)

        # Existing disk object, or None when the disk does not exist yet.
        self.disk = self.find_first_class_disk_by_name(self.disk_name, self.datastore_obj)

    def create_fcd_result(self, state):
        """Describe the current self.disk (plus the given state) for result/diff output.

        Requires self.disk to be set; callers must not invoke this while the
        disk does not exist (e.g. check mode before creation).
        """
        result = dict(
            name=self.disk.config.name,
            datastore_name=self.disk.config.backing.datastore.name,
            size_mb=self.disk.config.capacityInMB,
            state=state
        )

        return result

    def create(self):
        """Create the disk, or extend it when it exists with a smaller size.

        Exits the module via exit_json()/fail_json(); never returns.
        """
        result = dict(changed=False)
        if not self.disk:
            result['changed'] = True
            if not self.module.check_mode:
                backing_spec = vim.vslm.CreateSpec.DiskFileBackingSpec()
                backing_spec.datastore = self.datastore_obj

                vslm_create_spec = vim.vslm.CreateSpec()
                vslm_create_spec.backingSpec = backing_spec
                vslm_create_spec.capacityInMB = self.size_mb
                vslm_create_spec.name = self.disk_name

                try:
                    if self.is_vcenter():
                        task = self.content.vStorageObjectManager.CreateDisk_Task(vslm_create_spec)
                    else:
                        task = self.content.vStorageObjectManager.HostCreateDisk_Task(vslm_create_spec)
                    changed, self.disk = wait_for_task(task)
                except vmodl.RuntimeFault as runtime_fault:
                    self.module.fail_json(msg=to_native(runtime_fault.msg))
                except vmodl.MethodFault as method_fault:
                    self.module.fail_json(msg=to_native(method_fault.msg))
                except TaskError as task_e:
                    self.module.fail_json(msg=to_native(task_e))
                except Exception as generic_exc:
                    self.module.fail_json(msg="Failed to create disk"
                                              " due to generic exception %s" % to_native(generic_exc))

                # Bug fix: build the diff/result only after the disk really
                # exists. Previously this ran in check mode too, where
                # self.disk is still None and create_fcd_result() crashed
                # with an AttributeError. This also matches the extend and
                # delete paths, which only emit a diff outside check mode.
                result['diff'] = {'before': {}, 'after': {}}
                result['diff']['before']['first_class_disk'] = self.create_fcd_result('absent')
                result['diff']['after']['first_class_disk'] = self.create_fcd_result('present')
                result['first_class_disk'] = result['diff']['after']['first_class_disk']
        else:
            if self.size_mb < self.disk.config.capacityInMB:
                self.module.fail_json(msg="Given disk size is smaller than current size (%dMB < %dMB). "
                                          "Reducing disks is not allowed."
                                          % (self.size_mb, self.disk.config.capacityInMB))
            elif self.size_mb > self.disk.config.capacityInMB:
                result['changed'] = True
                if not self.module.check_mode:
                    result['diff'] = {'before': {}, 'after': {}}
                    result['diff']['before']['first_class_disk'] = self.create_fcd_result('present')
                    try:
                        if self.is_vcenter():
                            task = self.content.vStorageObjectManager.ExtendDisk_Task(self.disk.config.id,
                                                                                     self.datastore_obj,
                                                                                     self.size_mb)
                        else:
                            task = self.content.vStorageObjectManager.HostExtendDisk_Task(self.disk.config.id,
                                                                                         self.datastore_obj,
                                                                                         self.size_mb)
                        wait_for_task(task)
                    except vmodl.RuntimeFault as runtime_fault:
                        self.module.fail_json(msg=to_native(runtime_fault.msg))
                    except vmodl.MethodFault as method_fault:
                        self.module.fail_json(msg=to_native(method_fault.msg))
                    except TaskError as task_e:
                        self.module.fail_json(msg=to_native(task_e))
                    except Exception as generic_exc:
                        self.module.fail_json(msg="Failed to increase disk size"
                                                  " due to generic exception %s" % to_native(generic_exc))

                    # Re-read the disk so the diff reflects the new capacity.
                    self.disk = self.find_first_class_disk_by_name(self.disk_name, self.datastore_obj)
                    result['diff']['after']['first_class_disk'] = self.create_fcd_result('present')
                    result['first_class_disk'] = result['diff']['after']['first_class_disk']

        self.module.exit_json(**result)

    def delete(self):
        """Delete the disk if it exists.

        Exits the module via exit_json()/fail_json(); never returns.
        """
        result = dict(changed=False)
        if self.disk:
            result['changed'] = True
            if not self.module.check_mode:
                result['diff'] = {'before': {}, 'after': {}}
                result['diff']['before']['first_class_disk'] = self.create_fcd_result('present')
                result['diff']['after']['first_class_disk'] = self.create_fcd_result('absent')
                result['first_class_disk'] = result['diff']['after']['first_class_disk']

                try:
                    if self.is_vcenter():
                        task = self.content.vStorageObjectManager.DeleteVStorageObject_Task(self.disk.config.id,
                                                                                            self.datastore_obj)
                    else:
                        task = self.content.vStorageObjectManager.HostDeleteVStorageObject_Task(self.disk.config.id,
                                                                                                self.datastore_obj)
                    wait_for_task(task)
                except vmodl.RuntimeFault as runtime_fault:
                    self.module.fail_json(msg=to_native(runtime_fault.msg))
                except vmodl.MethodFault as method_fault:
                    self.module.fail_json(msg=to_native(method_fault.msg))
                except TaskError as task_e:
                    self.module.fail_json(msg=to_native(task_e))
                except Exception as generic_exc:
                    self.module.fail_json(msg="Failed to delete disk"
                                              " due to generic exception %s" % to_native(generic_exc))

        self.module.exit_json(**result)
+
+
def main():
    """Module entry point: build the argument spec and run the requested state."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        datacenter_name=dict(type='str'),
        datastore_name=dict(required=True, type='str'),
        disk_name=dict(required=True, type='str'),
        size=dict(type='str'),
        state=dict(default='present', choices=['present', 'absent'], type='str')
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        # A size is only needed when the disk is to exist.
        required_if=[
            ['state', 'present', ['size']]
        ],
        supports_check_mode=True,
    )

    disk_manager = FirstClassDisk(module)
    if disk_manager.desired_state == 'present':
        disk_manager.create()
    if disk_manager.desired_state == 'absent':
        disk_manager.delete()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_folder_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_folder_info.py
new file mode 100644
index 000000000..fd99d782b
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_folder_info.py
@@ -0,0 +1,224 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, David Hewitt <davidmhewitt@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
# Standard Ansible module documentation blocks. These raw YAML strings are
# parsed by ansible-doc and the plugin loader; they are data, not executed code.
DOCUMENTATION = r'''
---
module: vmware_folder_info
short_description: Provides information about folders in a datacenter
description:
- The module can be used to gather a hierarchical view of the folders that exist within a datacenter
author:
- David Hewitt (@davidmhewitt)
notes:
- C(flat_folder_info) added in VMware collection 1.4.0.
options:
  datacenter:
    description:
    - Name of the datacenter.
    required: true
    type: str
    aliases: ['datacenter_name']
extends_documentation_fragment:
- community.vmware.vmware.documentation

'''

# Task examples rendered by ansible-doc; the second example shows how to
# resolve a folder's managed object ID (moid) from the flat listing.
EXAMPLES = r'''
- name: Provide information about vCenter folders
  community.vmware.vmware_folder_info:
    hostname: '{{ vcenter_hostname }}'
    username: '{{ vcenter_username }}'
    password: '{{ vcenter_password }}'
    datacenter: datacenter_name
  delegate_to: localhost
  register: vcenter_folder_info

- name: Get information about folders
  community.vmware.vmware_folder_info:
    hostname: '{{ vcenter_hostname }}'
    username: '{{ vcenter_username }}'
    password: '{{ vcenter_password }}'
    datacenter: 'Asia-Datacenter1'
  register: r

- name: Set Managed object ID for the given folder
  ansible.builtin.set_fact:
    folder_mo_id: "{{ (r.flat_folder_info | selectattr('path', 'equalto', '/Asia-Datacenter1/vm/tier1/tier2') | map(attribute='moid'))[0] }}"
'''

# Shape of the values this module registers on success: a flat list of
# path/moid pairs and a nested tree keyed by the four folder roots.
RETURN = r'''
flat_folder_info:
    description:
    - list of dict about folders in flat structure
    returned: success
    type: list
    sample:
        [
            {
                "moid": "group-v3",
                "path": "/Asia-Datacenter1/vm"
            },
            {
                "moid": "group-v44",
                "path": "/Asia-Datacenter1/vm/tier1"
            },
            {
                "moid": "group-v45",
                "path": "/Asia-Datacenter1/vm/tier1/tier2"
            }
        ]
folder_info:
    description:
    - dict about folders
    returned: success
    type: dict
    sample:
        {
            "datastoreFolders": {
                "moid": "group-v10",
                "path": "/DC01/datastore",
                "subfolders": {
                    "Local Datastores": {
                        "path": "/DC01/datastore/Local Datastores",
                        "subfolders": {}
                    }
                }
            },
            "hostFolders": {
                "moid": "group-v21",
                "path": "/DC01/host",
                "subfolders": {}
            },
            "networkFolders": {
                "moid": "group-v31",
                "path": "/DC01/network",
                "subfolders": {}
            },
            "vmFolders": {
                "moid": "group-v41",
                "path": "/DC01/vm",
                "subfolders": {
                    "Core Infrastructure Servers": {
                        "moid": "group-v42",
                        "path": "/DC01/vm/Core Infrastructure Servers",
                        "subfolders": {
                            "Staging Network Services": {
                                "moid": "group-v43",
                                "path": "/DC01/vm/Core Infrastructure Servers/Staging Network Services",
                                "subfolders": {}
                            },
                            "VMware": {
                                "moid": "group-v44",
                                "path": "/DC01/vm/Core Infrastructure Servers/VMware",
                                "subfolders": {}
                            }
                        }
                    }
                }
            }
        }
'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+
+
class VmwareFolderInfoManager(PyVmomi):
    """Collect folder layout information for a single datacenter.

    Builds both a nested representation ('folder_info') and a flat list
    ('flat_folder_info') of the vm/host/network/datastore folder trees.
    """

    def __init__(self, module):
        super(VmwareFolderInfoManager, self).__init__(module)
        # Datacenter name the caller asked about.
        self.dc_name = self.params['datacenter']

    def gather_folder_info(self):
        """Walk the four folder roots of the datacenter and exit with the results."""
        datacenter = self.find_datacenter_by_name(self.dc_name)
        if datacenter is None:
            self.module.fail_json(msg="Failed to find the datacenter %s" % self.dc_name)

        # Each top-level folder root paired with its result key and path prefix.
        roots = [
            ('vmFolders', datacenter.vmFolder, "/%s/vm" % self.dc_name),
            ('hostFolders', datacenter.hostFolder, "/%s/host" % self.dc_name),
            ('networkFolders', datacenter.networkFolder, "/%s/network" % self.dc_name),
            ('datastoreFolders', datacenter.datastoreFolder, "/%s/datastore" % self.dc_name),
        ]

        folder_trees = {}
        flat_folder_info = []
        for key, root, prefix in roots:
            folder_trees[key] = self.build_folder_tree(root, prefix)
            flat_folder_info.extend(self.build_flat_folder_tree(root, prefix))

        self.module.exit_json(
            changed=False,
            folder_info=folder_trees,
            flat_folder_info=flat_folder_info,
        )

    def build_flat_folder_tree(self, folder, path):
        """Return a flat list of {'path', 'moid'} entries for *folder* and its sub-folders."""
        entries = [{
            'path': path,
            'moid': folder._moId,
        }]

        # Only Folder-type managed objects have 'childEntity'; others contribute nothing.
        for child in getattr(folder, 'childEntity', None) or []:
            # Guard against a self-referencing child entry.
            if child == folder:
                continue
            if isinstance(child, vim.Folder):
                entries.extend(self.build_flat_folder_tree(child, "%s/%s" % (path, child.name)))
        return entries

    def build_folder_tree(self, folder, path):
        """Return the nested tree for *folder*: path, moid and a dict of sub-folders."""
        node = {
            'path': path,
            'subfolders': {},
            'moid': folder._moId,
        }

        for child in getattr(folder, 'childEntity', None) or []:
            if child == folder:
                continue
            if isinstance(child, vim.Folder):
                subtree = self.build_folder_tree(child, "%s/%s" % (path, child.name))
                # Shallow copy, matching the original dict.copy() semantics.
                node['subfolders'][child.name] = dict(subtree)
        return node
+
+
def main():
    """Module entry point: parse arguments and report folder information."""
    spec = vmware_argument_spec()
    spec.update(
        datacenter=dict(type='str', required=True, aliases=['datacenter_name'])
    )

    # Pure information gathering: check mode is always safe.
    module = AnsibleModule(
        argument_spec=spec,
        supports_check_mode=True,
    )

    VmwareFolderInfoManager(module).gather_folder_info()


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest.py
new file mode 100644
index 000000000..16dcb301c
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest.py
@@ -0,0 +1,3601 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This module is also sponsored by E.T.A.I. (www.etai.fr)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest
+short_description: Manages virtual machines in vCenter
+description: >
+ This module can be used to create new virtual machines from templates or other virtual machines,
+ manage power state of virtual machine such as power on, power off, suspend, shutdown, reboot, restart etc.,
+ modify various virtual machine components like network, disk, customization etc.,
+ rename a virtual machine and remove a virtual machine with associated components.
+author:
+- Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
+- Philippe Dellaert (@pdellaert) <philippe@dellaert.org>
+- Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
+notes:
+ - Please make sure that the user used for M(community.vmware.vmware_guest) has the correct level of privileges.
+ - For example, following is the list of minimum privileges required by users to create virtual machines.
+ - " DataStore > Allocate Space"
+ - " Virtual Machine > Configuration > Add New Disk"
+ - " Virtual Machine > Configuration > Add or Remove Device"
+ - " Virtual Machine > Inventory > Create New"
+ - " Network > Assign Network"
+ - " Resource > Assign Virtual Machine to Resource Pool"
+ - "Module may require additional privileges as well, which may be required for gathering facts - e.g. ESXi configurations."
+ - Use SCSI disks instead of IDE when you want to expand online disks by specifying a SCSI controller.
+ - Uses SysPrep for Windows VM (depends on 'guest_id' parameter match 'win') with PyVmomi.
+ - In order to change the VM's parameters (e.g. number of CPUs), the VM must be powered off unless the hot-add
+ support is enabled and the C(state=present) must be used to apply the changes.
+ - "For additional information please visit Ansible VMware community wiki - U(https://github.com/ansible/community/wiki/VMware)."
+options:
+ state:
+ description:
+ - Specify the state the virtual machine should be in.
+ - If C(state) is set to C(present) and virtual machine exists, ensure the virtual machine configurations conforms to task arguments.
+ - If C(state) is set to C(absent) and virtual machine exists, then the specified virtual machine is removed with it's associated components.
+ - If C(state) is set to one of the following C(poweredon), C(powered-on), C(poweredoff), C(powered-off),
+ C(present), C(restarted), C(suspended) and virtual machine does not exists, virtual machine is deployed with the given parameters.
+ - If C(state) is set to C(poweredon) or C(powered-on) and virtual machine exists with powerstate other than powered on,
+ then the specified virtual machine is powered on.
+ - If C(state) is set to C(poweredoff) or C(powered-off) and virtual machine exists with powerstate other than powered off,
+ then the specified virtual machine is powered off.
+ - If C(state) is set to C(restarted) and virtual machine exists, then the virtual machine is restarted.
+ - If C(state) is set to C(suspended) and virtual machine exists, then the virtual machine is set to suspended mode.
+ - If C(state) is set to C(shutdownguest) or C(shutdown-guest) and virtual machine exists, then the virtual machine is shutdown.
+ - If C(state) is set to C(rebootguest) or C(reboot-guest) and virtual machine exists, then the virtual machine is rebooted.
+ - Powerstate C(powered-on) and C(powered-off) is added in version 2.10.
+ default: present
+ type: str
+ choices: [ absent, poweredon, powered-on, poweredoff, powered-off, present, rebootguest, reboot-guest, restarted, suspended, shutdownguest, shutdown-guest]
+ name:
+ description:
+ - Name of the virtual machine to work with.
+ - Virtual machine names in vCenter are not necessarily unique, which may be problematic, see C(name_match).
+ - If multiple virtual machines with same name exists, then C(folder) is required parameter to
+ identify uniqueness of the virtual machine.
+ - This parameter is required, if C(state) is set to C(poweredon), C(powered-on), C(poweredoff), C(powered-off),
+ C(present), C(restarted), C(suspended) and virtual machine does not exists.
+ - This parameter is case sensitive.
+ type: str
+ name_match:
+ description:
+ - If multiple virtual machines matching the name, use the first or last found.
+ default: 'first'
+ choices: [ 'first', 'last' ]
+ type: str
+ uuid:
+ description:
+ - UUID of the virtual machine to manage if known, this is VMware's unique identifier.
+ - This is required if C(name) is not supplied.
+ - If virtual machine does not exists, then this parameter is ignored.
+ - Please note that a supplied UUID will be ignored on virtual machine creation, as VMware creates the UUID internally.
+ type: str
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: false
+ type: bool
+ template:
+ description:
+ - Template or existing virtual machine used to create new virtual machine.
+ - If this value is not set, virtual machine is created without using a template.
+ - If the virtual machine already exists, this parameter will be ignored.
+ - This parameter is case sensitive.
+ - From version 2.8 onwards, absolute path to virtual machine or template can be used.
+ aliases: [ 'template_src' ]
+ type: str
+ is_template:
+ description:
+ - Flag the instance as a template.
+ - This will mark the given virtual machine as template.
+ - Note, this may need to be done in a dedicated task invocation that is not making
+ any other changes. For example, user cannot change the state from powered-on to
+ powered-off AND save as template in the same task.
+ - See M(community.vmware.vmware_guest) source for more details.
+ default: false
+ type: bool
+ folder:
+ description:
+ - Destination folder, absolute path to find an existing guest or create the new guest.
+ - "The folder should include the datacenter. ESXi's datacenter is ha-datacenter."
+ - This parameter is case sensitive.
+ - 'If multiple machines are found with same name, this parameter is used to identify'
+ - 'uniqueness of the virtual machine. Added in Ansible 2.5.'
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ type: str
+ hardware:
+ type: dict
+ default: {}
+ description:
+ - "Manage virtual machine's hardware attributes."
+ - All parameters case sensitive.
+ suboptions:
+ hotadd_cpu:
+ type: bool
+ description: Allow virtual CPUs to be added while the virtual machine is running.
+ hotremove_cpu:
+ type: bool
+ description: Allow virtual CPUs to be removed while the virtual machine is running.
+ hotadd_memory:
+ type: bool
+ description: Allow memory to be added while the virtual machine is running.
+ memory_mb:
+ type: int
+ description: Amount of memory in MB.
+ num_cpus:
+ type: int
+ description:
+ - Number of CPUs.
+ - C(num_cpus) must be a multiple of C(num_cpu_cores_per_socket).
+ - For example, to create a VM with 2 sockets of 4 cores, specify C(num_cpus) as 8 and C(num_cpu_cores_per_socket) as 4.
+ num_cpu_cores_per_socket:
+ type: int
+ description: Number of Cores Per Socket.
+ cpu_shares_level:
+ type: str
+ choices: [ 'low', 'normal', 'high', 'custom' ]
+ description:
+ - The allocation level of CPU resources for the virtual machine.
+ - Valid Values are C(low), C(normal), C(high) and C(custom).
+ version_added: '3.2.0'
+ cpu_shares:
+ type: int
+ description:
+ - The number of shares of CPU allocated to this virtual machine
+ - cpu_shares_level will automatically be set to 'custom'
+ version_added: '3.2.0'
+ vpmc_enabled:
+ version_added: '3.2.0'
+ type: bool
+ description: Enable virtual CPU Performance Counters.
+ scsi:
+ type: str
+ description:
+ - Valid values are C(buslogic), C(lsilogic), C(lsilogicsas) and C(paravirtual).
+ - C(paravirtual) is default.
+ choices: [ 'buslogic', 'lsilogic', 'lsilogicsas', 'paravirtual' ]
+ secure_boot:
+ type: bool
+ description: Whether to enable or disable (U)EFI secure boot.
+ memory_reservation_lock:
+ type: bool
+ description:
+ - If set C(true), memory resource reservation for the virtual machine.
+ max_connections:
+ type: int
+ description:
+ - Maximum number of active remote display connections for the virtual machines.
+ mem_limit:
+ type: int
+ description:
+ - The memory utilization of a virtual machine will not exceed this limit.
+ - Unit is MB.
+ mem_reservation:
+ type: int
+ description: The amount of memory resource that is guaranteed available to the virtual machine.
+ aliases: [ 'memory_reservation' ]
+ mem_shares_level:
+ type: str
+ description:
+ - The allocation level of memory resources for the virtual machine.
+ - Valid Values are C(low), C(normal), C(high) and C(custom).
+ choices: [ 'low', 'normal', 'high', 'custom' ]
+ version_added: '3.2.0'
+ mem_shares:
+ type: int
+ description:
+ - The number of shares of memory allocated to this virtual machine
+ - mem_shares_level will automatically be set to 'custom'
+ version_added: '3.2.0'
+ cpu_limit:
+ type: int
+ description:
+ - The CPU utilization of a virtual machine will not exceed this limit.
+ - Unit is MHz.
+ cpu_reservation:
+ type: int
+ description: The amount of CPU resource that is guaranteed available to the virtual machine.
+ version:
+ type: str
+ description:
+ - The Virtual machine hardware versions.
+ - Default is 10 (ESXi 5.5 and onwards).
+ - If set to C(latest), the specified virtual machine will be upgraded to the most current hardware version supported on the host.
+ - C(latest) is added in Ansible 2.10.
+ - Please check VMware documentation for correct virtual machine hardware version.
+ - Incorrect hardware version may lead to failure in deployment. If hardware version is already equal to the given.
+ boot_firmware:
+ type: str
+ description: Choose which firmware should be used to boot the virtual machine.
+ choices: [ 'bios', 'efi' ]
+ nested_virt:
+ type: bool
+ description:
+ - Enable nested virtualization.
+ virt_based_security:
+ type: bool
+ description:
+ - Enable Virtualization Based Security feature for Windows on ESXi 6.7 and later, from hardware version 14.
+ - Supported Guest OS are Windows 10 64 bit, Windows Server 2016, Windows Server 2019 and later.
+ - The firmware of virtual machine must be EFI and secure boot must be enabled.
+ - Virtualization Based Security depends on nested virtualization and Intel Virtualization Technology for Directed I/O.
+ - Deploy on unsupported ESXi, hardware version or firmware may lead to failure or deployed VM with unexpected configurations.
+ iommu:
+ type: bool
+ description: Flag to specify if I/O MMU is enabled for this virtual machine.
+ guest_id:
+ type: str
+ description:
+ - Set the guest ID.
+ - This parameter is case sensitive.
+ - C(rhel7_64Guest) for virtual machine with RHEL7 64 bit.
+ - C(centos64Guest) for virtual machine with CentOS 64 bit.
+ - C(ubuntu64Guest) for virtual machine with Ubuntu 64 bit.
+ - This field is required when creating a virtual machine, not required when creating from the template.
+ - >
+ Valid values are referenced here:
+ U(https://code.vmware.com/apis/358/doc/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html)
+ disk:
+ description:
+ - A list of disks to add.
+ - This parameter is case sensitive.
+ - Shrinking disks is not supported.
+ - Removing existing disks of the virtual machine is not supported.
+ - 'Attributes C(controller_type), C(controller_number), C(unit_number) are used to configure multiple types of disk
+ controllers and disks for creating or reconfiguring virtual machine. Added in Ansible 2.10.'
+ type: list
+ default: []
+ elements: dict
+ suboptions:
+ size:
+ description:
+ - Disk storage size.
+ - Please specify storage unit like [kb, mb, gb, tb].
+ type: str
+ size_kb:
+ description: Disk storage size in kb.
+ type: int
+ size_mb:
+ description: Disk storage size in mb.
+ type: int
+ size_gb:
+ description: Disk storage size in gb.
+ type: int
+ size_tb:
+ description: Disk storage size in tb.
+ type: int
+ type:
+ description:
+ - Type of disk.
+ - If C(thin) specified, disk type is set to thin disk.
+ - If C(eagerzeroedthick) specified, disk type is set to eagerzeroedthick disk. Added Ansible 2.5.
+ - If not specified, disk type is inherited from the source VM or template when cloned and thick disk, no eagerzero otherwise.
+ type: str
+ choices: [ 'thin', 'thick', 'eagerzeroedthick' ]
+ datastore:
+ type: str
+ description:
+ - The name of datastore which will be used for the disk.
+ - If C(autoselect_datastore) is set to True, will select the less used datastore whose name contains this "disk.datastore" string.
+ filename:
+ type: str
+ description:
+ - Existing disk image to be used.
+ - Filename must already exist on the datastore.
+ - Specify filename string in C([datastore_name] path/to/file.vmdk) format. Added in Ansible 2.8.
+ autoselect_datastore:
+ type: bool
+ description:
+ - Select the less used datastore.
+ - C(disk.datastore) and C(disk.autoselect_datastore) will not be used if C(datastore) is specified outside this C(disk) configuration.
+ disk_mode:
+ type: str
+ choices: ['persistent', 'independent_persistent', 'independent_nonpersistent']
+ description:
+ - Type of disk mode.
+ - Added in Ansible 2.6.
+ - If C(persistent) specified, changes are immediately and permanently written to the virtual disk. This is default.
+ - If C(independent_persistent) specified, same as persistent, but not affected by snapshots.
+ - If C(independent_nonpersistent) specified, changes to virtual disk are made to a redo log and discarded at power off,
+ but not affected by snapshots.
+ controller_type:
+ type: str
+ choices: ['buslogic', 'lsilogic', 'lsilogicsas', 'paravirtual', 'sata', 'nvme']
+ description:
+ - Type of disk controller.
+ - C(nvme) controller type support starts on ESXi 6.5 with VM hardware version C(version) 13.
+ Set this type on not supported ESXi or VM hardware version will lead to failure in deployment.
+ - When set to C(sata), please make sure C(unit_number) is correct and not used by SATA CDROMs.
+ - If set to C(sata) type, please make sure C(controller_number) and C(unit_number) are set correctly when C(cdrom) also set to C(sata) type.
+ controller_number:
+ type: int
+ choices: [0, 1, 2, 3]
+ description:
+ - Disk controller bus number.
+ - The maximum number of same type controller is 4 per VM.
+ unit_number:
+ type: int
+ description:
+ - Disk Unit Number.
+ - Valid value range from 0 to 15 for SCSI controller, except 7.
+ - Valid value range from 0 to 14 for NVME controller.
+ - Valid value range from 0 to 29 for SATA controller.
+ - C(controller_type), C(controller_number) and C(unit_number) are required when creating or reconfiguring VMs
+ with multiple types of disk controllers and disks.
+ - When creating new VM, the first configured disk in the C(disk) list will be "Hard Disk 1".
+ nvdimm:
+ description:
+ - Add or remove a virtual NVDIMM device to the virtual machine.
+ - VM virtual hardware version must be 14 or higher on vSphere 6.7 or later.
+ - Verify that guest OS of the virtual machine supports PMem before adding virtual NVDIMM device.
+ - Verify that you have the I(Datastore.Allocate) space privilege on the virtual machine.
+ - Make sure that the host or the cluster on which the virtual machine resides has available PMem resources.
+ - To add or remove virtual NVDIMM device to the existing virtual machine, it must be in power off state.
+ type: dict
+ default: {}
+ suboptions:
+ state:
+ type: str
+ description:
+ - Valid value is C(present) or C(absent).
+ - If set to C(absent), then the NVDIMM device with specified C(label) will be removed.
+ choices: ['present', 'absent']
+ size_mb:
+ type: int
+ description: Virtual NVDIMM device size in MB.
+ default: 1024
+ label:
+ type: str
+ description:
+ - The label of the virtual NVDIMM device to be removed or configured, e.g., "NVDIMM 1".
+ - 'This parameter is required when C(state) is set to C(absent), or C(present) to reconfigure NVDIMM device
+ size. When add a new device, please do not set C(label).'
+ cdrom:
+ description:
+ - A list of CD-ROM configurations for the virtual machine. Added in version 2.9.
+ - Providing CD-ROM configuration as dict is deprecated and will be removed VMware collection 4.0.0.
+ Please use a list instead.
+ - 'Parameters C(controller_type), C(controller_number), C(unit_number), C(state) are added for a list of CD-ROMs
+ configuration support.'
+ - For C(ide) controller, hot-add or hot-remove CD-ROM is not supported.
+ type: raw
+ default: []
+ suboptions:
+ type:
+ type: str
+ description:
+ - The type of CD-ROM, valid options are C(none), C(client) or C(iso).
+ - With C(none) the CD-ROM will be disconnected but present.
+ - The default value is C(client).
+ iso_path:
+ type: str
+ description:
+ - The datastore path to the ISO file to use, in the form of C([datastore1] path/to/file.iso).
+ - Required if type is set C(iso).
+ controller_type:
+ type: str
+ description:
+ - Valid options are C(ide) and C(sata).
+ - Default value is C(ide).
+ - When set to C(sata), please make sure C(unit_number) is correct and not used by SATA disks.
+ controller_number:
+ type: int
+ description:
+ - For C(ide) controller, valid value is 0 or 1.
+ - For C(sata) controller, valid value is 0 to 3.
+ unit_number:
+ type: int
+ description:
+ - For CD-ROM device attach to C(ide) controller, valid value is 0 or 1.
+ - For CD-ROM device attach to C(sata) controller, valid value is 0 to 29.
+ - C(controller_number) and C(unit_number) are mandatory attributes.
+ state:
+ type: str
+ description:
+ - Valid value is C(present) or C(absent).
+ - Default is C(present).
+ - If set to C(absent), then the specified CD-ROM will be removed.
+ resource_pool:
+ description:
+ - Use the given resource pool for virtual machine operation.
+ - This parameter is case sensitive.
+ - Resource pool should be child of the selected host parent.
+ - When not specified I(Resources) is taken as default value.
+ type: str
+ wait_for_ip_address:
+ description:
+ - Wait until vCenter detects an IP address for the virtual machine.
+ - This requires vmware-tools (vmtoolsd) to properly work after creation.
+ - "vmware-tools needs to be installed on the given virtual machine in order to work with this parameter."
+ default: false
+ type: bool
+ wait_for_ip_address_timeout:
+ description:
+ - Define a timeout (in seconds) for the wait_for_ip_address parameter.
+ default: '300'
+ type: int
+ wait_for_customization_timeout:
+ description:
+ - Define a timeout (in seconds) for the wait_for_customization parameter.
+ - Be careful when setting this value since the time guest customization took may differ among guest OSes.
+ default: '3600'
+ type: int
+ wait_for_customization:
+ description:
+ - Wait until vCenter detects all guest customizations as successfully completed.
+ - When enabled, the VM will automatically be powered on.
+ - "If vCenter does not detect guest customization start or succeed, failed events after time
+ C(wait_for_customization_timeout) parameter specified, warning message will be printed and task result is fail."
+ default: false
+ type: bool
+ state_change_timeout:
+ description:
+ - If the C(state) is set to C(shutdownguest), by default the module will return immediately after sending the shutdown signal.
+ - If this argument is set to a positive integer, the module will instead wait for the virtual machine to reach the poweredoff state.
+ - The value sets a timeout in seconds for the module to wait for the state change.
+ default: 0
+ type: int
+ snapshot_src:
+ description:
+ - Name of the existing snapshot to use to create a clone of a virtual machine.
+ - This parameter is case sensitive.
+ - While creating linked clone using C(linked_clone) parameter, this parameter is required.
+ type: str
+ linked_clone:
+ description:
+ - Whether to create a linked clone from the snapshot specified.
+ - If specified, then C(snapshot_src) is required parameter.
+ default: false
+ type: bool
+ force:
+ description:
+ - Ignore warnings and complete the actions.
+ - This parameter is useful while removing virtual machine which is powered on state.
+ - 'This module reflects the VMware vCenter API and UI workflow, as such, in some cases the `force` flag will
+ be mandatory to perform the action to ensure you are certain the action has to be taken, no matter what the consequence.
+ This is specifically the case for removing a powered on the virtual machine when C(state) is set to C(absent).'
+ default: false
+ type: bool
+ delete_from_inventory:
+ description:
+ - Whether to delete Virtual machine from inventory or delete from disk.
+ default: false
+ type: bool
+ datacenter:
+ description:
+ - Destination datacenter for the deploy operation.
+ - This parameter is case sensitive.
+ default: ha-datacenter
+ type: str
+ cluster:
+ description:
+ - The cluster name where the virtual machine will run.
+ - This is a required parameter, if C(esxi_hostname) is not set.
+ - C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
+ - This parameter is case sensitive.
+ type: str
+ esxi_hostname:
+ description:
+ - The ESXi hostname where the virtual machine will run.
+ - This is a required parameter, if C(cluster) is not set.
+ - C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
+ - This parameter is case sensitive.
+ type: str
+ advanced_settings:
+ description:
+ - Define a list of advanced settings to be added to the VMX config.
+ - An advanced settings object takes two fields C(key) and C(value).
+ - Incorrect key and values will be ignored.
+ elements: dict
+ type: list
+ default: []
+ annotation:
+ description:
+ - A note or annotation to include in the virtual machine.
+ type: str
+ aliases: [ 'notes' ]
+ customvalues:
+ description:
+ - Define a list of custom values to set on virtual machine.
+ - A custom value object takes two fields C(key) and C(value).
+ - Incorrect key and values will be ignored.
+ elements: dict
+ type: list
+ default: []
+ networks:
+ description:
+ - A list of networks (in the order of the NICs).
+ - Removing NICs is not allowed, while reconfiguring the virtual machine.
+ - All parameters and VMware object names are case sensitive.
+ - The I(type), I(ip), I(netmask), I(gateway), I(domain), I(dns_servers) options don't set to a guest when creating a blank new virtual machine.
+ They are set by the customization via vmware-tools.
+ If you want to set the value of the options to a guest, you need to clone from a template with installed OS and vmware-tools(also Perl when Linux).
+ type: list
+ default: []
+ elements: dict
+ suboptions:
+ name:
+ type: str
+ description:
+ - Name of the portgroup or distributed virtual portgroup for this interface.
+ - Required per entry.
+ - When specifying distributed virtual portgroup make sure given C(esxi_hostname) or C(cluster) is associated with it.
+ vlan:
+ type: int
+ description:
+ - VLAN number for this interface.
+ - Required per entry.
+ device_type:
+ type: str
+ description:
+ - Virtual network device.
+ - Valid value can be one of C(e1000), C(e1000e), C(pcnet32), C(vmxnet2), C(vmxnet3), C(sriov).
+ - C(vmxnet3) is default.
+ - Optional per entry.
+ - Used for virtual hardware.
+ mac:
+ type: str
+ description:
+ - Customize MAC address.
+ - Optional per entry.
+ - Used for virtual hardware.
+ dvswitch_name:
+ type: str
+ description:
+ - Name of the distributed vSwitch.
+ - Optional per entry.
+ - Used for virtual hardware.
+ type:
+ type: str
+ description:
+ - Type of IP assignment.
+ - Valid values are one of C(dhcp), C(static).
+ - C(dhcp) is default.
+ - Optional per entry.
+ - Used for OS customization.
+ ip:
+ type: str
+ description:
+ - Static IP address. Implies C(type=static).
+ - Optional per entry.
+ - Used for OS customization.
+ netmask:
+ type: str
+ description:
+ - Static netmask required for C(ip).
+ - Optional per entry.
+ - Used for OS customization.
+ gateway:
+ type: str
+ description:
+ - Static gateway.
+ - Optional per entry.
+ - Used for OS customization.
+ dns_servers:
+ type: str
+ description:
+ - DNS servers for this network interface (Windows).
+ - Optional per entry.
+ - Used for OS customization.
+ domain:
+ type: str
+ description:
+ - Domain name for this network interface (Windows).
+ - Optional per entry.
+ - Used for OS customization.
+ connected:
+ type: bool
+ description:
+ - Indicates whether the NIC is currently connected.
+ start_connected:
+ type: bool
+ description:
+ - Specifies whether or not to connect the device when the virtual machine starts.
+ customization:
+ description:
+ - Parameters for OS customization when cloning from the template or the virtual machine, or apply to the existing virtual machine directly.
+ - Not all operating systems are supported for customization with respective vCenter version,
+ please check VMware documentation for respective OS customization.
+ - For supported customization operating system matrix, (see U(http://partnerweb.vmware.com/programs/guestOS/guest-os-customization-matrix.pdf))
+ - All parameters and VMware object names are case sensitive.
+ - Linux based OSes requires Perl package to be installed for OS customizations.
+ suboptions:
+ existing_vm:
+ type: bool
+ description:
+ - If set to C(true), do OS customization on the specified virtual machine directly.
+ - Common for Linux and Windows customization.
+ dns_servers:
+ type: list
+ elements: str
+ description:
+ - List of DNS servers to configure.
+ - Common for Linux and Windows customization.
+ dns_suffix:
+ type: list
+ elements: str
+ description:
+ - List of domain suffixes, also known as DNS search path.
+ - Default C(domain) parameter.
+ - Common for Linux and Windows customization.
+ domain:
+ type: str
+ description:
+ - DNS domain name to use.
+ - Common for Linux and Windows customization.
+ hostname:
+ type: str
+ description:
+ - Computer hostname.
+ - Default is shortened C(name) parameter.
+ - Allowed characters are alphanumeric (uppercase and lowercase) and minus, rest of the characters are dropped as per RFC 952.
+ - Common for Linux and Windows customization.
+ timezone:
+ type: str
+ description:
+ - Timezone.
+ - See List of supported time zones for different vSphere versions in Linux/Unix.
+ - Common for Linux and Windows customization.
+ - L(Windows, https://msdn.microsoft.com/en-us/library/ms912391.aspx).
+ hwclockUTC:
+ type: bool
+ description:
+ - Specifies whether the hardware clock is in UTC or local time.
+ - Specific to Linux customization.
+ script_text:
+ type: str
+ description:
+ - Script to run with shebang.
+ - Needs to be enabled in vmware tools with vmware-toolbox-cmd config set deployPkg enable-custom-scripts true
+ - https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-9A5093A5-C54F-4502-941B-3F9C0F573A39.html
+ - Specific to Linux customization.
+ version_added: '3.1.0'
+ autologon:
+ type: bool
+ description:
+ - Auto logon after virtual machine customization.
+ - Specific to Windows customization.
+ autologoncount:
+ type: int
+ description:
+ - Number of autologon after reboot.
+ - Specific to Windows customization.
+ - Ignored if C(autologon) is unset or set to C(false).
+ - If unset, 1 will be used.
+ domainadmin:
+ type: str
+ description:
+ - User used to join in AD domain.
+ - Required if C(joindomain) specified.
+ - Specific to Windows customization.
+ domainadminpassword:
+ type: str
+ description:
+ - Password used to join in AD domain.
+ - Required if C(joindomain) specified.
+ - Specific to Windows customization.
+ fullname:
+ type: str
+ description:
+ - Server owner name.
+ - Specific to Windows customization.
+ - If unset, "Administrator" will be used as a fall-back.
+ joindomain:
+ type: str
+ description:
+ - AD domain to join.
+ - Not compatible with C(joinworkgroup).
+ - Specific to Windows customization.
+ joinworkgroup:
+ type: str
+ description:
+ - Workgroup to join.
+ - Not compatible with C(joindomain).
+ - Specific to Windows customization.
+ - If unset, "WORKGROUP" will be used as a fall-back.
+ orgname:
+ type: str
+ description:
+ - Organisation name.
+ - Specific to Windows customization.
+ - If unset, "ACME" will be used as a fall-back.
+ password:
+ type: str
+ description:
+ - Local administrator password.
+ - If not defined, the password will be set to blank (that is, no password).
+ - Specific to Windows customization.
+ productid:
+ type: str
+ description:
+ - Product ID.
+ - Specific to Windows customization.
+ runonce:
+ type: list
+ elements: str
+ description:
+ - List of commands to run at first user logon.
+ - Specific to Windows customization.
+ type: dict
+ default: {}
+ vapp_properties:
+ description:
+ - A list of vApp properties.
+ - 'For full list of attributes and types refer to: U(https://code.vmware.com/apis/704/vsphere/vim.vApp.PropertyInfo.html)'
+ type: list
+ default: []
+ elements: dict
+ suboptions:
+ id:
+ type: str
+ description:
+ - Property ID.
+ - Required per entry.
+ value:
+ type: str
+ description:
+ - Property value.
+ type:
+ type: str
+ description:
+ - Value type, string type by default.
+ operation:
+ type: str
+ description:
+ - The C(remove) attribute is required only when removing properties.
+ customization_spec:
+ description:
+ - Unique name identifying the requested customization specification.
+ - This parameter is case sensitive.
+ - If set, then overrides C(customization) parameter values.
+ type: str
+ datastore:
+ description:
+ - Specify datastore or datastore cluster to provision virtual machine.
+ - This parameter takes precedence over C(disk.datastore) parameter.
+ - This parameter can be used to override datastore or datastore cluster setting
+ of the virtual machine when deployed from the template.
+ - Please see example for more usage.
+ type: str
+ convert:
+ description:
+ - Specify convert disk type while cloning template or virtual machine.
+ choices: [ 'thin', 'thick', 'eagerzeroedthick' ]
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Create a virtual machine on given ESXi hostname
+ community.vmware.vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: /DC1/vm/
+ name: test_vm_0001
+ state: poweredon
+ guest_id: centos64Guest
+ # This is hostname of particular ESXi server on which user wants VM to be deployed
+ esxi_hostname: "{{ esxi_hostname }}"
+ disk:
+ - size_gb: 10
+ type: thin
+ datastore: datastore1
+ hardware:
+ memory_mb: 512
+ num_cpus: 4
+ scsi: paravirtual
+ networks:
+ - name: VM Network
+ mac: aa:bb:dd:aa:00:14
+ ip: 10.10.10.100
+ netmask: 255.255.255.0
+ device_type: vmxnet3
+ wait_for_ip_address: true
+ wait_for_ip_address_timeout: 600
+ delegate_to: localhost
+ register: deploy_vm
+
+- name: Create a virtual machine from a template
+ community.vmware.vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: /testvms
+ name: testvm_2
+ state: poweredon
+ template: template_el7
+ disk:
+ - size_gb: 10
+ type: thin
+ datastore: g73_datastore
+ # Add another disk from an existing VMDK
+ - filename: "[datastore1] testvms/testvm_2_1/testvm_2_1.vmdk"
+ hardware:
+ memory_mb: 512
+ num_cpus: 6
+ num_cpu_cores_per_socket: 3
+ scsi: paravirtual
+ memory_reservation_lock: true
+ mem_limit: 8096
+ mem_reservation: 4096
+ cpu_shares_level: "high"
+ mem_shares_level: "high"
+ cpu_limit: 8096
+ cpu_reservation: 4096
+ max_connections: 5
+ hotadd_cpu: true
+ hotremove_cpu: true
+ hotadd_memory: false
+ version: 12 # Hardware version of virtual machine
+ boot_firmware: "efi"
+ cdrom:
+ - controller_number: 0
+ unit_number: 0
+ state: present
+ type: iso
+ iso_path: "[datastore1] livecd.iso"
+ networks:
+ - name: VM Network
+ mac: aa:bb:dd:aa:00:14
+ wait_for_ip_address: true
+ delegate_to: localhost
+ register: deploy
+
+- name: Clone a virtual machine from Windows template and customize
+ community.vmware.vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: datacenter1
+ cluster: cluster
+ name: testvm-2
+ template: template_windows
+ networks:
+ - name: VM Network
+ ip: 192.168.1.100
+ netmask: 255.255.255.0
+ gateway: 192.168.1.1
+ mac: aa:bb:dd:aa:00:14
+ domain: my_domain
+ dns_servers:
+ - 192.168.1.1
+ - 192.168.1.2
+ - vlan: 1234
+ type: dhcp
+ customization:
+ autologon: true
+ dns_servers:
+ - 192.168.1.1
+ - 192.168.1.2
+ domain: my_domain
+ password: new_vm_password
+ runonce:
+ - powershell.exe -ExecutionPolicy Unrestricted -File C:\Windows\Temp\ConfigureRemotingForAnsible.ps1 -ForceNewSSLCert -EnableCredSSP
+ delegate_to: localhost
+
+- name: Clone a virtual machine from Linux template and customize
+ community.vmware.vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter }}"
+ state: present
+ folder: /DC1/vm
+ template: "{{ template }}"
+ name: "{{ vm_name }}"
+ cluster: DC1_C1
+ networks:
+ - name: VM Network
+ ip: 192.168.10.11
+ netmask: 255.255.255.0
+ wait_for_ip_address: true
+ customization:
+ domain: "{{ guest_domain }}"
+ dns_servers:
+ - 8.9.9.9
+ - 7.8.8.9
+ dns_suffix:
+ - example.com
+ - example2.com
+ script_text: |
+ #!/bin/bash
+ touch /tmp/touch-from-playbook
+ delegate_to: localhost
+
+- name: Rename a virtual machine (requires the virtual machine's uuid)
+ community.vmware.vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ uuid: "{{ vm_uuid }}"
+ name: new_name
+ state: present
+ delegate_to: localhost
+
+- name: Remove a virtual machine by uuid
+ community.vmware.vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ uuid: "{{ vm_uuid }}"
+ state: absent
+ delegate_to: localhost
+
+- name: Remove a virtual machine from inventory
+ community.vmware.vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: vm_name
+ delete_from_inventory: true
+ state: absent
+ delegate_to: localhost
+
+- name: Manipulate vApp properties
+ community.vmware.vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: vm_name
+ state: present
+ vapp_properties:
+ - id: remoteIP
+ category: Backup
+ label: Backup server IP
+ type: string
+ value: 10.10.10.1
+ - id: old_property
+ operation: remove
+ delegate_to: localhost
+
+- name: Set powerstate of a virtual machine to poweroff by using UUID
+ community.vmware.vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ uuid: "{{ vm_uuid }}"
+ state: poweredoff
+ delegate_to: localhost
+
+- name: Deploy a virtual machine in a datastore different from the datastore of the template
+ community.vmware.vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: "{{ vm_name }}"
+ state: present
+ template: "{{ template_name }}"
+ # Here datastore can be different which holds template
+ datastore: "{{ virtual_machine_datastore }}"
+ hardware:
+ memory_mb: 512
+ num_cpus: 2
+ scsi: paravirtual
+ delegate_to: localhost
+
+- name: Create a diskless VM
+ community.vmware.vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ dc1 }}"
+ state: poweredoff
+ cluster: "{{ ccr1 }}"
+ name: diskless_vm
+ folder: /Asia-Datacenter1/vm
+ guest_id: centos64Guest
+ datastore: "{{ ds1 }}"
+ hardware:
+ memory_mb: 1024
+ num_cpus: 2
+ num_cpu_cores_per_socket: 1
+
+- name: Create a VM with multiple disks of different disk controller types
+ community.vmware.vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: /DC1/vm/
+ name: test_vm_multi_disks
+ state: poweredoff
+ guest_id: centos64Guest
+ datastore: datastore1
+ disk:
+ - size_gb: 10
+ controller_type: 'nvme'
+ controller_number: 0
+ unit_number: 0
+ - size_gb: 10
+ controller_type: 'paravirtual'
+ controller_number: 0
+ unit_number: 1
+ - size_gb: 10
+ controller_type: 'sata'
+ controller_number: 0
+ unit_number: 2
+ hardware:
+ memory_mb: 512
+ num_cpus: 4
+ version: 14
+ networks:
+ - name: VM Network
+ device_type: vmxnet3
+ delegate_to: localhost
+ register: deploy_vm
+
+- name: Create a VM with NVDIMM device
+ community.vmware.vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: /DC1/vm/
+ name: test_vm_nvdimm
+ state: poweredoff
+ guest_id: centos7_64Guest
+ datastore: datastore1
+ hardware:
+ memory_mb: 512
+ num_cpus: 4
+ version: 14
+ networks:
+ - name: VM Network
+ device_type: vmxnet3
+ nvdimm:
+ state: present
+ size_mb: 2048
+ delegate_to: localhost
+ register: deploy_vm
+'''
+
# Documentation of the values this module registers; rendered by ansible-doc.
RETURN = r'''
instance:
    description: metadata about the new virtual machine
    returned: always
    type: dict
    sample: None
'''
+
+import re
+import time
+import string
+
+HAS_PYVMOMI = False
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.network import is_mac
+from ansible.module_utils._text import to_text, to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ find_obj,
+ gather_vm_facts,
+ get_all_objs,
+ compile_folder_path_for_object,
+ serialize_spec,
+ vmware_argument_spec,
+ set_vm_power_state,
+ PyVmomi,
+ find_dvs_by_name,
+ find_dvspg_by_name,
+ wait_for_vm_ip,
+ quote_obj_name,
+)
+from ansible_collections.community.vmware.plugins.module_utils.vm_device_helper import PyVmomiDeviceHelper
+from ansible_collections.community.vmware.plugins.module_utils.vmware_spbm import SPBM
+
+
class PyVmomiCache(object):
    """ This class caches references to objects which are requested multiples times but not modified """

    def __init__(self, content, dc_name=None):
        # ServiceInstance content root used for all lookups.
        self.content = content
        # Name of the datacenter lookups are confined to (may be None).
        self.dc_name = dc_name
        # Per-type caches: keyed by name for networks/clusters/hosts,
        # keyed by object for parent-datacenter lookups.
        self.networks = {}
        self.clusters = {}
        self.esx_hosts = {}
        self.parent_datacenters = {}

    def find_obj(self, content, types, name, confine_to_datacenter=True):
        """ Wrapper around find_obj to set datacenter context """
        result = find_obj(content, types, name)
        if result and confine_to_datacenter:
            if to_text(self.get_parent_datacenter(result).name) != to_text(self.dc_name):
                # The global lookup hit an object in another datacenter; rescan
                # restricted to this datacenter before giving up.
                result = None
                objects = self.get_all_objs(content, types, confine_to_datacenter=True)
                for obj in objects:
                    if name is None or to_text(obj.name) == to_text(name):
                        return obj
        return result

    def get_all_objs(self, content, types, confine_to_datacenter=True):
        """ Wrapper around get_all_objs to set datacenter context """
        objects = get_all_objs(content, types)
        if confine_to_datacenter:
            if hasattr(objects, 'items'):
                # resource pools come back as a dictionary;
                # iterate over a copy so entries can be deleted in place
                for k in tuple(objects):
                    parent_dc = self.get_parent_datacenter(k)
                    if parent_dc.name != self.dc_name:
                        del objects[k]
            else:
                # everything else should be a list
                objects = [x for x in objects if self.get_parent_datacenter(x).name == self.dc_name]

        return objects

    def get_network(self, network):
        """Return (and cache) the vim.Network with the given name."""
        network = quote_obj_name(network)

        if network not in self.networks:
            self.networks[network] = self.find_obj(self.content, [vim.Network], network)

        return self.networks[network]

    def get_cluster(self, cluster):
        """Return (and cache) the cluster with the given name."""
        if cluster not in self.clusters:
            self.clusters[cluster] = self.find_obj(self.content, [vim.ClusterComputeResource], cluster)

        return self.clusters[cluster]

    def get_esx_host(self, host):
        """Return (and cache) the ESXi host with the given name."""
        if host not in self.esx_hosts:
            self.esx_hosts[host] = self.find_obj(self.content, [vim.HostSystem], host)

        return self.esx_hosts[host]

    def get_parent_datacenter(self, obj):
        """ Walk the parent tree to find the objects datacenter """
        if isinstance(obj, vim.Datacenter):
            return obj
        if obj in self.parent_datacenters:
            return self.parent_datacenters[obj]
        datacenter = None
        # Walk a separate cursor so the original object is preserved for caching.
        ancestor = obj
        while True:
            if not hasattr(ancestor, 'parent'):
                break
            ancestor = ancestor.parent
            if isinstance(ancestor, vim.Datacenter):
                datacenter = ancestor
                break
        # BUGFIX: cache under the object that was asked about. Previously the
        # result was stored under the mutated loop variable (the ancestor the
        # walk ended on), so repeat lookups for the same object never hit the cache.
        self.parent_datacenters[obj] = datacenter
        return datacenter
+
+
+class PyVmomiHelper(PyVmomi):
    def __init__(self, module):
        """Initialize helper state on top of the PyVmomi connection wrapper."""
        super(PyVmomiHelper, self).__init__(module)
        # Helper for building virtual-device specs (disks, NICs, CD-ROMs, ...).
        self.device_helper = PyVmomiDeviceHelper(self.module)
        # Populated later with a vim.vm.ConfigSpec / RelocateSpec as needed.
        self.configspec = None
        self.relospec = None
        self.change_detected = False  # a change was detected and needs to be applied through reconfiguration
        self.change_applied = False  # a change was applied meaning at least one task succeeded
        self.tracked_changes = {}  # dict of changes made or would-be-made in check mode, updated when change_applied is set
        # Customization spec built from the 'customization' parameters, if any.
        self.customspec = None
        # Object lookup cache scoped to the target datacenter.
        self.cache = PyVmomiCache(self.content, dc_name=self.params['datacenter'])
+
+ def gather_facts(self, vm):
+ return gather_vm_facts(self.content, vm)
+
+ def remove_vm(self, vm, delete_from_inventory=False):
+ # https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.ManagedEntity.html#destroy
+ if vm.summary.runtime.powerState.lower() == 'poweredon':
+ self.module.fail_json(msg="Virtual machine %s found in 'powered on' state, "
+ "please use 'force' parameter to remove or poweroff VM "
+ "and try removing VM again." % vm.name)
+ # Delete VM from Inventory
+ if delete_from_inventory:
+ try:
+ vm.UnregisterVM()
+ except (vim.fault.TaskInProgress,
+ vmodl.RuntimeFault) as e:
+ return {'changed': self.change_applied, 'failed': True, 'msg': e.msg, 'op': 'UnregisterVM'}
+ self.change_applied = True
+ return {'changed': self.change_applied, 'failed': False}
+ # Delete VM from Disk
+ task = vm.Destroy()
+ self.wait_for_task(task)
+ if task.info.state == 'error':
+ return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'destroy'}
+ else:
+ return {'changed': self.change_applied, 'failed': False}
+
+ def configure_guestid(self, vm_obj, vm_creation=False):
+ # guest_id is not required when using templates
+ if self.params['template']:
+ return
+
+ # guest_id is only mandatory on VM creation
+ if vm_creation and self.params['guest_id'] is None:
+ self.module.fail_json(msg="guest_id attribute is mandatory for VM creation")
+
+ if self.params['guest_id'] and \
+ (vm_obj is None or self.params['guest_id'].lower() != vm_obj.summary.config.guestId.lower()):
+ self.change_detected = True
+ self.configspec.guestId = self.params['guest_id']
+
    def configure_resource_alloc_info(self, vm_obj):
        """
        Function to configure resource allocation information about virtual machine
        :param vm_obj: VM object in case of reconfigure, None in case of deploy
        :return: None
        """
        rai_change_detected = False
        memory_allocation = vim.ResourceAllocationInfo()
        cpu_allocation = vim.ResourceAllocationInfo()

        # NOTE: these SharesInfo objects are deliberately shared between the
        # *_shares_level branches and the custom *_shares branches below; an
        # explicit share count later overwrites the plain level ('custom' wins).
        memory_shares_info = vim.SharesInfo()
        cpu_shares_info = vim.SharesInfo()

        mem_shares_level = self.params['hardware']['mem_shares_level']
        if mem_shares_level is not None:
            memory_shares_info.level = mem_shares_level
            memory_allocation.shares = memory_shares_info

            # vm_obj is None (fresh deploy) always counts as a change.
            if vm_obj is None or \
                    memory_allocation.shares.level != vm_obj.config.memoryAllocation.shares.level:
                rai_change_detected = True

        cpu_shares_level = self.params['hardware']['cpu_shares_level']
        if cpu_shares_level is not None:
            cpu_shares_info.level = cpu_shares_level
            cpu_allocation.shares = cpu_shares_info
            if vm_obj is None or \
                    cpu_allocation.shares.level != vm_obj.config.cpuAllocation.shares.level:
                rai_change_detected = True

        mem_shares = self.params['hardware']['mem_shares']
        if mem_shares is not None:
            # Explicit share count forces the 'custom' level, overriding mem_shares_level.
            memory_shares_info.level = 'custom'
            memory_shares_info.shares = mem_shares
            memory_allocation.shares = memory_shares_info
            if vm_obj is None or \
                    memory_allocation.shares != vm_obj.config.memoryAllocation.shares:
                rai_change_detected = True

        cpu_shares = self.params['hardware']['cpu_shares']
        if cpu_shares is not None:
            # Explicit share count forces the 'custom' level, overriding cpu_shares_level.
            cpu_shares_info.level = 'custom'
            cpu_shares_info.shares = cpu_shares
            cpu_allocation.shares = cpu_shares_info
            if vm_obj is None or \
                    cpu_allocation.shares != vm_obj.config.cpuAllocation.shares:
                rai_change_detected = True

        mem_limit = self.params['hardware']['mem_limit']
        if mem_limit is not None:
            memory_allocation.limit = mem_limit
            if vm_obj is None or \
                    memory_allocation.limit != vm_obj.config.memoryAllocation.limit:
                rai_change_detected = True

        mem_reservation = self.params['hardware']['mem_reservation']
        if mem_reservation is not None:
            memory_allocation.reservation = mem_reservation
            if vm_obj is None or \
                    memory_allocation.reservation != vm_obj.config.memoryAllocation.reservation:
                rai_change_detected = True

        cpu_limit = self.params['hardware']['cpu_limit']
        if cpu_limit is not None:
            cpu_allocation.limit = cpu_limit
            if vm_obj is None or \
                    cpu_allocation.limit != vm_obj.config.cpuAllocation.limit:
                rai_change_detected = True

        cpu_reservation = self.params['hardware']['cpu_reservation']
        if cpu_reservation is not None:
            cpu_allocation.reservation = cpu_reservation
            if vm_obj is None or \
                    cpu_allocation.reservation != vm_obj.config.cpuAllocation.reservation:
                rai_change_detected = True

        if rai_change_detected:
            # Both allocations are applied together even if only one side changed.
            self.configspec.memoryAllocation = memory_allocation
            self.configspec.cpuAllocation = cpu_allocation
            self.change_detected = True
+
    def configure_cpu_and_memory(self, vm_obj, vm_creation=False):
        """Stage CPU/memory sizing and hot-add/hot-remove flags in the configspec.

        :param vm_obj: existing VM object on reconfigure, None on first deploy
        :param vm_creation: True when the VM is being created; enforces that
            num_cpus and memory_mb are supplied (unless deploying from template)
        """
        # set cpu/memory/etc
        num_cpus = self.params['hardware']['num_cpus']
        if num_cpus is not None:
            # check VM power state and cpu hot-add/hot-remove state before re-config VM
            # Allow VM to be powered on during this check when in check mode, when no changes will actually be made
            if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and not self.module.check_mode:
                if not vm_obj.config.cpuHotRemoveEnabled and num_cpus < vm_obj.config.hardware.numCPU:
                    self.module.fail_json(msg="Configured cpu number is less than the cpu number of the VM, "
                                              "cpuHotRemove is not enabled")
                if not vm_obj.config.cpuHotAddEnabled and num_cpus > vm_obj.config.hardware.numCPU:
                    self.module.fail_json(msg="Configured cpu number is more than the cpu number of the VM, "
                                              "cpuHotAdd is not enabled")

            num_cpu_cores_per_socket = self.params['hardware']['num_cpu_cores_per_socket']
            if num_cpu_cores_per_socket is not None:
                # Total vCPUs must divide evenly into cores-per-socket.
                if num_cpus % num_cpu_cores_per_socket != 0:
                    self.module.fail_json(msg="hardware.num_cpus attribute should be a multiple "
                                              "of hardware.num_cpu_cores_per_socket")
                if vm_obj is None or num_cpu_cores_per_socket != vm_obj.config.hardware.numCoresPerSocket:
                    self.change_detected = True
                    self.configspec.numCoresPerSocket = num_cpu_cores_per_socket
            if vm_obj is None or num_cpus != vm_obj.config.hardware.numCPU:
                self.change_detected = True
                self.configspec.numCPUs = num_cpus
        # num_cpu is mandatory for VM creation
        elif vm_creation and not self.params['template']:
            self.module.fail_json(msg="hardware.num_cpus attribute is mandatory for VM creation")

        memory_mb = self.params['hardware']['memory_mb']
        if memory_mb is not None:
            # check VM power state and memory hotadd state before re-config VM
            if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
                # NOTE(review): shrinking memory on a powered-on VM fails even with
                # hot-add enabled; this first branch is intentionally unconditional.
                if vm_obj.config.memoryHotAddEnabled and memory_mb < vm_obj.config.hardware.memoryMB:
                    self.module.fail_json(msg="Configured memory is less than memory size of the VM, "
                                              "operation is not supported")
                # Allow VM to be powered on during this check when in check mode, when no changes will actually be made
                elif not vm_obj.config.memoryHotAddEnabled and memory_mb != vm_obj.config.hardware.memoryMB and not self.module.check_mode:
                    self.module.fail_json(msg="memoryHotAdd is not enabled")
            if vm_obj is None or memory_mb != vm_obj.config.hardware.memoryMB:
                self.change_detected = True
                self.configspec.memoryMB = memory_mb
        # memory_mb is mandatory for VM creation
        elif vm_creation and not self.params['template']:
            self.module.fail_json(msg="hardware.memory_mb attribute is mandatory for VM creation")

        hotadd_memory = self.params['hardware']['hotadd_memory']
        if hotadd_memory is not None:
            # Allow VM to be powered on during this check when in check mode, when no changes will actually be made
            if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
                    vm_obj.config.memoryHotAddEnabled != hotadd_memory and not self.module.check_mode:
                self.module.fail_json(msg="Configure hotadd memory operation is not supported when VM is power on")
            if vm_obj is None or hotadd_memory != vm_obj.config.memoryHotAddEnabled:
                self.change_detected = True
                self.configspec.memoryHotAddEnabled = hotadd_memory

        hotadd_cpu = self.params['hardware']['hotadd_cpu']
        if hotadd_cpu is not None:
            # Allow VM to be powered on during this check when in check mode, when no changes will actually be made
            if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
                    vm_obj.config.cpuHotAddEnabled != hotadd_cpu and not self.module.check_mode:
                self.module.fail_json(msg="Configure hotadd cpu operation is not supported when VM is power on")
            if vm_obj is None or hotadd_cpu != vm_obj.config.cpuHotAddEnabled:
                self.change_detected = True
                self.configspec.cpuHotAddEnabled = hotadd_cpu

        hotremove_cpu = self.params['hardware']['hotremove_cpu']
        if hotremove_cpu is not None:
            # Allow VM to be powered on during this check when in check mode, when no changes will actually be made
            if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
                    vm_obj.config.cpuHotRemoveEnabled != hotremove_cpu and not self.module.check_mode:
                self.module.fail_json(msg="Configure hotremove cpu operation is not supported when VM is power on")
            if vm_obj is None or hotremove_cpu != vm_obj.config.cpuHotRemoveEnabled:
                self.change_detected = True
                self.configspec.cpuHotRemoveEnabled = hotremove_cpu

        memory_reservation_lock = self.params['hardware']['memory_reservation_lock']
        if memory_reservation_lock is not None:
            if vm_obj is None or memory_reservation_lock != vm_obj.config.memoryReservationLockedToMax:
                self.change_detected = True
                self.configspec.memoryReservationLockedToMax = memory_reservation_lock

        vpmc_enabled = self.params['hardware']['vpmc_enabled']
        if vpmc_enabled is not None:
            # Allow VM to be powered on during this check when in check mode, when no changes will actually be made
            if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
                    vm_obj.config.vPMCEnabled != vpmc_enabled and not self.module.check_mode:
                self.module.fail_json(msg="Configure vPMC cpu operation is not supported when VM is power on")
            if vm_obj is None or vpmc_enabled != vm_obj.config.vPMCEnabled:
                self.change_detected = True
                self.configspec.vPMCEnabled = vpmc_enabled

        boot_firmware = self.params['hardware']['boot_firmware']
        if boot_firmware is not None:
            # boot firmware re-config can cause boot issue
            if vm_obj is not None:
                return
            self.configspec.firmware = boot_firmware
            self.change_detected = True
+
+ def sanitize_cdrom_params(self):
+ cdrom_specs = []
+ expected_cdrom_spec = self.params.get('cdrom')
+ if expected_cdrom_spec:
+ for cdrom_spec in expected_cdrom_spec:
+ # set CDROM controller type is 'ide' by default
+ cdrom_spec['controller_type'] = cdrom_spec.get('controller_type', 'ide').lower()
+ if cdrom_spec['controller_type'] not in ['ide', 'sata']:
+ self.module.fail_json(msg="Invalid cdrom.controller_type: %s, valid value is 'ide' or 'sata'."
+ % cdrom_spec['controller_type'])
+
+ # set CDROM state is 'present' by default
+ cdrom_spec['state'] = cdrom_spec.get('state', 'present').lower()
+ if cdrom_spec['state'] not in ['present', 'absent']:
+ self.module.fail_json(msg="Invalid cdrom.state: %s, valid value is 'present', 'absent'."
+ % cdrom_spec['state'])
+
+ if cdrom_spec['state'] == 'present':
+ # set CDROM type is 'client' by default
+ cdrom_spec['type'] = cdrom_spec.get('type', 'client').lower()
+ if cdrom_spec['type'] not in ['none', 'client', 'iso']:
+ self.module.fail_json(msg="Invalid cdrom.type: %s, valid value is 'none', 'client' or 'iso'."
+ % cdrom_spec.get('type'))
+ if cdrom_spec['type'] == 'iso' and not cdrom_spec.get('iso_path'):
+ self.module.fail_json(msg="cdrom.iso_path is mandatory when cdrom.type is set to iso.")
+
+ if 'controller_number' not in cdrom_spec or 'unit_number' not in cdrom_spec:
+ self.module.fail_json(msg="'cdrom.controller_number' and 'cdrom.unit_number' are required"
+ " parameters when configure CDROM list.")
+ try:
+ cdrom_ctl_num = int(cdrom_spec.get('controller_number'))
+ cdrom_ctl_unit_num = int(cdrom_spec.get('unit_number'))
+ except ValueError:
+ self.module.fail_json(msg="'cdrom.controller_number' and 'cdrom.unit_number' attributes should be "
+ "integer values.")
+
+ if cdrom_spec['controller_type'] == 'ide' and (cdrom_ctl_num not in [0, 1] or cdrom_ctl_unit_num not in [0, 1]):
+ self.module.fail_json(msg="Invalid cdrom.controller_number: %s or cdrom.unit_number: %s, valid"
+ " values are 0 or 1 for IDE controller."
+ % (cdrom_spec.get('controller_number'), cdrom_spec.get('unit_number')))
+
+ if cdrom_spec['controller_type'] == 'sata' and (cdrom_ctl_num not in range(0, 4) or cdrom_ctl_unit_num not in range(0, 30)):
+ self.module.fail_json(msg="Invalid cdrom.controller_number: %s or cdrom.unit_number: %s,"
+ " valid controller_number value is 0-3, valid unit_number is 0-29"
+ " for SATA controller." % (cdrom_spec.get('controller_number'),
+ cdrom_spec.get('unit_number')))
+ cdrom_spec['controller_number'] = cdrom_ctl_num
+ cdrom_spec['unit_number'] = cdrom_ctl_unit_num
+
+ ctl_exist = False
+ for exist_spec in cdrom_specs:
+ if exist_spec.get('ctl_num') == cdrom_spec['controller_number'] and \
+ exist_spec.get('ctl_type') == cdrom_spec['controller_type']:
+ for cdrom_same_ctl in exist_spec['cdroms']:
+ if cdrom_same_ctl['unit_number'] == cdrom_spec['unit_number']:
+ self.module.fail_json(msg="Duplicate cdrom.controller_type: %s, cdrom.controller_number: %s,"
+ "cdrom.unit_number: %s parameters specified."
+ % (cdrom_spec['controller_type'], cdrom_spec['controller_number'], cdrom_spec['unit_number']))
+ ctl_exist = True
+ exist_spec['cdroms'].append(cdrom_spec)
+ break
+ if not ctl_exist:
+ cdrom_specs.append({'ctl_num': cdrom_spec['controller_number'],
+ 'ctl_type': cdrom_spec['controller_type'], 'cdroms': [cdrom_spec]})
+
+ return cdrom_specs
+
+ def configure_cdrom(self, vm_obj):
+ # Configure the VM CD-ROM
+ if self.params.get('cdrom'):
+ if vm_obj and vm_obj.config.template:
+ # Changing CD-ROM settings on a template is not supported
+ return
+
+ if isinstance(self.params.get('cdrom'), dict):
+ self.configure_cdrom_dict(vm_obj)
+ elif isinstance(self.params.get('cdrom'), list):
+ self.configure_cdrom_list(vm_obj)
+
    def configure_cdrom_dict(self, vm_obj):
        """Handle the deprecated dict form of the cdrom parameter (single IDE CD-ROM)."""
        self.module.deprecate(
            msg="Specifying CD-ROM configuration as dict is deprecated, Please use list to specify CD-ROM configuration.",
            version="4.0.0",
            collection_name="community.vmware"
        )
        if self.params["cdrom"].get('type') not in ['none', 'client', 'iso']:
            self.module.fail_json(msg="cdrom.type is mandatory. Options are 'none', 'client', and 'iso'.")
        if self.params["cdrom"]['type'] == 'iso' and not self.params["cdrom"].get('iso_path'):
            self.module.fail_json(msg="cdrom.iso_path is mandatory when cdrom.type is set to iso.")

        cdrom_spec = None
        cdrom_devices = self.get_vm_cdrom_devices(vm=vm_obj)
        iso_path = self.params["cdrom"].get("iso_path")
        if len(cdrom_devices) == 0:
            # Creating new CD-ROM
            ide_devices = self.get_vm_ide_devices(vm=vm_obj)
            if len(ide_devices) == 0:
                # Creating new IDE device
                ide_ctl = self.device_helper.create_ide_controller()
                ide_device = ide_ctl.device
                self.change_detected = True
                self.configspec.deviceChange.append(ide_ctl)
            else:
                ide_device = ide_devices[0]
                # NOTE(review): only the first IDE controller is considered here;
                # >3 occupied slots triggers the '4 IDE devices' error -- confirm
                # this matches the intended per-controller capacity.
                if len(ide_device.device) > 3:
                    self.module.fail_json(msg="hardware.cdrom specified for a VM or template which already has 4"
                                              " IDE devices of which none are a cdrom")

            cdrom_spec = self.device_helper.create_cdrom(ctl_device=ide_device, cdrom_type=self.params["cdrom"]["type"],
                                                         iso_path=iso_path)
            # On a running VM, connect the new device immediately unless type is 'none'.
            if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
                cdrom_spec.device.connectable.connected = (self.params["cdrom"]["type"] != "none")

        elif not self.device_helper.is_equal_cdrom(vm_obj=vm_obj, cdrom_device=cdrom_devices[0],
                                                   cdrom_type=self.params["cdrom"]["type"], iso_path=iso_path):
            # Existing first CD-ROM differs from the requested config: edit it in place.
            self.device_helper.update_cdrom_config(vm_obj, self.params["cdrom"], cdrom_devices[0], iso_path=iso_path)
            cdrom_spec = vim.vm.device.VirtualDeviceSpec()
            cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
            cdrom_spec.device = cdrom_devices[0]

        if cdrom_spec:
            self.change_detected = True
            self.configspec.deviceChange.append(cdrom_spec)
+
    def configure_cdrom_list(self, vm_obj):
        """Configure CD-ROM devices from the list-style 'cdrom' parameter.

        For every requested CD-ROM: locate (or create) the IDE/SATA controller
        identified by ctl_type/ctl_num, then add, edit, or remove the CD-ROM at
        the requested unit_number depending on its 'state' and whether the
        current device already matches the request.

        Args:
            vm_obj: virtual machine object (None when creating a brand-new VM)
        """
        configured_cdroms = self.sanitize_cdrom_params()
        # get existing CDROM devices
        cdrom_devices = self.get_vm_cdrom_devices(vm=vm_obj)
        # get existing IDE and SATA controllers
        ide_devices = self.get_vm_ide_devices(vm=vm_obj)
        sata_devices = self.get_vm_sata_devices(vm=vm_obj)

        for expected_cdrom_spec in configured_cdroms:
            ctl_device = None
            # Match an existing controller of the requested type by bus number
            if expected_cdrom_spec['ctl_type'] == 'ide' and ide_devices:
                for device in ide_devices:
                    if device.busNumber == expected_cdrom_spec['ctl_num']:
                        ctl_device = device
                        break
            if expected_cdrom_spec['ctl_type'] == 'sata' and sata_devices:
                for device in sata_devices:
                    if device.busNumber == expected_cdrom_spec['ctl_num']:
                        ctl_device = device
                        break
            # if not find create new ide or sata controller
            if not ctl_device:
                if expected_cdrom_spec['ctl_type'] == 'ide':
                    ide_ctl = self.device_helper.create_ide_controller(bus_number=expected_cdrom_spec['ctl_num'])
                    ctl_device = ide_ctl.device
                    self.change_detected = True
                    self.configspec.deviceChange.append(ide_ctl)
                if expected_cdrom_spec['ctl_type'] == 'sata':
                    sata_ctl = self.device_helper.create_sata_controller(bus_number=expected_cdrom_spec['ctl_num'])
                    ctl_device = sata_ctl.device
                    self.change_detected = True
                    self.configspec.deviceChange.append(sata_ctl)

            for cdrom in expected_cdrom_spec['cdroms']:
                cdrom_device = None
                iso_path = cdrom.get('iso_path')
                unit_number = cdrom.get('unit_number')
                # Find the existing CD-ROM on this controller/unit, if any
                for target_cdrom in cdrom_devices:
                    if target_cdrom.controllerKey == ctl_device.key and target_cdrom.unitNumber == unit_number:
                        cdrom_device = target_cdrom
                        break
                # create new CD-ROM
                if not cdrom_device and cdrom.get('state') != 'absent':
                    # Allow VM to be powered on during this check when in check mode, when no changes will actually be made
                    if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
                            isinstance(ctl_device, vim.vm.device.VirtualIDEController) and not self.module.check_mode:
                        self.module.fail_json(msg='CD-ROM attach to IDE controller not support hot-add.')
                    # Controller capacity limits: 2 devices per IDE, 30 per SATA (AHCI)
                    if len(ctl_device.device) == 2 and isinstance(ctl_device, vim.vm.device.VirtualIDEController):
                        self.module.fail_json(msg='Maximum number of CD-ROMs attached to IDE controller is 2.')
                    if len(ctl_device.device) == 30 and isinstance(ctl_device, vim.vm.device.VirtualAHCIController):
                        self.module.fail_json(msg='Maximum number of CD-ROMs attached to SATA controller is 30.')

                    cdrom_spec = self.device_helper.create_cdrom(ctl_device=ctl_device, cdrom_type=cdrom['type'],
                                                                 iso_path=iso_path, unit_number=unit_number)
                    self.change_detected = True
                    self.configspec.deviceChange.append(cdrom_spec)
                # re-configure CD-ROM
                elif cdrom_device and cdrom.get('state') != 'absent' and \
                        not self.device_helper.is_equal_cdrom(vm_obj=vm_obj, cdrom_device=cdrom_device,
                                                              cdrom_type=cdrom['type'], iso_path=iso_path):
                    self.device_helper.update_cdrom_config(vm_obj, cdrom, cdrom_device, iso_path=iso_path)
                    cdrom_spec = vim.vm.device.VirtualDeviceSpec()
                    cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
                    cdrom_spec.device = cdrom_device
                    self.change_detected = True
                    self.configspec.deviceChange.append(cdrom_spec)
                # delete CD-ROM
                elif cdrom_device and cdrom.get('state') == 'absent':
                    # Allow VM to be powered on during this check when in check mode, when no changes will actually be made
                    if vm_obj and vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOff and \
                            isinstance(ctl_device, vim.vm.device.VirtualIDEController) and not self.module.check_mode:
                        self.module.fail_json(msg='CD-ROM attach to IDE controller not support hot-remove.')
                    cdrom_spec = self.device_helper.remove_cdrom(cdrom_device)
                    self.change_detected = True
                    self.configspec.deviceChange.append(cdrom_spec)
+
+ def configure_hardware_params(self, vm_obj):
+ """
+ Function to configure hardware related configuration of virtual machine
+ Args:
+ vm_obj: virtual machine object
+ """
+ max_connections = self.params['hardware']['max_connections']
+ if max_connections is not None:
+ if vm_obj is None or max_connections != vm_obj.config.maxMksConnections:
+ self.change_detected = True
+ self.configspec.maxMksConnections = max_connections
+
+ nested_virt = self.params['hardware']['nested_virt']
+ if nested_virt is not None:
+ if vm_obj is None or nested_virt != bool(vm_obj.config.nestedHVEnabled):
+ self.change_detected = True
+ self.configspec.nestedHVEnabled = nested_virt
+
+ temp_version = self.params['hardware']['version']
+ if temp_version is not None:
+ new_version = None
+ if temp_version.lower() == 'latest':
+ # Check is to make sure vm_obj is not of type template
+ if vm_obj and not vm_obj.config.template:
+ config_option_descriptors = vm_obj.environmentBrowser.QueryConfigOptionDescriptor()
+ available_hw_versions = [int(option_desc.key.split("-")[1]) for option_desc in config_option_descriptors if option_desc.upgradeSupported]
+ temp_version = max(available_hw_versions)
+ else:
+ try:
+ temp_version = int(temp_version)
+ except ValueError:
+ self.module.fail_json(msg="Failed to set hardware.version '%s' value as valid"
+ " values are either 'latest' or a number."
+ " Please check VMware documentation for valid VM hardware versions." % temp_version)
+
+ if isinstance(temp_version, int):
+ # Hardware version is denoted as "vmx-10"
+ new_version = "vmx-%02d" % temp_version
+
+ if vm_obj is None:
+ self.change_detected = True
+ self.configspec.version = new_version
+ # Check is to make sure vm_obj is not of type template
+ elif not vm_obj.config.template:
+ # VM exists and we need to update the hardware version
+ current_version = vm_obj.config.version
+ # Hardware version is denoted as "vmx-10"
+ version_digit = int(current_version.split("-", 1)[-1])
+ if temp_version < version_digit:
+ self.module.fail_json(msg="Current hardware version '%d' which is greater than the specified"
+ " version '%d'. Downgrading hardware version is"
+ " not supported. Please specify version greater"
+ " than the current version." % (version_digit,
+ temp_version))
+ elif temp_version > version_digit:
+ self.change_detected = True
+ self.tracked_changes['hardware.version'] = temp_version
+ self.configspec.version = new_version
+ # Only perform the upgrade if not in check mode.
+ if not self.module.check_mode:
+ task = vm_obj.UpgradeVM_Task(new_version)
+ self.wait_for_task(task)
+ if task.info.state == 'error':
+ return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'upgrade'}
+ self.change_applied = True
+
+ secure_boot = self.params['hardware']['secure_boot']
+ if secure_boot is not None:
+ if vm_obj is None or secure_boot != vm_obj.config.bootOptions.efiSecureBootEnabled:
+ self.change_detected = True
+ self.configspec.bootOptions = vim.vm.BootOptions()
+ self.configspec.bootOptions.efiSecureBootEnabled = secure_boot
+
+ iommu = self.params['hardware']['iommu']
+ if iommu is not None:
+ if vm_obj is None or iommu != vm_obj.config.flags.vvtdEnabled:
+ self.change_detected = True
+ if self.configspec.flags is None:
+ self.configspec.flags = vim.vm.FlagInfo()
+ self.configspec.flags.vvtdEnabled = iommu
+
+ virt_based_security = self.params['hardware']['virt_based_security']
+ if virt_based_security is not None:
+ if vm_obj is None or virt_based_security != self.configspec.flags.vbsEnabled:
+ self.change_detected = True
+ if self.configspec.flags is None:
+ self.configspec.flags = vim.vm.FlagInfo()
+ self.configspec.flags.vbsEnabled = virt_based_security
+
+ def get_device_by_type(self, vm=None, type=None):
+ device_list = []
+ if vm is None or type is None:
+ return device_list
+ for device in vm.config.hardware.device:
+ if isinstance(device, type):
+ device_list.append(device)
+
+ return device_list
+
+ def get_vm_cdrom_devices(self, vm=None):
+ return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualCdrom)
+
+ def get_vm_ide_devices(self, vm=None):
+ return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualIDEController)
+
+ def get_vm_sata_devices(self, vm=None):
+ return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualAHCIController)
+
+ def get_vm_nvdimm_ctl_device(self, vm=None):
+ return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualNVDIMMController)
+
+ def get_vm_nvdimm_devices(self, vm=None):
+ return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualNVDIMM)
+
    def configure_nvdimm(self, vm_obj):
        """
        Manage virtual NVDIMM device to the virtual machine

        When a label is given, the matching existing device is removed
        (state=absent) or grown (state=present); without a label a new NVDIMM
        device (and controller, if missing) is added. Reconfiguration requires
        the VM to be powered off.

        Args:
            vm_obj: virtual machine object (None when creating a brand-new VM)
        """
        if self.params['nvdimm']['state']:
            # Label is required when remove device
            if self.params['nvdimm']['state'] == 'absent' and not self.params['nvdimm']['label']:
                self.module.fail_json(msg="Please specify the label of virtual NVDIMM device using 'label' parameter"
                                          " when state is set to 'absent'.")
            # Reconfigure device requires VM in power off state
            if vm_obj and not vm_obj.config.template:
                # Allow VM to be powered on during this check when in check mode, when no changes will actually be made
                if vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOff and not self.module.check_mode:
                    self.module.fail_json(msg="VM is not in power off state, can not do virtual NVDIMM configuration.")

            nvdimm_ctl_exists = False
            if vm_obj and not vm_obj.config.template:
                # Get existing NVDIMM controller
                nvdimm_ctl = self.get_vm_nvdimm_ctl_device(vm=vm_obj)
                if len(nvdimm_ctl) != 0:
                    nvdimm_ctl_exists = True
                    nvdimm_ctl_key = nvdimm_ctl[0].key
                if self.params['nvdimm']['label'] is not None:
                    nvdimm_devices = self.get_vm_nvdimm_devices(vm=vm_obj)
                    if len(nvdimm_devices) != 0:
                        existing_nvdimm_dev = self.device_helper.find_nvdimm_by_label(
                            nvdimm_label=self.params['nvdimm']['label'],
                            nvdimm_devices=nvdimm_devices
                        )
                        if existing_nvdimm_dev is not None:
                            if self.params['nvdimm']['state'] == 'absent':
                                nvdimm_remove_spec = self.device_helper.remove_nvdimm(
                                    nvdimm_device=existing_nvdimm_dev
                                )
                                self.change_detected = True
                                self.configspec.deviceChange.append(nvdimm_remove_spec)
                            else:
                                # NVDIMM capacity can only grow; shrinking is rejected below
                                if existing_nvdimm_dev.capacityInMB < self.params['nvdimm']['size_mb']:
                                    nvdimm_config_spec = self.device_helper.update_nvdimm_config(
                                        nvdimm_device=existing_nvdimm_dev,
                                        nvdimm_size=self.params['nvdimm']['size_mb']
                                    )
                                    self.change_detected = True
                                    self.configspec.deviceChange.append(nvdimm_config_spec)
                                elif existing_nvdimm_dev.capacityInMB > self.params['nvdimm']['size_mb']:
                                    self.module.fail_json(msg="Can not change NVDIMM device size to %s MB, which is"
                                                              " smaller than the current size %s MB."
                                                          % (self.params['nvdimm']['size_mb'],
                                                             existing_nvdimm_dev.capacityInMB))
            # New VM or existing VM without label specified, add new NVDIMM device
            if vm_obj is None or (vm_obj and not vm_obj.config.template and self.params['nvdimm']['label'] is None):
                if self.params['nvdimm']['state'] == 'present':
                    vc_pmem_profile_id = None
                    # Get default PMem storage policy when host is vCenter
                    if self.is_vcenter():
                        storage_profile_name = "Host-local PMem Default Storage Policy"
                        spbm = SPBM(self.module)
                        pmem_profile = spbm.find_storage_profile_by_name(profile_name=storage_profile_name)
                        if pmem_profile is None:
                            self.module.fail_json(msg="Can not find PMem storage policy with name '%s'." % storage_profile_name)
                        vc_pmem_profile_id = pmem_profile.profileId.uniqueId

                    if not nvdimm_ctl_exists:
                        nvdimm_ctl_spec = self.device_helper.create_nvdimm_controller()
                        self.configspec.deviceChange.append(nvdimm_ctl_spec)
                        nvdimm_ctl_key = nvdimm_ctl_spec.device.key

                    # nvdimm_ctl_key was set either from the existing controller or the new spec above
                    nvdimm_dev_spec = self.device_helper.create_nvdimm_device(
                        nvdimm_ctl_dev_key=nvdimm_ctl_key,
                        pmem_profile_id=vc_pmem_profile_id,
                        nvdimm_dev_size_mb=self.params['nvdimm']['size_mb']
                    )
                    self.change_detected = True
                    self.configspec.deviceChange.append(nvdimm_dev_spec)
+
+ def get_vm_network_interfaces(self, vm=None):
+ device_list = []
+ if vm is None:
+ return device_list
+
+ for device in vm.config.hardware.device:
+ for device_type in self.device_helper.nic_device_type.values():
+ if isinstance(device, device_type):
+ device_list.append(device)
+
+ return device_list
+
    def sanitize_network_params(self):
        """
        Validate and normalize the user-provided 'networks' parameter.

        Resolves VLAN ids / distributed portgroup names to a network 'name',
        defaults the 'type' to static or dhcp based on the presence of ip or
        netmask, and validates device_type and MAC address. Entries are mutated
        in place.

        Returns: A sanitized list of network params, else fails
        """
        network_devices = list()
        # Clean up user data here
        for network in self.params['networks']:
            if 'name' not in network and 'vlan' not in network:
                self.module.fail_json(msg="Please specify at least a network name or"
                                          " a VLAN name under VM network list.")

            if 'name' in network and self.cache.get_network(network['name']) is None:
                self.module.fail_json(msg="Network '%(name)s' does not exist." % network)
            elif 'vlan' in network:
                # Resolve 'vlan' to a portgroup: match by VLAN id, by
                # dvswitch_name + portgroup name, or by portgroup name alone.
                dvps = self.cache.get_all_objs(self.content, [vim.dvs.DistributedVirtualPortgroup])
                for dvp in dvps:
                    if hasattr(dvp.config.defaultPortConfig, 'vlan') and \
                            isinstance(dvp.config.defaultPortConfig.vlan.vlanId, int) and \
                            str(dvp.config.defaultPortConfig.vlan.vlanId) == str(network['vlan']):
                        network['name'] = dvp.config.name
                        break
                    if 'dvswitch_name' in network and \
                            dvp.config.distributedVirtualSwitch.name == network['dvswitch_name'] and \
                            dvp.config.name == network['vlan']:
                        network['name'] = dvp.config.name
                        break

                    if dvp.config.name == network['vlan']:
                        network['name'] = dvp.config.name
                        break
                else:
                    # for/else: the loop finished without a break, so no portgroup matched
                    self.module.fail_json(msg="VLAN '%(vlan)s' does not exist." % network)

            if 'type' in network:
                if network['type'] not in ['dhcp', 'static']:
                    self.module.fail_json(msg="Network type '%(type)s' is not a valid parameter."
                                              " Valid parameters are ['dhcp', 'static']." % network)
                if network['type'] != 'static' and ('ip' in network or 'netmask' in network):
                    self.module.fail_json(msg='Static IP information provided for network "%(name)s",'
                                              ' but "type" is set to "%(type)s".' % network)
            else:
                # Type is optional parameter, if user provided IP or Subnet assume
                # network type as 'static'
                if 'ip' in network or 'netmask' in network:
                    network['type'] = 'static'
                else:
                    # User wants network type as 'dhcp'
                    network['type'] = 'dhcp'

            # Static addressing requires both ip and netmask
            if network.get('type') == 'static':
                if 'ip' in network and 'netmask' not in network:
                    self.module.fail_json(msg="'netmask' is required if 'ip' is"
                                              " specified under VM network list.")
                if 'ip' not in network and 'netmask' in network:
                    self.module.fail_json(msg="'ip' is required if 'netmask' is"
                                              " specified under VM network list.")

            if 'device_type' in network and network['device_type'] not in self.device_helper.nic_device_type.keys():
                self.module.fail_json(msg="Device type specified '%s' is not valid. Please specify correct device type"
                                          " from ['%s']." % (network['device_type'],
                                                             "', '".join(self.device_helper.nic_device_type.keys())))

            if 'mac' in network and not is_mac(network['mac']):
                self.module.fail_json(msg="Device MAC address '%s' is invalid."
                                          " Please provide correct MAC address." % network['mac'])

            network_devices.append(network)

        return network_devices
+
    def configure_network(self, vm_obj):
        """Create or edit virtual NIC device specs from the 'networks' parameter.

        Existing interfaces are edited in place (clone / idempotency); extra
        entries create new NICs. The NIC backing is chosen by the type of the
        target network object: distributed portgroup (VDS), NSX-T opaque
        network, or standard vSwitch. Resulting specs are appended to
        self.configspec.deviceChange (or self.relospec for opaque networks).

        Args:
            vm_obj: virtual machine object (None when creating a brand-new VM)
        """
        # Ignore empty networks, this permits to keep networks when deploying a template/cloning a VM
        if not self.params['networks']:
            return

        network_devices = self.sanitize_network_params()

        # List current device for Clone or Idempotency
        current_net_devices = self.get_vm_network_interfaces(vm=vm_obj)
        if len(network_devices) < len(current_net_devices):
            self.module.fail_json(msg="Given network device list is lesser than current VM device list (%d < %d). "
                                      "Removing interfaces is not allowed"
                                  % (len(network_devices), len(current_net_devices)))

        for key in range(0, len(network_devices)):
            nic_change_detected = False
            network_name = network_devices[key]['name']
            if key < len(current_net_devices) and (vm_obj or self.params['template']):
                # We are editing existing network devices, this is either when
                # are cloning from VM or Template
                nic = vim.vm.device.VirtualDeviceSpec()
                nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit

                nic.device = current_net_devices[key]
                # Sync connectable/wake-on-lan flags only when explicitly requested
                if "wake_on_lan" in network_devices[key] and \
                        nic.device.wakeOnLanEnabled != network_devices[key].get("wake_on_lan"):
                    nic.device.wakeOnLanEnabled = network_devices[key].get("wake_on_lan")
                    nic_change_detected = True
                if "start_connected" in network_devices[key] and \
                        nic.device.connectable.startConnected != network_devices[key].get("start_connected"):
                    nic.device.connectable.startConnected = network_devices[key].get("start_connected")
                    nic_change_detected = True
                if "connected" in network_devices[key] and \
                        nic.device.connectable.connected != network_devices[key].get("connected"):
                    nic.device.connectable.connected = network_devices[key].get("connected")
                    nic_change_detected = True
                if "allow_guest_control" in network_devices[key] and \
                        nic.device.connectable.allowGuestControl != network_devices[key].get("allow_guest_control"):
                    nic.device.connectable.allowGuestControl = network_devices[key].get("allow_guest_control")
                    nic_change_detected = True

                if nic.device.deviceInfo.summary != network_name:
                    nic.device.deviceInfo.summary = network_name
                    nic_change_detected = True
                # Device type and MAC cannot change on an existing interface
                if 'device_type' in network_devices[key]:
                    device = self.device_helper.nic_device_type.get(network_devices[key]['device_type'])
                    if not isinstance(nic.device, device):
                        self.module.fail_json(msg="Changing the device type is not possible when interface is already"
                                                  " present. The failing device type is %s"
                                              % network_devices[key]['device_type'])
                # Changing mac address has no effect when editing interface
                if 'mac' in network_devices[key] and nic.device.macAddress != current_net_devices[key].macAddress:
                    self.module.fail_json(msg="Changing MAC address has not effect when interface is already present. "
                                              "The failing new MAC address is %s" % nic.device.macAddress)

            else:
                # Default device type is vmxnet3, VMware best practice
                device_type = network_devices[key].get('device_type', 'vmxnet3')
                nic = self.device_helper.create_nic(device_type,
                                                    'Network Adapter %s' % (key + 1),
                                                    network_devices[key])
                nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
                nic_change_detected = True

            net_obj = self.cache.get_network(network_name)
            if hasattr(net_obj, 'portKeys'):
                # VDS switch
                pg_obj = None
                if 'dvswitch_name' in network_devices[key]:
                    dvs_name = network_devices[key]['dvswitch_name']
                    dvs_obj = find_dvs_by_name(self.content, dvs_name)
                    if dvs_obj is None:
                        self.module.fail_json(msg="Unable to find distributed virtual switch %s" % dvs_name)
                    pg_obj = find_dvspg_by_name(dvs_obj, network_name)
                    if pg_obj is None:
                        self.module.fail_json(msg="Unable to find distributed port group %s" % network_name)
                else:
                    pg_obj = self.cache.find_obj(self.content, [vim.dvs.DistributedVirtualPortgroup], network_name)

                # TODO: (akasurde) There is no way to find association between resource pool and distributed virtual portgroup
                # For now, check if we are able to find distributed virtual switch
                if not pg_obj.config.distributedVirtualSwitch:
                    self.module.fail_json(
                        msg="Failed to find distributed virtual switch which is associated with"
                            " distributed virtual portgroup '%s'. Make sure hostsystem is associated with"
                            " the given distributed virtual portgroup. Also, check if user has correct"
                            " permission to access distributed virtual switch in the given portgroup."
                        % pg_obj.name
                    )
                # Flag a change when the existing backing points at a different portgroup/switch
                if nic.device.backing and (
                    not hasattr(nic.device.backing, "port")
                    or (
                        nic.device.backing.port.portgroupKey != pg_obj.key
                        or nic.device.backing.port.switchUuid
                        != pg_obj.config.distributedVirtualSwitch.uuid
                    )
                ):
                    nic_change_detected = True

                dvs_port_connection = vim.dvs.PortConnection()
                dvs_port_connection.portgroupKey = pg_obj.key
                # If user specifies distributed port group without associating to the hostsystem on which
                # virtual machine is going to be deployed then we get error. We can infer that there is no
                # association between given distributed port group and host system.
                host_system = self.params.get('esxi_hostname')
                if host_system and host_system not in [host.config.host.name for host in pg_obj.config.distributedVirtualSwitch.config.host]:
                    self.module.fail_json(msg="It seems that host system '%s' is not associated with distributed"
                                              " virtual portgroup '%s'. Please make sure host system is associated"
                                              " with given distributed virtual portgroup" % (host_system, pg_obj.name))
                dvs_port_connection.switchUuid = pg_obj.config.distributedVirtualSwitch.uuid
                nic.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
                nic.device.backing.port = dvs_port_connection

            elif isinstance(net_obj, vim.OpaqueNetwork):
                # NSX-T Logical Switch
                nic.device.backing = vim.vm.device.VirtualEthernetCard.OpaqueNetworkBackingInfo()
                network_id = net_obj.summary.opaqueNetworkId
                nic.device.backing.opaqueNetworkType = 'nsx.LogicalSwitch'
                nic.device.backing.opaqueNetworkId = network_id
                nic.device.deviceInfo.summary = 'nsx.LogicalSwitch: %s' % network_id
                nic_change_detected = True
            else:
                # vSwitch
                if not isinstance(nic.device.backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):
                    nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
                    nic_change_detected = True

                if nic.device.backing.network != net_obj:
                    nic.device.backing.network = net_obj
                    nic_change_detected = True

                if nic.device.backing.deviceName != network_name:
                    nic.device.backing.deviceName = network_name
                    nic_change_detected = True

            if nic_change_detected:
                # Change to fix the issue found while configuring opaque network
                # VMs cloned from a template with opaque network will get disconnected
                # Replacing deprecated config parameter with relocation Spec
                if isinstance(net_obj, vim.OpaqueNetwork):
                    self.relospec.deviceChange.append(nic)
                else:
                    self.configspec.deviceChange.append(nic)
                self.change_detected = True
+
+ def set_vapp_properties(self, property_spec):
+ # Sets the values in property_info
+ property_info = vim.vApp.PropertyInfo()
+ property_info.classId = property_spec.get('classId')
+ property_info.instanceId = property_spec.get('instanceId')
+ property_info.id = property_spec.get('id')
+ property_info.category = property_spec.get('category')
+ property_info.label = property_spec.get('label')
+ property_info.type = property_spec.get('type', 'string')
+ property_info.userConfigurable = property_spec.get('userConfigurable', True)
+ property_info.defaultValue = property_spec.get('defaultValue')
+ property_info.value = property_spec.get('value', '')
+ property_info.description = property_spec.get('description')
+ return property_info
+
    def configure_vapp_properties(self, vm_obj):
        """Build a vim.vApp.VmConfigSpec from the 'vapp_properties' parameter.

        For an existing VM, properties are added, edited, or removed (operation
        'remove') relative to the current vAppConfig; for a new VM every entry
        is added. A non-empty spec is stored on self.configspec.vAppConfig and
        self.change_detected is set.

        Args:
            vm_obj: virtual machine object (None when creating a brand-new VM)
        """
        if not self.params['vapp_properties']:
            return

        for x in self.params['vapp_properties']:
            if not x.get('id'):
                self.module.fail_json(msg="id is required to set vApp property")

        new_vmconfig_spec = vim.vApp.VmConfigSpec()

        if vm_obj:
            # VM exists
            orig_spec = vm_obj.config.vAppConfig

            vapp_properties_current = dict((x.id, x) for x in orig_spec.property)
            vapp_properties_to_change = dict((x['id'], x) for x in self.params['vapp_properties'])

            # each property must have a unique key
            # init key counter with max value + 1
            all_keys = [x.key for x in orig_spec.property]
            new_property_index = max(all_keys) + 1 if all_keys else 0

            for property_id, property_spec in vapp_properties_to_change.items():
                is_property_changed = False
                new_vapp_property_spec = vim.vApp.PropertySpec()

                if property_id in vapp_properties_current:
                    if property_spec.get('operation') == 'remove':
                        new_vapp_property_spec.operation = 'remove'
                        new_vapp_property_spec.removeKey = vapp_properties_current[property_id].key
                        is_property_changed = True
                    else:
                        # this is 'edit' branch
                        new_vapp_property_spec.operation = 'edit'
                        new_vapp_property_spec.info = vapp_properties_current[property_id]
                        try:
                            for property_name, property_value in property_spec.items():

                                if property_name == 'operation':
                                    # operation is not an info object property
                                    # if set to anything other than 'remove' we don't fail
                                    continue

                                # Updating attributes only if needed
                                if getattr(new_vapp_property_spec.info, property_name) != property_value:
                                    setattr(new_vapp_property_spec.info, property_name, property_value)
                                    is_property_changed = True

                        except Exception as e:
                            # getattr/setattr raises on unknown PropertyInfo fields
                            msg = "Failed to set vApp property field='%s' and value='%s'. Error: %s" % (property_name, property_value, to_text(e))
                            self.module.fail_json(msg=msg)
                else:
                    if property_spec.get('operation') == 'remove':
                        # attempt to delete non-existent property
                        continue

                    # this is add new property branch
                    new_vapp_property_spec.operation = 'add'

                    # Configure the values in property_value
                    property_info = self.set_vapp_properties(property_spec)

                    new_vapp_property_spec.info = property_info
                    new_vapp_property_spec.info.key = new_property_index
                    new_property_index += 1
                    is_property_changed = True

                if is_property_changed:
                    new_vmconfig_spec.property.append(new_vapp_property_spec)
        else:
            # New VM
            all_keys = [x.key for x in new_vmconfig_spec.property]
            new_property_index = max(all_keys) + 1 if all_keys else 0
            vapp_properties_to_change = dict((x['id'], x) for x in self.params['vapp_properties'])
            is_property_changed = False

            for property_id, property_spec in vapp_properties_to_change.items():
                new_vapp_property_spec = vim.vApp.PropertySpec()
                # this is add new property branch
                new_vapp_property_spec.operation = 'add'

                # Configure the values in property_value
                property_info = self.set_vapp_properties(property_spec)

                new_vapp_property_spec.info = property_info
                new_vapp_property_spec.info.key = new_property_index
                new_property_index += 1
                is_property_changed = True

                if is_property_changed:
                    new_vmconfig_spec.property.append(new_vapp_property_spec)

        if new_vmconfig_spec.property:
            self.configspec.vAppConfig = new_vmconfig_spec
            self.change_detected = True
+
+ def customize_advanced_settings(self, vm_obj, config_spec):
+ if not self.params['advanced_settings']:
+ return
+
+ vm_custom_spec = config_spec
+ vm_custom_spec.extraConfig = []
+
+ changed = False
+ facts = self.gather_facts(vm_obj)
+ for kv in self.params['advanced_settings']:
+ if 'key' not in kv or 'value' not in kv:
+ self.module.exit_json(msg="advanced_settings items required both 'key' and 'value' fields.")
+
+ # If kv is not kv fetched from facts, change it
+ if isinstance(kv['value'], (bool, int)):
+ specifiedvalue = str(kv['value']).upper()
+ comparisonvalue = facts['advanced_settings'].get(kv['key'], '').upper()
+ else:
+ specifiedvalue = kv['value']
+ comparisonvalue = facts['advanced_settings'].get(kv['key'], '')
+
+ if (kv['key'] not in facts['advanced_settings'] and kv['value'] != '') or comparisonvalue != specifiedvalue:
+ option = vim.option.OptionValue()
+ option.key = kv['key']
+ option.value = specifiedvalue
+
+ vm_custom_spec.extraConfig.append(option)
+ changed = True
+
+ if changed:
+ self.change_detected = True
+
+ def customize_customvalues(self, vm_obj):
+ if not self.params['customvalues']:
+ return
+
+ if not self.is_vcenter():
+ self.module.warn("Currently connected to ESXi. "
+ "customvalues are a vCenter feature, this parameter will be ignored.")
+ return
+
+ facts = self.gather_facts(vm_obj)
+ for kv in self.params['customvalues']:
+ if 'key' not in kv or 'value' not in kv:
+ self.module.exit_json(msg="customvalues items required both 'key' and 'value' fields.")
+
+ key_id = None
+ for field in self.content.customFieldsManager.field:
+ if field.name == kv['key']:
+ key_id = field.key
+ break
+
+ if not key_id:
+ self.module.fail_json(msg="Unable to find custom value key %s" % kv['key'])
+
+ # If kv is not kv fetched from facts, change it
+ if kv['key'] not in facts['customvalues'] or facts['customvalues'][kv['key']] != kv['value']:
+ self.content.customFieldsManager.SetField(entity=vm_obj, key=key_id, value=kv['value'])
+ self.change_detected = True
+
+ def customize_vm(self, vm_obj):
+
+ # User specified customization specification
+ custom_spec_name = self.params.get('customization_spec')
+ if custom_spec_name:
+ cc_mgr = self.content.customizationSpecManager
+ if cc_mgr.DoesCustomizationSpecExist(name=custom_spec_name):
+ temp_spec = cc_mgr.GetCustomizationSpec(name=custom_spec_name)
+ self.customspec = temp_spec.spec
+ return
+ self.module.fail_json(msg="Unable to find customization specification"
+ " '%s' in given configuration." % custom_spec_name)
+
+ # Network settings
+ adaptermaps = []
+ for network in self.params['networks']:
+
+ guest_map = vim.vm.customization.AdapterMapping()
+ guest_map.adapter = vim.vm.customization.IPSettings()
+
+ if 'ip' in network and 'netmask' in network:
+ guest_map.adapter.ip = vim.vm.customization.FixedIp()
+ guest_map.adapter.ip.ipAddress = str(network['ip'])
+ guest_map.adapter.subnetMask = str(network['netmask'])
+ elif 'type' in network and network['type'] == 'dhcp':
+ guest_map.adapter.ip = vim.vm.customization.DhcpIpGenerator()
+
+ if 'gateway' in network:
+ guest_map.adapter.gateway = network['gateway']
+
+ # On Windows, DNS domain and DNS servers can be set by network interface
+ # https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.IPSettings.html
+ if 'domain' in network:
+ guest_map.adapter.dnsDomain = network['domain']
+ elif self.params['customization']['domain'] is not None:
+ guest_map.adapter.dnsDomain = self.params['customization']['domain']
+
+ if 'dns_servers' in network:
+ guest_map.adapter.dnsServerList = network['dns_servers']
+ elif self.params['customization']['dns_servers'] is not None:
+ guest_map.adapter.dnsServerList = self.params['customization']['dns_servers']
+
+ adaptermaps.append(guest_map)
+
+ # Global DNS settings
+ globalip = vim.vm.customization.GlobalIPSettings()
+ if self.params['customization']['dns_servers'] is not None:
+ globalip.dnsServerList = self.params['customization']['dns_servers']
+
+ # TODO: Maybe list the different domains from the interfaces here by default ?
+ dns_suffixes = []
+ dns_suffix = self.params['customization']['dns_suffix']
+ if dns_suffix:
+ if isinstance(dns_suffix, list):
+ dns_suffixes += dns_suffix
+ else:
+ dns_suffixes.append(dns_suffix)
+
+ globalip.dnsSuffixList = dns_suffixes
+
+ if self.params['customization']['domain'] is not None:
+ dns_suffixes.insert(0, self.params['customization']['domain'])
+ globalip.dnsSuffixList = dns_suffixes
+
+ if self.params['guest_id'] is not None:
+ guest_id = self.params['guest_id']
+ else:
+ guest_id = vm_obj.summary.config.guestId
+
+ # For windows guest OS, use SysPrep
+ # https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.Sysprep.html#field_detail
+ if 'win' in guest_id:
+ ident = vim.vm.customization.Sysprep()
+
+ ident.userData = vim.vm.customization.UserData()
+
+ # Setting hostName, orgName and fullName is mandatory, so we set some default when missing
+ ident.userData.computerName = vim.vm.customization.FixedName()
+ # computer name will be truncated to 15 characters if using VM name
+ default_name = ""
+ if 'name' in self.params and self.params['name']:
+ default_name = self.params['name'].replace(' ', '')
+ elif vm_obj:
+ default_name = vm_obj.name.replace(' ', '')
+ punctuation = string.punctuation.replace('-', '')
+ default_name = ''.join([c for c in default_name if c not in punctuation])
+
+ if self.params['customization']['hostname'] is not None:
+ ident.userData.computerName.name = self.params['customization']['hostname'][0:15]
+ else:
+ ident.userData.computerName.name = default_name[0:15]
+
+ ident.userData.fullName = str(self.params['customization'].get('fullname', 'Administrator'))
+ ident.userData.orgName = str(self.params['customization'].get('orgname', 'ACME'))
+
+ if self.params['customization']['productid'] is not None:
+ ident.userData.productId = str(self.params['customization']['productid'])
+
+ ident.guiUnattended = vim.vm.customization.GuiUnattended()
+
+ if self.params['customization']['autologon'] is not None:
+ ident.guiUnattended.autoLogon = self.params['customization']['autologon']
+ ident.guiUnattended.autoLogonCount = self.params['customization'].get('autologoncount', 1)
+
+ if self.params['customization']['timezone'] is not None:
+ # Check if timezone value is a int before proceeding.
+ ident.guiUnattended.timeZone = self.device_helper.integer_value(
+ self.params['customization']['timezone'],
+ 'customization.timezone')
+
+ ident.identification = vim.vm.customization.Identification()
+
+ if self.params['customization']['password'] is None or self.params['customization']['password'] == '':
+ ident.guiUnattended.password = None
+ else:
+ ident.guiUnattended.password = vim.vm.customization.Password()
+ ident.guiUnattended.password.value = str(self.params['customization']['password'])
+ ident.guiUnattended.password.plainText = True
+
+ if self.params['customization']['joindomain'] is not None:
+ if self.params['customization']['domainadmin'] is None or self.params['customization']['domainadminpassword'] is None:
+ self.module.fail_json(msg="'domainadmin' and 'domainadminpassword' entries are mandatory in 'customization' section to use "
+ "joindomain feature")
+
+ ident.identification.domainAdmin = self.params['customization']['domainadmin']
+ ident.identification.joinDomain = self.params['customization']['joindomain']
+ ident.identification.domainAdminPassword = vim.vm.customization.Password()
+ ident.identification.domainAdminPassword.value = self.params['customization']['domainadminpassword']
+ ident.identification.domainAdminPassword.plainText = True
+
+ elif self.params['customization']['joinworkgroup'] is not None:
+ ident.identification.joinWorkgroup = self.params['customization']['joinworkgroup']
+
+ if self.params['customization']['runonce'] is not None:
+ ident.guiRunOnce = vim.vm.customization.GuiRunOnce()
+ ident.guiRunOnce.commandList = self.params['customization']['runonce']
+
+ else:
+ # FIXME: We have no clue whether this non-Windows OS is actually Linux, hence it might fail!
+
+ # For Linux guest OS, use LinuxPrep
+ # https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.LinuxPrep.html
+ ident = vim.vm.customization.LinuxPrep()
+
+ # TODO: Maybe add domain from interface if missing ?
+ if self.params['customization']['domain'] is not None:
+ ident.domain = self.params['customization']['domain']
+
+ ident.hostName = vim.vm.customization.FixedName()
+ default_name = ""
+ if 'name' in self.params and self.params['name']:
+ default_name = self.params['name']
+ elif vm_obj:
+ default_name = vm_obj.name
+
+ if self.params['customization']['hostname'] is not None:
+ hostname = self.params['customization']['hostname'].split('.')[0]
+ else:
+ hostname = default_name.split('.')[0]
+
+ # Remove all characters except alphanumeric and minus which is allowed by RFC 952
+ valid_hostname = re.sub(r"[^a-zA-Z0-9\-]", "", hostname)
+ ident.hostName.name = valid_hostname
+
+ # List of supported time zones for different vSphere versions in Linux/Unix systems
+ # https://kb.vmware.com/s/article/2145518
+ if self.params['customization']['timezone'] is not None:
+ ident.timeZone = self.params['customization']['timezone']
+ if self.params['customization']['hwclockUTC'] is not None:
+ ident.hwClockUTC = self.params['customization']['hwclockUTC']
+ if self.params['customization']['script_text'] is not None:
+ ident.scriptText = self.params['customization']['script_text']
+
+ self.customspec = vim.vm.customization.Specification()
+ self.customspec.nicSettingMap = adaptermaps
+ self.customspec.globalIPSettings = globalip
+ self.customspec.identity = ident
+
def get_vm_scsi_controllers(self, vm_obj):
    """Wrap every SCSI controller found on vm_obj in a VirtualDeviceSpec.

    Returns None when vm_obj is None (there is no VM to inspect yet),
    otherwise a (possibly empty) list of specs whose .device attribute is
    the existing controller device.
    """
    if vm_obj is None:
        # A VM that does not exist yet has no controllers to collect.
        return None

    wrapped_specs = []
    for hw_device in vm_obj.config.hardware.device:
        if not self.device_helper.is_scsi_controller(hw_device):
            continue
        spec = vim.vm.device.VirtualDeviceSpec()
        spec.device = hw_device
        wrapped_specs.append(spec)

    return wrapped_specs
+
def get_configured_disk_size(self, expected_disk_spec):
    """Return the requested disk size in kilobytes.

    Accepts either a combined 'size' string such as '10gb' / '1.5GB', or one
    of the unit-suffixed numeric parameters size_tb/size_gb/size_mb/size_kb.
    Fails the module when no size is given or the value cannot be parsed.
    """
    # what size is it?
    if [x for x in expected_disk_spec.keys() if (x.startswith('size_') or x == 'size') and expected_disk_spec[x]]:
        # size, size_tb, size_gb, size_mb, size_kb
        if expected_disk_spec['size']:
            # The unit group deliberately accepts both cases, e.g. '10GB'.
            size_regex = re.compile(r'(\d+(?:\.\d+)?)([tgmkTGMK][bB])')
            disk_size_m = size_regex.match(expected_disk_spec['size'])
            try:
                if disk_size_m:
                    expected = disk_size_m.group(1)
                    unit = disk_size_m.group(2)
                else:
                    raise ValueError

                if re.match(r'\d+\.\d+', expected):
                    # We found float value in string, let's typecast it
                    expected = float(expected)
                else:
                    # We found int value in string, let's typecast it
                    expected = int(expected)

                if not expected or not unit:
                    raise ValueError

            except (TypeError, ValueError, NameError):
                # Common failure
                self.module.fail_json(msg="Failed to parse disk size please review value"
                                          " provided using documentation.")
        else:
            param = [x for x in expected_disk_spec.keys() if x.startswith('size_') and expected_disk_spec[x]][0]
            unit = param.split('_')[-1]
            expected = expected_disk_spec[param]

        disk_units = dict(tb=3, gb=2, mb=1, kb=0)
        # Normalize case BEFORE the lookup: the regex above accepts
        # uppercase units ('10GB'), but the table keys are lowercase.
        unit = unit.lower()
        if unit in disk_units:
            return expected * (1024 ** disk_units[unit])
        else:
            self.module.fail_json(msg="%s is not a supported unit for disk size."
                                      " Supported units are ['%s']." % (unit,
                                                                        "', '".join(disk_units.keys())))

    # No size found but disk, fail
    self.module.fail_json(
        msg="No size, size_kb, size_mb, size_gb or size_tb defined in disk configuration")
+
def add_existing_vmdk(self, vm_obj, expected_disk_spec, diskspec, scsi_ctl):
    """
    Attach the vmdk file named by expected_disk_spec['filename'] and append
    the resulting spec to self.configspec.deviceChange.

    Nothing is changed when an existing VM's disk already points at the
    requested file.
    """
    vmdk_path = expected_disk_spec['filename']
    # A change is needed for a brand-new VM, or when the backing file differs.
    needs_change = vm_obj is None or (vm_obj and diskspec.device.backing.fileName != vmdk_path)
    if not needs_change:
        return

    diskspec.device.backing.fileName = vmdk_path
    diskspec.device.key = -1
    self.change_detected = True
    self.configspec.deviceChange.append(diskspec)
+
def sanitize_disk_parameters(self, vm_obj):
    """
    Validate and group user-provided disk parameters when multiple disk
    controllers are configured.

    Every disk entry must carry controller_type, controller_number and
    unit_number.  Disks are grouped into one dict per controller:
    e.g., [{'type': 'nvme', 'num': 1, 'disk': []}, {}, {}, {}]}

    Returns: A sanitized list of controller dicts, else fails the module
    (duplicate unit numbers, or two different SCSI controller types sharing
    one bus number, are rejected).
    """
    controllers = []
    for disk_spec in self.params.get('disk'):
        if disk_spec['controller_type'] is None or disk_spec['controller_number'] is None or disk_spec['unit_number'] is None:
            self.module.fail_json(msg="'disk.controller_type', 'disk.controller_number' and 'disk.unit_number' are"
                                      " mandatory parameters when configure multiple disk controllers and disks.")

        ctl_type = disk_spec['controller_type']
        ctl_num = disk_spec['controller_number']

        matched_ctl = None
        for ctl in controllers:
            both_scsi = (ctl['type'] in self.device_helper.scsi_device_type.keys()
                         and ctl_type in self.device_helper.scsi_device_type.keys())
            # Two different SCSI controller flavours cannot share a bus number.
            if both_scsi and ctl['type'] != ctl_type and ctl['num'] == ctl_num:
                self.module.fail_json(msg="Specified SCSI controller '%s' and '%s' have the same bus number"
                                          ": '%s'" % (ctl['type'], ctl_type, ctl_num))

            if ctl['type'] == ctl_type and ctl['num'] == ctl_num:
                # Same controller already seen: reject duplicate unit numbers.
                for existing_disk in ctl['disk']:
                    if disk_spec['unit_number'] == existing_disk['unit_number']:
                        self.module.fail_json(msg="Specified the same 'controller_type, controller_number, "
                                                  "unit_number in disk configuration '%s:%s'" % (ctl_type, ctl_num))
                ctl['disk'].append(disk_spec)
                matched_ctl = ctl
                break

        if matched_ctl is None:
            controllers.append({'type': ctl_type, 'num': ctl_num, 'disk': [disk_spec]})

    return controllers
+
def set_disk_parameters(self, disk_spec, expected_disk_spec, reconfigure=False):
    """Apply disk mode, provisioning type and capacity onto disk_spec.

    With reconfigure=True only real differences are applied and reported;
    shrinking an existing disk fails the module.  For a new disk (the
    default) the values are always written and True is returned.

    Returns a bool: whether anything on the disk was (or will be) modified.
    """
    changed = False

    requested_mode = expected_disk_spec.get('disk_mode')
    if expected_disk_spec['disk_mode']:
        if not reconfigure:
            disk_spec.device.backing.diskMode = requested_mode
        elif disk_spec.device.backing.diskMode != requested_mode:
            disk_spec.device.backing.diskMode = requested_mode
            changed = True
    elif not reconfigure:
        # default is persistent for new deployed VM
        disk_spec.device.backing.diskMode = "persistent"

    if not reconfigure:
        provisioning = expected_disk_spec.get('type', 'thin')
        if provisioning == 'thin':
            disk_spec.device.backing.thinProvisioned = True
        elif provisioning == 'eagerzeroedthick':
            disk_spec.device.backing.eagerlyScrub = True

    kb = self.get_configured_disk_size(expected_disk_spec)
    if not reconfigure:
        disk_spec.device.capacityInKB = kb
        return True

    if disk_spec.device.capacityInKB > kb:
        self.module.fail_json(msg="Given disk size is smaller than found (%d < %d)."
                                  "Reducing disks is not allowed." % (kb, disk_spec.device.capacityInKB))
    if disk_spec.device.capacityInKB != kb:
        disk_spec.device.capacityInKB = kb
        changed = True

    return changed
+
def configure_multiple_controllers_disks(self, vm_obj):
    """Create/reconfigure disks that are explicitly addressed by controller.

    Used when disk entries carry controller_type/controller_number/
    unit_number.  Controller and disk device specs are appended to
    self.configspec.deviceChange and self.change_detected is set whenever
    something will actually change.
    """
    ctls = self.sanitize_disk_parameters(vm_obj)
    if len(ctls) == 0:
        return
    for ctl in ctls:
        # get existing specified disk controller and attached disks
        disk_ctl, disk_list = self.device_helper.get_controller_disks(vm_obj, ctl['type'], ctl['num'])
        if disk_ctl is None:
            # check if scsi controller key already used
            if ctl['type'] in self.device_helper.scsi_device_type.keys() and vm_obj is not None:
                scsi_ctls = self.get_vm_scsi_controllers(vm_obj)
                if scsi_ctls:
                    for scsi_ctl in scsi_ctls:
                        if scsi_ctl.device.busNumber == ctl['num']:
                            self.module.fail_json(msg="Specified SCSI controller number '%s' is already used"
                                                      " by: %s" % (ctl['num'], scsi_ctl))
            # create new disk controller if not exist
            disk_ctl_spec = self.device_helper.create_disk_controller(ctl['type'], ctl['num'])
            self.change_detected = True
            self.configspec.deviceChange.append(disk_ctl_spec)
        else:
            # Wrap the existing controller so new disks can reference it.
            disk_ctl_spec = vim.vm.device.VirtualDeviceSpec()
            disk_ctl_spec.device = disk_ctl
        for j in range(0, len(ctl['disk'])):
            hard_disk = None
            hard_disk_spec = None
            hard_disk_exist = False
            disk_modified_for_spec = False
            disk_modified_for_disk = False
            disk_unit_number = ctl['disk'][j]['unit_number']
            # from attached disk list find the specified one
            if len(disk_list) != 0:
                for disk in disk_list:
                    if disk.unitNumber == disk_unit_number:
                        hard_disk = disk
                        hard_disk_exist = True
                        break
            # if find the disk do reconfigure
            if hard_disk_exist:
                hard_disk_spec = vim.vm.device.VirtualDeviceSpec()
                hard_disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
                hard_disk_spec.device = hard_disk
                disk_modified_for_spec = self.set_disk_parameters(hard_disk_spec, ctl['disk'][j], reconfigure=True)
            # if no disk or the specified one not exist do create new disk
            if len(disk_list) == 0 or not hard_disk_exist:
                hard_disk = self.device_helper.create_hard_disk(disk_ctl_spec, disk_unit_number)
                hard_disk.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create
                disk_modified_for_disk = self.set_disk_parameters(hard_disk, ctl['disk'][j])

            # Only update the configspec that will be applied in reconfigure_vm if something actually changed
            if disk_modified_for_spec:
                self.change_detected = True
                self.configspec.deviceChange.append(hard_disk_spec)
            if disk_modified_for_disk:
                self.change_detected = True
                self.configspec.deviceChange.append(hard_disk)
+
def configure_disks(self, vm_obj):
    """Configure the VM's disks from self.params['disk'].

    Dispatches to configure_multiple_controllers_disks() when any disk entry
    explicitly addresses a controller; otherwise follows the legacy
    single-SCSI-controller path: edit existing disks in order, create
    missing ones, and grow (never shrink) capacities.
    """
    # Ignore empty disk list, this permits to keep disks when deploying a template/cloning a VM
    if not self.params['disk']:
        return

    # if one of 'controller_type', 'controller_number', 'unit_number' parameters set in one of disks' configuration
    # will call configure_multiple_controllers_disks() function
    # do not support mixed old scsi disks configuration and new multiple controller types of disks configuration
    configure_multiple_ctl = False
    for disk_spec in self.params.get('disk'):
        if disk_spec['controller_type'] or disk_spec['controller_number'] or disk_spec['unit_number']:
            configure_multiple_ctl = True
            break
    if configure_multiple_ctl:
        self.configure_multiple_controllers_disks(vm_obj)
        return

    # do single controller type disks configuration
    scsi_ctls = self.get_vm_scsi_controllers(vm_obj)

    # Create scsi controller only if we are deploying a new VM, not a template or reconfiguring
    if vm_obj is None or not scsi_ctls:
        scsi_ctl = self.device_helper.create_scsi_controller(self.get_scsi_type(), 0)
        self.change_detected = True
        self.configspec.deviceChange.append(scsi_ctl)
    else:
        scsi_ctl = scsi_ctls[0]

    # Existing virtual disks on the VM, or None when creating from scratch.
    disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)] \
        if vm_obj is not None else None

    if disks is not None and self.params.get('disk') and len(self.params.get('disk')) < len(disks):
        self.module.fail_json(msg="Provided disks configuration has less disks than "
                                  "the target object (%d vs %d)" % (len(self.params.get('disk')), len(disks)))

    disk_index = 0
    for expected_disk_spec in self.params.get('disk'):
        disk_modified = False
        # If we are manipulating and existing objects which has disks and disk_index is in disks
        if vm_obj is not None and disks is not None and disk_index < len(disks):
            diskspec = vim.vm.device.VirtualDeviceSpec()
            # set the operation to edit so that it knows to keep other settings
            diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
            diskspec.device = disks[disk_index]
        else:
            diskspec = self.device_helper.create_hard_disk(scsi_ctl, disk_index)
            disk_modified = True

        # increment index for next disk search
        disk_index += 1
        # index 7 is reserved to SCSI controller
        if disk_index == 7:
            disk_index += 1

        if expected_disk_spec['disk_mode']:
            disk_mode = expected_disk_spec.get('disk_mode', 'persistent')

            if (vm_obj and diskspec.device.backing.diskMode != disk_mode) or (vm_obj is None):
                diskspec.device.backing.diskMode = disk_mode
                disk_modified = True
        else:
            diskspec.device.backing.diskMode = "persistent"

        # is it thin?
        if expected_disk_spec['type']:
            disk_type = expected_disk_spec.get('type', '').lower()
            if disk_type == 'thin':
                diskspec.device.backing.thinProvisioned = True
            elif disk_type == 'eagerzeroedthick':
                diskspec.device.backing.eagerlyScrub = True

        if expected_disk_spec['filename']:
            # Disk backed by an existing vmdk file: no size handling needed.
            self.add_existing_vmdk(vm_obj, expected_disk_spec, diskspec, scsi_ctl)
            continue
        if vm_obj is None or self.params['template']:
            # We are creating new VM or from Template
            # Only create virtual device if not backed by vmdk in original template
            if diskspec.device.backing.fileName == '':
                diskspec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create

        # which datastore?
        if expected_disk_spec.get('datastore'):
            # TODO: This is already handled by the relocation spec,
            # but it needs to eventually be handled for all the
            # other disks defined
            pass

        kb = self.get_configured_disk_size(expected_disk_spec)
        # VMware doesn't allow to reduce disk sizes
        if kb < diskspec.device.capacityInKB:
            self.module.fail_json(
                msg="Given disk size is smaller than found (%d < %d). Reducing disks is not allowed." %
                    (kb, diskspec.device.capacityInKB))

        if kb != diskspec.device.capacityInKB or disk_modified:
            diskspec.device.capacityInKB = kb
            self.configspec.deviceChange.append(diskspec)

            self.change_detected = True
+
def select_host(self):
    """Look up the ESXi host named by the 'esxi_hostname' parameter.

    Fails the module when the host cannot be found, is not connected, or is
    in maintenance mode; otherwise returns the HostSystem object.
    """
    host = self.cache.get_esx_host(self.params['esxi_hostname'])
    if not host:
        self.module.fail_json(msg='Failed to find ESX host "%(esxi_hostname)s"' % self.params)

    runtime = host.runtime
    usable = runtime.connectionState == 'connected' and not runtime.inMaintenanceMode
    if not usable:
        self.module.fail_json(msg='ESXi "%(esxi_hostname)s" is in invalid state or in maintenance mode.' % self.params)
    return host
+
def autoselect_datastore(self):
    """Pick the valid datastore with the most free space.

    Fails the module when no datastores are visible at all; returns None
    when none of them passes is_datastore_valid().
    """
    datastores = self.cache.get_all_objs(self.content, [vim.Datastore])
    if not datastores:
        self.module.fail_json(msg="Unable to find a datastore list when autoselecting")

    best = None
    best_free = 0
    for candidate in datastores:
        if not self.is_datastore_valid(datastore_obj=candidate):
            continue
        if candidate.summary.freeSpace > best_free:
            best = candidate
            best_free = candidate.summary.freeSpace

    return best
+
def select_datastore(self, vm_obj=None):
    """Choose the destination datastore for the VM being deployed.

    Resolution order: the first disk's autoselect_datastore (largest free
    space, optionally name-filtered, scoped to cluster/host/datacenter),
    then the first disk's explicit datastore (which may name a datastore
    cluster), then the template's own datastore.  Fails the module when
    nothing suitable is found.  Returns (datastore_obj, datastore_name).
    """
    datastore = None
    datastore_name = None

    if self.params['disk']:
        # TODO: really use the datastore for newly created disks
        if self.params['disk'][0]['autoselect_datastore']:
            datastores = []

            if self.params['cluster']:
                cluster = self.find_cluster_by_name(self.params['cluster'], self.content)

                for host in cluster.host:
                    for mi in host.configManager.storageSystem.fileSystemVolumeInfo.mountInfo:
                        if mi.volume.type == "VMFS":
                            datastores.append(self.cache.find_obj(self.content, [vim.Datastore], mi.volume.name))
            elif self.params['esxi_hostname']:
                host = self.find_hostsystem_by_name(self.params['esxi_hostname'])

                for mi in host.configManager.storageSystem.fileSystemVolumeInfo.mountInfo:
                    if mi.volume.type == "VMFS":
                        datastores.append(self.cache.find_obj(self.content, [vim.Datastore], mi.volume.name))
            else:
                # No scope given: consider every datastore in the datacenter.
                datastores = self.cache.get_all_objs(self.content, [vim.Datastore])
                datastores = [x for x in datastores if self.cache.get_parent_datacenter(x).name == self.params['datacenter']]

            datastore_freespace = 0
            for ds in datastores:
                if not self.is_datastore_valid(datastore_obj=ds):
                    continue

                if (ds.summary.freeSpace > datastore_freespace) or (ds.summary.freeSpace == datastore_freespace and not datastore):
                    # If datastore field is provided, filter destination datastores
                    if self.params['disk'][0]['datastore'] and ds.name.find(self.params['disk'][0]['datastore']) < 0:
                        continue

                    datastore = ds
                    datastore_name = datastore.name
                    datastore_freespace = ds.summary.freeSpace

        elif self.params['disk'][0]['datastore']:
            datastore_name = self.params['disk'][0]['datastore']
            # Check if user has provided datastore cluster first
            datastore_cluster = self.cache.find_obj(self.content, [vim.StoragePod], datastore_name)
            if datastore_cluster:
                # If user specified datastore cluster so get recommended datastore
                datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster)
            # Check if get_recommended_datastore or user specified datastore exists or not
            datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name)
        else:
            self.module.fail_json(msg="Either datastore or autoselect_datastore should be provided to select datastore")

    if not datastore and self.params['template']:
        # use the template's existing DS
        disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)]
        if disks:
            datastore = disks[0].backing.datastore
            datastore_name = datastore.name
        # validation
        if datastore:
            dc = self.cache.get_parent_datacenter(datastore)
            if dc.name != self.params['datacenter']:
                # Template's datastore lives in another datacenter: fall back.
                datastore = self.autoselect_datastore()
                datastore_name = datastore.name

    if not datastore:
        if len(self.params['disk']) != 0 or self.params['template'] is None:
            self.module.fail_json(msg="Unable to find the datastore with given parameters."
                                      " This could mean, %s is a non-existent virtual machine and module tried to"
                                      " deploy it as new virtual machine with no disk. Please specify disks parameter"
                                      " or specify template to clone from." % self.params['name'])
        self.module.fail_json(msg="Failed to find a matching datastore")

    return datastore, datastore_name
+
def obj_has_parent(self, obj, parent):
    """Report whether `parent` appears (by name) in obj's parent chain.

    The walk stops at the inventory root folder (moIds 'group-d1' /
    'ha-folder-root') or when the chain runs out.
    """
    if obj is None and parent is None:
        raise AssertionError()

    node = obj
    while True:
        if node.name == parent.name:
            return True

        # Check if we have reached till root folder
        if node._moId in ('group-d1', 'ha-folder-root'):
            return False

        node = node.parent
        if node is None:
            return False
+
def get_scsi_type(self):
    """Return the SCSI controller type requested under hardware.scsi,
    defaulting to 'paravirtual' when the user did not specify one."""
    scsi_choice = self.params['hardware']['scsi']
    if scsi_choice is None:
        return "paravirtual"
    return scsi_choice
+
def find_folder(self, searchpath):
    """Walk inventory objects one component of searchpath at a time.

    Returns the object reached by following the path from the root folder,
    or None as soon as any component cannot be matched.
    """
    # Slashes inside a single component carry no meaning here; drop them.
    components = [part.replace('/', '') for part in searchpath.split('/')]
    last_index = len(components) - 1
    index = 0

    node = self.content.rootFolder
    while node and index <= last_index:
        advanced = False
        if hasattr(node, 'childEntity'):
            # Plain folder: scan direct children for the next component.
            for child in node.childEntity:
                if child.name == components[index]:
                    node = child
                    index += 1
                    advanced = True
                    break
        elif isinstance(node, vim.Datacenter):
            # A datacenter exposes its VM folder via a dedicated attribute.
            if hasattr(node, 'vmFolder'):
                if node.vmFolder.name == components[index]:
                    node = node.vmFolder
                    index += 1
                    advanced = True
        else:
            node = None

        if not advanced:
            node = None

    return node
+
def get_resource_pool(self, cluster=None, host=None, resource_pool=None):
    """Resolve the resource pool to deploy into.

    Explicit arguments take precedence over the module parameters
    'cluster', 'esxi_hostname' and 'resource_pool'.  The search is scoped
    to the given cluster (directly named, or derived from the host) when
    available, otherwise to the datacenter.  Fails the module when nothing
    can be resolved.
    """
    cluster_name = cluster or self.params.get('cluster', None)
    host_name = host or self.params.get('esxi_hostname', None)
    resource_pool_name = resource_pool or self.params.get('resource_pool', None)

    # get the datacenter object
    datacenter = find_obj(self.content, [vim.Datacenter], self.params['datacenter'])
    if not datacenter:
        self.module.fail_json(msg='Unable to find datacenter "%s"' % self.params['datacenter'])

    cluster_obj = None
    if cluster_name:
        # if cluster is given, get the cluster object
        cluster_obj = find_obj(self.content, [vim.ComputeResource], cluster_name, folder=datacenter)
        if not cluster_obj:
            self.module.fail_json(msg='Unable to find cluster "%s"' % cluster_name)
    elif host_name:
        # if host is given, get the cluster object using the host
        host_obj = find_obj(self.content, [vim.HostSystem], host_name, folder=datacenter)
        if not host_obj:
            self.module.fail_json(msg='Unable to find host "%s"' % host_name)
        cluster_obj = host_obj.parent

    # get resource pools limiting search to cluster or datacenter
    pool = find_obj(self.content, [vim.ResourcePool], resource_pool_name, folder=cluster_obj or datacenter)
    if not pool:
        if resource_pool_name:
            self.module.fail_json(msg='Unable to find resource_pool "%s"' % resource_pool_name)
        self.module.fail_json(msg='Unable to find resource pool, need esxi_hostname, resource_pool, or cluster')
    return pool
+
def deploy_vm(self):
    """Create a new VM, either by cloning a template/VM or from scratch.

    Resolves the destination folder, template, resource pool and datastore,
    builds config/relocate specs (disks, network, CPU/memory, customization),
    runs the Clone or CreateVM_Task, then applies post-creation settings
    (annotation, advanced settings, custom values, power state, waits).
    Returns the Ansible result dict.
    """
    # https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/clone_vm.py
    # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.CloneSpec.html
    # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.ConfigSpec.html
    # https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html

    # FIXME:
    #   - static IPs

    self.folder = self.params.get('folder', None)
    if self.folder is None:
        self.module.fail_json(msg="Folder is required parameter while deploying new virtual machine")

    # Prepend / if it was missing from the folder path, also strip trailing slashes
    if not self.folder.startswith('/'):
        self.folder = '/%(folder)s' % self.params
    self.folder = self.folder.rstrip('/')

    datacenter = self.cache.find_obj(self.content, [vim.Datacenter], self.params['datacenter'])
    if datacenter is None:
        self.module.fail_json(msg='No datacenter named %(datacenter)s was found' % self.params)

    dcpath = compile_folder_path_for_object(datacenter)

    # Nested folder does not have trailing /
    if not dcpath.endswith('/'):
        dcpath += '/'

    # Check for full path first in case it was already supplied
    if self.folder.startswith(
        dcpath + self.params["datacenter"] + "/vm"
    ) or self.folder.startswith(dcpath + "/" + self.params["datacenter"] + "/vm"):
        fullpath = self.folder
    elif self.folder.startswith("/vm/") or self.folder == "/vm":
        fullpath = "%s%s%s" % (dcpath, self.params["datacenter"], self.folder)
    elif self.folder.startswith("/"):
        fullpath = "%s%s/vm%s" % (dcpath, self.params["datacenter"], self.folder)
    else:
        fullpath = "%s%s/vm/%s" % (dcpath, self.params["datacenter"], self.folder)

    f_obj = self.content.searchIndex.FindByInventoryPath(fullpath)

    # abort if no strategy was successful
    if f_obj is None:
        # Add some debugging values in failure.
        details = {
            'datacenter': datacenter.name,
            'datacenter_path': dcpath,
            'folder': self.folder,
            'full_search_path': fullpath,
        }
        self.module.fail_json(msg='No folder %s matched in the search path : %s' % (self.folder, fullpath),
                              details=details)

    destfolder = f_obj

    if self.params['template']:
        vm_obj = self.get_vm_or_template(template_name=self.params['template'])
        if vm_obj is None:
            self.module.fail_json(msg="Could not find a template named %(template)s" % self.params)
    else:
        vm_obj = None

    # always get a resource_pool
    resource_pool = self.get_resource_pool()

    # set the destination datastore for VM & disks
    if self.params['datastore']:
        # Give precedence to datastore value provided by user
        # User may want to deploy VM to specific datastore.
        datastore_name = self.params['datastore']
        # Check if user has provided datastore cluster first
        datastore_cluster = self.cache.find_obj(self.content, [vim.StoragePod], datastore_name)
        if datastore_cluster:
            # If user specified datastore cluster so get recommended datastore
            datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster)
        # Check if get_recommended_datastore or user specified datastore exists or not
        datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name)
    else:
        (datastore, datastore_name) = self.select_datastore(vm_obj)

    self.configspec = vim.vm.ConfigSpec()
    self.configspec.deviceChange = []
    # create the relocation spec
    self.relospec = vim.vm.RelocateSpec()
    self.relospec.deviceChange = []
    # Each of these helpers appends to self.configspec as needed.
    self.configure_guestid(vm_obj=vm_obj, vm_creation=True)
    self.configure_cpu_and_memory(vm_obj=vm_obj, vm_creation=True)
    self.configure_hardware_params(vm_obj=vm_obj)
    self.configure_resource_alloc_info(vm_obj=vm_obj)
    self.configure_vapp_properties(vm_obj=vm_obj)
    self.configure_disks(vm_obj=vm_obj)
    self.configure_network(vm_obj=vm_obj)
    self.configure_cdrom(vm_obj=vm_obj)
    self.configure_nvdimm(vm_obj=vm_obj)

    # Find if we need network customizations (find keys in dictionary that requires customizations)
    network_changes = False
    for nw in self.params['networks']:
        for key in nw:
            # We don't need customizations for these keys
            if key == 'type' and nw['type'] == 'dhcp':
                network_changes = True
                break
            if key not in ('device_type', 'mac', 'name', 'vlan', 'type', 'start_connected', 'dvswitch_name'):
                network_changes = True
                break

    if any(v is not None for v in self.params['customization'].values()) or network_changes or self.params.get('customization_spec') is not None:
        self.customize_vm(vm_obj=vm_obj)

    clonespec = None
    clone_method = None
    try:
        if self.params['template']:
            # Only select specific host when ESXi hostname is provided
            if self.params['esxi_hostname']:
                self.relospec.host = self.select_host()
            self.relospec.datastore = datastore

            # Convert disk present in template if is set
            if self.params['convert']:
                for device in vm_obj.config.hardware.device:
                    if isinstance(device, vim.vm.device.VirtualDisk):
                        disk_locator = vim.vm.RelocateSpec.DiskLocator()
                        disk_locator.diskBackingInfo = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
                        if self.params['convert'] == 'thin':
                            disk_locator.diskBackingInfo.thinProvisioned = True
                        if self.params['convert'] == 'eagerzeroedthick':
                            disk_locator.diskBackingInfo.eagerlyScrub = True
                        if self.params['convert'] == 'thick':
                            disk_locator.diskBackingInfo.diskMode = "persistent"
                        disk_locator.diskId = device.key
                        disk_locator.datastore = datastore
                        self.relospec.disk.append(disk_locator)

            # https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html
            # > pool: For a clone operation from a template to a virtual machine, this argument is required.
            self.relospec.pool = resource_pool
            linked_clone = self.params.get('linked_clone')
            snapshot_src = self.params.get('snapshot_src', None)
            if linked_clone:
                if snapshot_src is not None:
                    self.relospec.diskMoveType = vim.vm.RelocateSpec.DiskMoveOptions.createNewChildDiskBacking
                else:
                    self.module.fail_json(msg="Parameter 'linked_src' and 'snapshot_src' are"
                                              " required together for linked clone operation.")

            clonespec = vim.vm.CloneSpec(template=self.params['is_template'], location=self.relospec)
            if self.customspec:
                clonespec.customization = self.customspec

            if snapshot_src is not None:
                if vm_obj.snapshot is None:
                    self.module.fail_json(msg="No snapshots present for virtual machine or template [%(template)s]" % self.params)
                snapshot = self.get_snapshots_by_name_recursively(snapshots=vm_obj.snapshot.rootSnapshotList,
                                                                  snapname=snapshot_src)
                if len(snapshot) != 1:
                    self.module.fail_json(msg='virtual machine "%(template)s" does not contain'
                                              ' snapshot named "%(snapshot_src)s"' % self.params)

                clonespec.snapshot = snapshot[0].snapshot

            clonespec.config = self.configspec
            clone_method = 'Clone'
            try:
                task = vm_obj.Clone(folder=destfolder, name=self.params['name'], spec=clonespec)
            except vim.fault.NoPermission as e:
                self.module.fail_json(msg="Failed to clone virtual machine %s to folder %s "
                                          "due to permission issue: %s" % (self.params['name'],
                                                                           destfolder,
                                                                           to_native(e.msg)))
            self.change_detected = True
        else:
            # ConfigSpec require name for VM creation
            self.configspec.name = self.params['name']
            self.configspec.files = vim.vm.FileInfo(logDirectory=None,
                                                    snapshotDirectory=None,
                                                    suspendDirectory=None,
                                                    vmPathName="[" + datastore_name + "]")
            esx_host = None
            # Only select specific host when ESXi hostname is provided
            if self.params['esxi_hostname']:
                esx_host = self.select_host()

            clone_method = 'CreateVM_Task'
            try:
                task = destfolder.CreateVM_Task(config=self.configspec, pool=resource_pool, host=esx_host)
            except vmodl.fault.InvalidRequest as e:
                self.module.fail_json(msg="Failed to create virtual machine due to invalid configuration "
                                          "parameter %s" % to_native(e.msg))
            except vim.fault.RestrictedVersion as e:
                self.module.fail_json(msg="Failed to create virtual machine due to "
                                          "product versioning restrictions: %s" % to_native(e.msg))
            self.change_detected = True
        self.wait_for_task(task)
    except TypeError as e:
        self.module.fail_json(msg="TypeError was returned, please ensure to give correct inputs. %s" % to_text(e))

    if task.info.state == 'error':
        # https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2021361
        # https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2173

        # provide these to the user for debugging
        clonespec_json = serialize_spec(clonespec)
        configspec_json = serialize_spec(self.configspec)
        kwargs = {
            'changed': self.change_applied,
            'failed': True,
            'msg': task.info.error.msg,
            'clonespec': clonespec_json,
            'configspec': configspec_json,
            'clone_method': clone_method
        }

        return kwargs
    else:
        # set annotation
        vm = task.info.result
        if self.params['annotation']:
            annotation_spec = vim.vm.ConfigSpec()
            annotation_spec.annotation = str(self.params['annotation'])
            task = vm.ReconfigVM_Task(annotation_spec)
            self.wait_for_task(task)
            if task.info.state == 'error':
                return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'annotation'}

        if self.params['advanced_settings']:
            vm_custom_spec = vim.vm.ConfigSpec()
            self.customize_advanced_settings(vm_obj=vm, config_spec=vm_custom_spec)
            task = vm.ReconfigVM_Task(vm_custom_spec)
            self.wait_for_task(task)
            if task.info.state == 'error':
                return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'advanced_settings'}

        if self.params['customvalues']:
            self.customize_customvalues(vm_obj=vm)

        if self.params['wait_for_ip_address'] or self.params['wait_for_customization'] or self.params['state'] in ['poweredon', 'powered-on', 'restarted']:
            set_vm_power_state(self.content, vm, 'poweredon', force=False)

            if self.params['wait_for_ip_address']:
                wait_for_vm_ip(self.content, vm, self.params['wait_for_ip_address_timeout'])

            if self.params['wait_for_customization']:
                is_customization_ok = self.wait_for_customization(vm=vm, timeout=self.params['wait_for_customization_timeout'])
                if not is_customization_ok:
                    vm_facts = self.gather_facts(vm)
                    return {'changed': self.change_applied, 'failed': True,
                            'msg': 'Customization failed. For detailed information see warnings',
                            'instance': vm_facts, 'op': 'customization'}

        vm_facts = self.gather_facts(vm)
        return {'changed': self.change_applied, 'failed': False, 'instance': vm_facts}
+
+    def get_snapshots_by_name_recursively(self, snapshots, snapname):
+        """Return every snapshot node in the given tree whose name equals snapname.
+
+        Performs an exact (case-sensitive) match and walks the tree depth-first.
+        Returns a list of snapshot tree nodes; empty when nothing matches.
+        """
+        snap_obj = []
+        for snapshot in snapshots:
+            if snapshot.name == snapname:
+                snap_obj.append(snapshot)
+            else:
+                # Only descend when this node does NOT match: the children of a
+                # matching snapshot are not searched.
+                # NOTE(review): presumably intentional (first match on a path
+                # wins) -- confirm against callers that use snapshot_src.
+                snap_obj = snap_obj + self.get_snapshots_by_name_recursively(snapshot.childSnapshotList, snapname)
+        return snap_obj
+
+    def reconfigure_vm(self):
+        """Reconfigure the existing VM (self.current_vm_obj) to match the module parameters.
+
+        Builds a fresh ConfigSpec/RelocateSpec, applies all configure_*/customize_*
+        helpers, then issues (at most) a relocate, a reconfigure, a rename, a
+        template conversion and an in-guest customization, in that order.
+        Returns an Ansible-style result dict; a failed sub-task short-circuits
+        with ``failed: True`` and an ``op`` key naming the failing step.
+        Check mode is honoured per step by setting self.change_applied instead
+        of calling the vSphere API.
+        """
+        self.configspec = vim.vm.ConfigSpec()
+        self.configspec.deviceChange = []
+        # create the relocation spec
+        self.relospec = vim.vm.RelocateSpec()
+        self.relospec.deviceChange = []
+        # Each helper inspects the current VM, fills self.configspec/self.relospec
+        # and flips self.change_detected when a difference is found.
+        self.configure_guestid(vm_obj=self.current_vm_obj)
+        self.configure_cpu_and_memory(vm_obj=self.current_vm_obj)
+        self.configure_hardware_params(vm_obj=self.current_vm_obj)
+        self.configure_disks(vm_obj=self.current_vm_obj)
+        self.configure_network(vm_obj=self.current_vm_obj)
+        self.configure_cdrom(vm_obj=self.current_vm_obj)
+        self.configure_nvdimm(vm_obj=self.current_vm_obj)
+        self.customize_advanced_settings(vm_obj=self.current_vm_obj, config_spec=self.configspec)
+        self.customize_customvalues(vm_obj=self.current_vm_obj)
+        self.configure_resource_alloc_info(vm_obj=self.current_vm_obj)
+        self.configure_vapp_properties(vm_obj=self.current_vm_obj)
+
+        if self.params['annotation'] and self.current_vm_obj.config.annotation != self.params['annotation']:
+            self.configspec.annotation = str(self.params['annotation'])
+            self.change_detected = True
+
+        # Relocate to the requested resource pool only when it differs from the
+        # VM's current pool.
+        if self.params['resource_pool']:
+            self.relospec.pool = self.get_resource_pool()
+
+            if self.relospec.pool != self.current_vm_obj.resourcePool:
+                self.tracked_changes['resourcePool'] = str(self.relospec.pool)
+                if self.module.check_mode:
+                    self.change_applied = True
+                else:
+                    task = self.current_vm_obj.RelocateVM_Task(spec=self.relospec)
+                    self.wait_for_task(task)
+                    if task.info.state == 'error':
+                        return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'relocate'}
+
+        # Only send VMware task if we see a modification
+        if self.change_detected:
+            self.tracked_changes['configspec'] = str(self.configspec)
+            if self.module.check_mode:
+                self.change_applied = True
+            else:
+                task = None
+                try:
+                    task = self.current_vm_obj.ReconfigVM_Task(spec=self.configspec)
+                except vim.fault.RestrictedVersion as e:
+                    self.module.fail_json(msg="Failed to reconfigure virtual machine due to"
+                                              " product versioning restrictions: %s" % to_native(e.msg))
+                self.wait_for_task(task)
+                if task.info.state == 'error':
+                    return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'reconfig'}
+
+        # Rename VM
+        # Only renames when the VM was addressed by uuid; renaming a VM found by
+        # name would immediately make it unfindable under its old name.
+        if self.params['uuid'] and self.params['name'] and self.params['name'] != self.current_vm_obj.config.name:
+            self.tracked_changes['name'] = self.params['name']
+            if self.module.check_mode:
+                self.change_applied = True
+            else:
+                task = self.current_vm_obj.Rename_Task(self.params['name'])
+                self.wait_for_task(task)
+                if task.info.state == 'error':
+                    return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'rename'}
+
+        # Mark VM as Template
+        if self.params['is_template'] and not self.current_vm_obj.config.template:
+            try:
+                if not self.module.check_mode:
+                    self.current_vm_obj.MarkAsTemplate()
+                self.change_applied = True
+                self.tracked_changes['MarkAsTemplate'] = True
+            except vmodl.fault.NotSupported as e:
+                self.module.fail_json(msg="Failed to mark virtual machine [%s] "
+                                          "as template: %s" % (self.params['name'], e.msg))
+
+        # Mark Template as VM
+        elif not self.params['is_template'] and self.current_vm_obj.config.template:
+            # Converting a template back to a VM requires a resource pool (and
+            # optionally an explicit host).
+            resource_pool = self.get_resource_pool()
+            kwargs = dict(pool=resource_pool)
+
+            if self.params.get('esxi_hostname', None):
+                host_system_obj = self.select_host()
+                kwargs.update(host=host_system_obj)
+
+            try:
+                if not self.module.check_mode:
+                    self.current_vm_obj.MarkAsVirtualMachine(**kwargs)
+                self.change_applied = True
+                self.tracked_changes['MarkAsVirtualMachine'] = True
+            except vim.fault.InvalidState as invalid_state:
+                self.module.fail_json(msg="Virtual machine is not marked"
+                                          " as template : %s" % to_native(invalid_state.msg))
+            except vim.fault.InvalidDatastore as invalid_ds:
+                self.module.fail_json(msg="Converting template to virtual machine"
+                                          " operation cannot be performed on the"
+                                          " target datastores: %s" % to_native(invalid_ds.msg))
+            except vim.fault.CannotAccessVmComponent as cannot_access:
+                self.module.fail_json(msg="Failed to convert template to virtual machine"
+                                          " as operation unable access virtual machine"
+                                          " component: %s" % to_native(cannot_access.msg))
+            except vmodl.fault.InvalidArgument as invalid_argument:
+                self.module.fail_json(msg="Failed to convert template to virtual machine"
+                                          " due to : %s" % to_native(invalid_argument.msg))
+            except Exception as generic_exc:
+                self.module.fail_json(msg="Failed to convert template to virtual machine"
+                                          " due to generic error : %s" % to_native(generic_exc))
+
+        # add customize existing VM after VM re-configure
+        if self.params['customization']['existing_vm']:
+            if self.current_vm_obj.config.template:
+                self.module.fail_json(msg="VM is template, not support guest OS customization.")
+            if self.current_vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOff and not self.module.check_mode:
+                self.module.fail_json(msg="VM is not in poweroff state, can not do guest OS customization.")
+            # TODO not sure if it is possible to query the current customspec to compare against the one being provided to check in check mode.
+            # Maybe by breaking down the individual fields and querying, but it needs more research.
+            # For now, assume changed...
+            self.tracked_changes['customization'] = True
+            if self.module.check_mode:
+                self.change_applied = True
+            else:
+                cus_result = self.customize_exist_vm()
+                if cus_result['failed']:
+                    return cus_result
+
+        vm_facts = self.gather_facts(self.current_vm_obj)
+        return {'changed': self.change_applied, 'failed': False, 'instance': vm_facts, 'changes': self.tracked_changes}
+
+    def customize_exist_vm(self):
+        """Run guest-OS customization against the existing (powered-off) VM.
+
+        Builds self.customspec via customize_vm() when any customization value,
+        network-level customization key, or a named customization_spec was
+        supplied, then submits CustomizeVM_Task and optionally powers on and
+        waits for the in-guest customization events.
+        Returns an Ansible-style result dict.
+        """
+        task = None
+        # Find if we need network customizations (find keys in dictionary that requires customizations)
+        network_changes = False
+        for nw in self.params['networks']:
+            for key in nw:
+                # We don't need customizations for these keys
+                if key not in ('device_type', 'mac', 'name', 'vlan', 'type', 'start_connected', 'dvswitch_name'):
+                    network_changes = True
+                    break
+        if any(v is not None for v in self.params['customization'].values()) or network_changes or self.params.get('customization_spec'):
+            # Populates self.customspec used by CustomizeVM_Task below.
+            self.customize_vm(vm_obj=self.current_vm_obj)
+        try:
+            task = self.current_vm_obj.CustomizeVM_Task(self.customspec)
+        except vim.fault.CustomizationFault as e:
+            self.module.fail_json(msg="Failed to customization virtual machine due to CustomizationFault: %s" % to_native(e.msg))
+        except vim.fault.RuntimeFault as e:
+            self.module.fail_json(msg="failed to customization virtual machine due to RuntimeFault: %s" % to_native(e.msg))
+        except Exception as e:
+            self.module.fail_json(msg="failed to customization virtual machine due to fault: %s" % to_native(e.msg))
+        self.wait_for_task(task)
+        if task.info.state == 'error':
+            return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'customize_exist'}
+
+        if self.params['wait_for_customization']:
+            # Customization only runs inside the guest after power-on.
+            set_vm_power_state(self.content, self.current_vm_obj, 'poweredon', force=False)
+            is_customization_ok = self.wait_for_customization(vm=self.current_vm_obj, timeout=self.params['wait_for_customization_timeout'])
+            if not is_customization_ok:
+                return {'changed': self.change_applied, 'failed': True,
+                        'msg': 'Customization failed. For detailed information see warnings',
+                        'op': 'wait_for_customize_exist'}
+
+        return {'changed': self.change_applied, 'failed': False}
+
+    def wait_for_task(self, task, poll_interval=1):
+        """
+        Wait for a VMware task to complete.  Terminal states are 'error' and 'success'.
+
+        Inputs:
+          - task: the task to wait for
+          - poll_interval: polling interval to check the task, in seconds
+
+        Modifies:
+          - self.change_applied
+        """
+        # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.Task.html
+        # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.TaskInfo.html
+        # https://github.com/virtdevninja/pyvmomi-community-samples/blob/master/samples/tools/tasks.py
+        # Synchronous busy-poll: blocks the module until the task leaves the
+        # 'queued'/'running' states.  No timeout here -- the vCenter task itself
+        # is expected to terminate.
+        while task.info.state not in ['error', 'success']:
+            time.sleep(poll_interval)
+        # Latch change_applied: once any task has succeeded it stays True.
+        self.change_applied = self.change_applied or task.info.state == 'success'
+
+    def get_vm_events(self, vm, eventTypeIdList):
+        """Query the vCenter event manager for events of the given type ids.
+
+        recursion="self" restricts the query to events raised on this VM only
+        (child entities are excluded).  Returns the raw event list.
+        """
+        byEntity = vim.event.EventFilterSpec.ByEntity(entity=vm, recursion="self")
+        filterSpec = vim.event.EventFilterSpec(entity=byEntity, eventTypeId=eventTypeIdList)
+        eventManager = self.content.eventManager
+        return eventManager.QueryEvent(filterSpec)
+
+    def wait_for_customization(self, vm, timeout=3600, sleep=10):
+        """Poll vCenter events until guest customization finishes.
+
+        Two phases, each bounded by ``poll = timeout // sleep`` iterations:
+        first wait for CustomizationStartedEvent, then wait for
+        CustomizationSucceeded/CustomizationFailed.  Returns True on success,
+        False on failure or timeout (a warning is emitted in the failure paths).
+        Worst case this can therefore wait up to 2 * timeout seconds.
+        """
+        poll = int(timeout // sleep)
+        thispoll = 0
+        while thispoll <= poll:
+            eventStarted = self.get_vm_events(vm, ['CustomizationStartedEvent'])
+            if len(eventStarted):
+                # Start event seen: reset the counter and wait for the result.
+                thispoll = 0
+                while thispoll <= poll:
+                    eventsFinishedResult = self.get_vm_events(vm, ['CustomizationSucceeded', 'CustomizationFailed'])
+                    if len(eventsFinishedResult):
+                        if not isinstance(eventsFinishedResult[0], vim.event.CustomizationSucceeded):
+                            self.module.warn("Customization failed with error {%s}:{%s}"
+                                             % (eventsFinishedResult[0]._wsdlName, eventsFinishedResult[0].fullFormattedMessage))
+                            return False
+                        else:
+                            return True
+                    else:
+                        time.sleep(sleep)
+                        thispoll += 1
+                if len(eventsFinishedResult) == 0:
+                    self.module.warn('Waiting for customization result event timed out.')
+                return False
+            else:
+                time.sleep(sleep)
+                thispoll += 1
+        # NOTE(review): the outer loop only repeats while no start event was
+        # seen (the start branch always returns), so eventStarted is expected
+        # to be empty here and the first branch looks unreachable -- confirm.
+        if len(eventStarted):
+            self.module.warn('Waiting for customization result event timed out.')
+        else:
+            self.module.warn('Waiting for customization start event timed out.')
+        return False
+
+
+def main():
+    """Entry point of the vmware_guest module.
+
+    Declares the full argument spec, then dispatches on whether the VM already
+    exists and on the requested ``state``: remove / reconfigure / change power
+    state for an existing VM, or deploy a new one.  Exits via
+    module.exit_json/fail_json with the accumulated result dict.
+    """
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(
+        state=dict(type='str', default='present',
+                   choices=['absent', 'poweredoff', 'powered-off',
+                            'poweredon', 'powered-on', 'present',
+                            'rebootguest', 'reboot-guest', 'restarted',
+                            'shutdownguest', 'shutdown-guest', 'suspended']),
+        template=dict(type='str', aliases=['template_src']),
+        is_template=dict(type='bool', default=False),
+        annotation=dict(type='str', aliases=['notes']),
+        customvalues=dict(type='list', default=[], elements='dict'),
+        advanced_settings=dict(type='list', default=[], elements='dict'),
+        name=dict(type='str'),
+        name_match=dict(type='str', choices=['first', 'last'], default='first'),
+        uuid=dict(type='str'),
+        use_instance_uuid=dict(type='bool', default=False),
+        folder=dict(type='str'),
+        guest_id=dict(type='str'),
+        disk=dict(
+            type='list',
+            default=[],
+            elements='dict',
+            options=dict(
+                autoselect_datastore=dict(type='bool'),
+                controller_number=dict(type='int', choices=[0, 1, 2, 3]),
+                controller_type=dict(type='str', choices=['buslogic', 'lsilogic', 'paravirtual', 'lsilogicsas', 'sata', 'nvme']),
+                datastore=dict(type='str'),
+                disk_mode=dict(type='str', choices=['persistent', 'independent_persistent', 'independent_nonpersistent']),
+                filename=dict(type='str'),
+                size=dict(type='str'),
+                size_gb=dict(type='int'),
+                size_kb=dict(type='int'),
+                size_mb=dict(type='int'),
+                size_tb=dict(type='int'),
+                type=dict(type='str', choices=['thin', 'eagerzeroedthick', 'thick']),
+                unit_number=dict(type='int'),
+            )
+        ),
+        nvdimm=dict(
+            type='dict',
+            default={},
+            options=dict(
+                state=dict(type='str', choices=['present', 'absent']),
+                label=dict(type='str'),
+                size_mb=dict(type='int', default=1024),
+            )
+        ),
+        # 'raw' because cdrom historically accepted both a dict and a list of dicts.
+        cdrom=dict(type='raw', default=[]),
+        hardware=dict(
+            type='dict',
+            default={},
+            options=dict(
+                boot_firmware=dict(type='str', choices=['bios', 'efi']),
+                cpu_limit=dict(type='int'),
+                cpu_reservation=dict(type='int'),
+                hotadd_cpu=dict(type='bool'),
+                hotadd_memory=dict(type='bool'),
+                hotremove_cpu=dict(type='bool'),
+                vpmc_enabled=dict(type='bool'),
+                max_connections=dict(type='int'),
+                mem_limit=dict(type='int'),
+                cpu_shares_level=dict(type='str', choices=['low', 'normal', 'high', 'custom']),
+                mem_shares_level=dict(type='str', choices=['low', 'normal', 'high', 'custom']),
+                cpu_shares=dict(type='int'),
+                mem_shares=dict(type='int'),
+                mem_reservation=dict(type='int', aliases=['memory_reservation']),
+                memory_mb=dict(type='int'),
+                memory_reservation_lock=dict(type='bool'),
+                nested_virt=dict(type='bool'),
+                num_cpu_cores_per_socket=dict(type='int'),
+                num_cpus=dict(type='int'),
+                scsi=dict(type='str', choices=['buslogic', 'lsilogic', 'lsilogicsas', 'paravirtual']),
+                secure_boot=dict(type='bool'),
+                version=dict(type='str'),
+                virt_based_security=dict(type='bool'),
+                iommu=dict(type='bool')
+            )),
+        force=dict(type='bool', default=False),
+        datacenter=dict(type='str', default='ha-datacenter'),
+        esxi_hostname=dict(type='str'),
+        cluster=dict(type='str'),
+        wait_for_ip_address=dict(type='bool', default=False),
+        wait_for_ip_address_timeout=dict(type='int', default=300),
+        state_change_timeout=dict(type='int', default=0),
+        snapshot_src=dict(type='str'),
+        linked_clone=dict(type='bool', default=False),
+        networks=dict(type='list', default=[], elements='dict'),
+        resource_pool=dict(type='str'),
+        customization=dict(
+            type='dict',
+            default={},
+            options=dict(
+                autologon=dict(type='bool'),
+                autologoncount=dict(type='int'),
+                dns_servers=dict(type='list', elements='str'),
+                dns_suffix=dict(type='list', elements='str'),
+                domain=dict(type='str'),
+                domainadmin=dict(type='str'),
+                domainadminpassword=dict(type='str', no_log=True),
+                existing_vm=dict(type='bool'),
+                fullname=dict(type='str'),
+                hostname=dict(type='str'),
+                hwclockUTC=dict(type='bool'),
+                joindomain=dict(type='str'),
+                joinworkgroup=dict(type='str'),
+                orgname=dict(type='str'),
+                password=dict(type='str', no_log=True),
+                productid=dict(type='str'),
+                runonce=dict(type='list', elements='str'),
+                script_text=dict(type='str'),
+                timezone=dict(type='str')
+            )),
+        customization_spec=dict(type='str', default=None),
+        wait_for_customization=dict(type='bool', default=False),
+        wait_for_customization_timeout=dict(type='int', default=3600),
+        vapp_properties=dict(type='list', default=[], elements='dict'),
+        datastore=dict(type='str'),
+        convert=dict(type='str', choices=['thin', 'thick', 'eagerzeroedthick']),
+        delete_from_inventory=dict(type='bool', default=False),
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True,
+                           mutually_exclusive=[
+                               ['cluster', 'esxi_hostname'],
+                           ],
+                           required_one_of=[
+                               ['name', 'uuid'],
+                           ],
+                           )
+    result = {'failed': False, 'changed': False}
+    pyv = PyVmomiHelper(module)
+
+    # Check requirements for virtualization based security
+    if pyv.params['hardware']['virt_based_security']:
+        if not pyv.params['hardware']['nested_virt']:
+            pyv.module.fail_json(msg="Virtualization based security requires nested virtualization. Please enable nested_virt.")
+
+        if not pyv.params['hardware']['secure_boot']:
+            pyv.module.fail_json(msg="Virtualization based security requires (U)EFI secure boot. Please enable secure_boot.")
+
+        if not pyv.params['hardware']['iommu']:
+            pyv.module.fail_json(msg="Virtualization based security requires I/O MMU. Please enable iommu.")
+
+    # Check if the VM exists before continuing
+    vm = pyv.get_vm()
+
+    # VM already exists
+    if vm:
+        if module.params['state'] == 'absent':
+            # destroy it
+            if module.check_mode:
+                result.update(
+                    vm_name=vm.name,
+                    changed=True,
+                    current_powerstate=vm.summary.runtime.powerState.lower(),
+                    desired_operation='remove_vm',
+                )
+                module.exit_json(**result)
+            if module.params['force']:
+                # has to be poweredoff first
+                set_vm_power_state(pyv.content, vm, 'poweredoff', module.params['force'])
+            result = pyv.remove_vm(vm, module.params['delete_from_inventory'])
+        elif module.params['state'] == 'present':
+            # Note that check_mode is handled inside reconfigure_vm
+            result = pyv.reconfigure_vm()
+        elif module.params['state'] in ['poweredon', 'powered-on', 'poweredoff',
+                                        'powered-off', 'restarted', 'suspended',
+                                        'shutdownguest', 'shutdown-guest',
+                                        'rebootguest', 'reboot-guest']:
+            if module.check_mode:
+                # Identify if the power state would have changed if not in check mode
+                current_powerstate = vm.summary.runtime.powerState.lower()
+                powerstate_will_change = False
+                if ((current_powerstate == 'poweredon' and module.params['state'] not in ['poweredon', 'powered-on'])
+                        or (current_powerstate == 'poweredoff' and module.params['state']
+                            not in ['poweredoff', 'powered-off', 'shutdownguest', 'shutdown-guest'])
+                        or (current_powerstate == 'suspended' and module.params['state'] != 'suspended')):
+                    powerstate_will_change = True
+
+                result.update(
+                    vm_name=vm.name,
+                    changed=powerstate_will_change,
+                    current_powerstate=current_powerstate,
+                    desired_operation='set_vm_power_state',
+                )
+                module.exit_json(**result)
+            # set powerstate
+            tmp_result = set_vm_power_state(pyv.content, vm, module.params['state'], module.params['force'], module.params['state_change_timeout'])
+            if tmp_result['changed']:
+                result["changed"] = True
+                if module.params['state'] in ['poweredon', 'powered-on', 'restarted', 'rebootguest', 'reboot-guest'] and module.params['wait_for_ip_address']:
+                    wait_result = wait_for_vm_ip(pyv.content, vm, module.params['wait_for_ip_address_timeout'])
+                    if not wait_result:
+                        module.fail_json(msg='Waiting for IP address timed out')
+                    tmp_result['instance'] = wait_result
+            if not tmp_result["failed"]:
+                result["failed"] = False
+                result['instance'] = tmp_result['instance']
+            if tmp_result["failed"]:
+                result["failed"] = True
+                result["msg"] = tmp_result["msg"]
+        else:
+            # This should not happen
+            raise AssertionError()
+    # VM doesn't exist
+    else:
+        if module.params['state'] in ['poweredon', 'powered-on', 'poweredoff', 'powered-off',
+                                      'present', 'restarted', 'suspended']:
+            if module.check_mode:
+                result.update(
+                    changed=True,
+                    desired_operation='deploy_vm',
+                )
+                module.exit_json(**result)
+            result = pyv.deploy_vm()
+            if result['failed']:
+                module.fail_json(msg='Failed to create a virtual machine : %s' % result['msg'])
+
+    if result['failed']:
+        module.fail_json(**result)
+    else:
+        module.exit_json(**result)
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_boot_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_boot_info.py
new file mode 100644
index 000000000..f440d3704
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_boot_info.py
@@ -0,0 +1,203 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_boot_info
+short_description: Gather info about boot options for the given virtual machine
+description:
+ - Gather information about boot options for the given virtual machine.
+author:
+ - Abhijeet Kasurde (@Akasurde)
+options:
+ name:
+ description:
+ - Name of the VM to work with.
+ - This is required if C(uuid) or C(moid) parameter is not supplied.
+ type: str
+ uuid:
+ description:
+ - UUID of the instance to manage if known, this is VMware's BIOS UUID by default.
+ - This is required if C(name) or C(moid) parameter is not supplied.
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ type: str
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: false
+ type: bool
+ name_match:
+ description:
+    - If multiple virtual machines match the name, use the first or last found.
+ default: 'first'
+ choices: ['first', 'last']
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather info about virtual machine's boot order and related parameters
+ community.vmware.vmware_guest_boot_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: "{{ vm_name }}"
+ register: vm_boot_order_info
+
+- name: Gather information about virtual machine's boot order using MoID
+ community.vmware.vmware_guest_boot_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ moid: "vm-42"
+ register: vm_moid_boot_order_info
+'''
+
+RETURN = r'''
+vm_boot_info:
+ description: metadata about boot order of virtual machine
+ returned: always
+ type: dict
+ sample: {
+ "current_boot_order": [
+ "floppy",
+ "disk",
+ "ethernet",
+ "cdrom"
+ ],
+ "current_boot_delay": 2000,
+ "current_boot_retry_delay": 22300,
+ "current_boot_retry_enabled": true,
+ "current_enter_bios_setup": true,
+ "current_boot_firmware": "bios",
+ "current_secure_boot_enabled": false,
+ }
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, find_vm_by_id
+
+try:
+ from pyVmomi import vim, VmomiSupport
+except ImportError:
+ pass
+
+
+class VmBootInfoManager(PyVmomi):
+    """Gathers boot-option information for a single virtual machine."""
+
+    def __init__(self, module):
+        super(VmBootInfoManager, self).__init__(module)
+        # VM lookup identifiers; exactly one of name/uuid/moid is supplied
+        # (enforced by the module's required_one_of/mutually_exclusive).
+        self.name = self.params['name']
+        self.uuid = self.params['uuid']
+        self.moid = self.params['moid']
+        self.use_instance_uuid = self.params['use_instance_uuid']
+        # Resolved VirtualMachine managed object, set by _get_vm().
+        self.vm = None
+
+    def _get_vm(self):
+        """Resolve self.vm from uuid, name or moid; fail_json when nothing is found."""
+        vms = []
+
+        if self.uuid:
+            if self.use_instance_uuid:
+                vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="use_instance_uuid")
+            else:
+                vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="uuid")
+            if vm_obj is None:
+                self.module.fail_json(msg="Failed to find the virtual machine with UUID : %s" % self.uuid)
+            vms = [vm_obj]
+
+        elif self.name:
+            # A name may match several VMs; name_match below picks one.
+            objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
+            for temp_vm_object in objects:
+                if temp_vm_object.obj.name == self.name:
+                    vms.append(temp_vm_object.obj)
+
+        elif self.moid:
+            # Build a stub object straight from the managed object id; the id
+            # is not validated against the inventory until a property is read.
+            vm_obj = VmomiSupport.templateOf('VirtualMachine')(self.module.params['moid'], self.si._stub)
+            if vm_obj:
+                vms.append(vm_obj)
+
+        if vms:
+            if self.params.get('name_match') == 'first':
+                self.vm = vms[0]
+            elif self.params.get('name_match') == 'last':
+                self.vm = vms[-1]
+        else:
+            self.module.fail_json(msg="Failed to find virtual machine using %s" % (self.name or self.uuid or self.moid))
+
+    @staticmethod
+    def humanize_boot_order(boot_order):
+        """Map pyVmomi bootable-device objects to short device-name strings.
+
+        Unrecognized device types are silently skipped; order is preserved.
+        """
+        results = []
+        for device in boot_order:
+            if isinstance(device, vim.vm.BootOptions.BootableCdromDevice):
+                results.append('cdrom')
+            elif isinstance(device, vim.vm.BootOptions.BootableDiskDevice):
+                results.append('disk')
+            elif isinstance(device, vim.vm.BootOptions.BootableEthernetDevice):
+                results.append('ethernet')
+            elif isinstance(device, vim.vm.BootOptions.BootableFloppyDevice):
+                results.append('floppy')
+        return results
+
+    def ensure(self):
+        """Collect the VM's boot options and exit the module (always changed=False).
+
+        Returns an empty vm_boot_info dict when the resolved object exposes no
+        config (e.g. a moid stub for a nonexistent VM).
+        """
+        self._get_vm()
+
+        results = dict()
+        if self.vm and self.vm.config:
+            results = dict(
+                current_boot_order=self.humanize_boot_order(self.vm.config.bootOptions.bootOrder),
+                current_boot_delay=self.vm.config.bootOptions.bootDelay,
+                current_enter_bios_setup=self.vm.config.bootOptions.enterBIOSSetup,
+                current_boot_retry_enabled=self.vm.config.bootOptions.bootRetryEnabled,
+                current_boot_retry_delay=self.vm.config.bootOptions.bootRetryDelay,
+                current_boot_firmware=self.vm.config.firmware,
+                current_secure_boot_enabled=self.vm.config.bootOptions.efiSecureBootEnabled
+            )
+
+        self.module.exit_json(changed=False, vm_boot_info=results)
+
+
+def main():
+    """Entry point of the vmware_guest_boot_info module.
+
+    Builds the argument spec (name/uuid/moid are mutually exclusive and one is
+    required), then delegates to VmBootInfoManager.ensure(), which exits the
+    module itself.
+    """
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(
+        name=dict(type='str'),
+        uuid=dict(type='str'),
+        moid=dict(type='str'),
+        use_instance_uuid=dict(type='bool', default=False),
+        name_match=dict(
+            choices=['first', 'last'],
+            default='first'
+        ),
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_one_of=[
+            ['name', 'uuid', 'moid']
+        ],
+        mutually_exclusive=[
+            ['name', 'uuid', 'moid']
+        ],
+        # Read-only module: check mode is trivially supported.
+        supports_check_mode=True,
+    )
+
+    pyv = VmBootInfoManager(module)
+    pyv.ensure()
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_boot_manager.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_boot_manager.py
new file mode 100644
index 000000000..63be3ae0c
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_boot_manager.py
@@ -0,0 +1,421 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_boot_manager
+short_description: Manage boot options for the given virtual machine
+description:
+ - This module can be used to manage boot options for the given virtual machine.
+author:
+ - Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
+options:
+ name:
+ description:
+ - Name of the VM to work with.
+ - This is required if C(uuid) or C(moid) parameter is not supplied.
+ type: str
+ uuid:
+ description:
+ - UUID of the instance to manage if known, this is VMware's BIOS UUID by default.
+ - This is required if C(name) or C(moid) parameter is not supplied.
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ type: str
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: false
+ type: bool
+ name_match:
+ description:
+ - If multiple virtual machines match the name, use the first or last one found.
+ default: 'first'
+ choices: ['first', 'last']
+ type: str
+ boot_order:
+ description:
+ - List of the boot devices.
+ default: []
+ type: list
+ elements: str
+ boot_hdd_name:
+ description:
+ - Name of disk to be set as boot disk, which is case sensitive, e.g., 'Hard disk 1'.
+ - This parameter is optional, if not set, will use the first virtual disk found in VM device list.
+ type: str
+ version_added: '3.2.0'
+ boot_delay:
+ description:
+ - Delay in milliseconds before starting the boot sequence.
+ type: int
+ enter_bios_setup:
+ description:
+ - If set to C(true), the virtual machine automatically enters BIOS setup the next time it boots.
+ - The virtual machine resets this flag, so that subsequent boots proceed normally.
+ type: 'bool'
+ boot_retry_enabled:
+ description:
+ - If set to C(true), the virtual machine that fails to boot, will try to boot again after C(boot_retry_delay) is expired.
+ - If set to C(false), the virtual machine waits indefinitely for user intervention.
+ type: 'bool'
+ boot_retry_delay:
+ description:
+ - Specify the time in milliseconds between virtual machine boot failure and subsequent attempt to boot again.
+ - If set, will automatically set C(boot_retry_enabled) to C(true) as this parameter is required.
+ type: int
+ boot_firmware:
+ description:
+ - Choose which firmware should be used to boot the virtual machine.
+ choices: ["bios", "efi"]
+ type: str
+ secure_boot_enabled:
+ description:
+ - Choose if EFI secure boot should be enabled. EFI secure boot can only be enabled with boot_firmware = efi
+ type: 'bool'
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Change virtual machine's boot order and related parameters
+ community.vmware.vmware_guest_boot_manager:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: testvm
+ boot_delay: 2000
+ enter_bios_setup: true
+ boot_retry_enabled: true
+ boot_retry_delay: 22300
+ boot_firmware: bios
+ secure_boot_enabled: false
+ boot_order:
+ - floppy
+ - cdrom
+ - ethernet
+ - disk
+ delegate_to: localhost
+ register: vm_boot_order
+
+- name: Change virtual machine's boot order using Virtual Machine MoID
+ community.vmware.vmware_guest_boot_manager:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ moid: vm-42
+ boot_delay: 2000
+ enter_bios_setup: true
+ boot_retry_enabled: true
+ boot_retry_delay: 22300
+ boot_firmware: bios
+ secure_boot_enabled: false
+ boot_order:
+ - floppy
+ - cdrom
+ - ethernet
+ - disk
+ delegate_to: localhost
+ register: vm_boot_order
+'''
+
+RETURN = r'''
+vm_boot_status:
+ description: metadata about boot order of virtual machine
+ returned: always
+ type: dict
+ sample: {
+ "current_boot_order": [
+ "floppy",
+ "disk",
+ "ethernet",
+ "cdrom"
+ ],
+ "current_boot_delay": 2000,
+ "current_boot_retry_delay": 22300,
+ "current_boot_retry_enabled": true,
+ "current_enter_bios_setup": true,
+ "current_boot_firmware": "bios",
+ "current_secure_boot_enabled": false,
+ "previous_boot_delay": 10,
+ "previous_boot_retry_delay": 10000,
+ "previous_boot_retry_enabled": true,
+ "previous_enter_bios_setup": false,
+ "previous_boot_firmware": "efi",
+ "previous_secure_boot_enabled": true,
+ "previous_boot_order": [
+ "ethernet",
+ "cdrom",
+ "floppy",
+ "disk"
+ ],
+ }
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, find_vm_by_id, wait_for_task, TaskError
+
+try:
+ from pyVmomi import vim, VmomiSupport
+except ImportError:
+ pass
+
+
class VmBootManager(PyVmomi):
    """Reconcile a VM's boot options (order, delays, retry, firmware, secure boot).

    The single public operation is :meth:`ensure`, which compares the requested
    parameters against the VM's current bootOptions and issues one
    ReconfigVM_Task when anything differs.
    """

    def __init__(self, module):
        super(VmBootManager, self).__init__(module)
        # VM lookup parameters; main() guarantees exactly one of name/uuid/moid is set.
        self.name = self.params['name']
        self.uuid = self.params['uuid']
        self.moid = self.params['moid']
        self.use_instance_uuid = self.params['use_instance_uuid']
        # Resolved vim.VirtualMachine, populated by _get_vm().
        self.vm = None

    def _get_vm(self):
        """Resolve the target VM from uuid, name or moid into ``self.vm``.

        Fails the module when nothing matches. With duplicate names, the
        ``name_match`` parameter selects the first or last candidate.
        """
        vms = []

        if self.uuid:
            # BIOS UUID by default; instance UUID when use_instance_uuid is set.
            if self.use_instance_uuid:
                vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="instance_uuid")
            else:
                vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="uuid")
            if vm_obj is None:
                self.module.fail_json(msg="Failed to find the virtual machine with UUID : %s" % self.uuid)
            vms = [vm_obj]

        elif self.name:
            # Property-collector query for only the 'name' property keeps this cheap
            # even on large inventories.
            objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
            for temp_vm_object in objects:
                if temp_vm_object.obj.name == self.name:
                    vms.append(temp_vm_object.obj)

        elif self.moid:
            # Builds a managed-object stub from the MoID; existence is not verified
            # here — a bad MoID surfaces later on first attribute access.
            vm_obj = VmomiSupport.templateOf('VirtualMachine')(self.module.params['moid'], self.si._stub)
            if vm_obj:
                vms.append(vm_obj)

        if vms:
            if self.params.get('name_match') == 'first':
                self.vm = vms[0]
            elif self.params.get('name_match') == 'last':
                self.vm = vms[-1]
        else:
            self.module.fail_json(msg="Failed to find virtual machine using %s" % (self.name or self.uuid))

    @staticmethod
    def humanize_boot_order(boot_order):
        """Translate a list of vim bootable-device objects into the module's
        string vocabulary ('cdrom', 'disk', 'ethernet', 'floppy').

        Unrecognized device types are silently skipped.
        """
        results = []
        for device in boot_order:
            if isinstance(device, vim.vm.BootOptions.BootableCdromDevice):
                results.append('cdrom')
            elif isinstance(device, vim.vm.BootOptions.BootableDiskDevice):
                results.append('disk')
            elif isinstance(device, vim.vm.BootOptions.BootableEthernetDevice):
                results.append('ethernet')
            elif isinstance(device, vim.vm.BootOptions.BootableFloppyDevice):
                results.append('floppy')
        return results

    def ensure(self):
        """Compare requested boot options with the VM's current state and
        reconfigure the VM when they differ; exits the module with a
        ``vm_boot_status`` result containing previous and current values.
        """
        boot_order_list = []
        change_needed = False
        # Accumulates vim.vm.BootOptions constructor arguments for the changes found.
        kwargs = dict()
        previous_boot_disk = None
        valid_device_strings = ['cdrom', 'disk', 'ethernet', 'floppy']

        self._get_vm()

        # Translate each requested boot-order entry into a vim bootable-device
        # object bound to the first matching hardware device on the VM.
        for device_order in self.params.get('boot_order'):
            if device_order not in valid_device_strings:
                self.module.fail_json(msg="Invalid device found [%s], please specify device from ['%s']" % (device_order,
                                                                                                            "', '".join(valid_device_strings)))
            if device_order == 'cdrom':
                first_cdrom = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualCdrom)]
                if first_cdrom:
                    boot_order_list.append(vim.vm.BootOptions.BootableCdromDevice())
            elif device_order == 'disk':
                # boot_hdd_name narrows the disk selection by its case-sensitive label.
                if not self.params.get('boot_hdd_name'):
                    first_hdd = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualDisk)]
                else:
                    first_hdd = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualDisk)
                                 and device.deviceInfo.label == self.params.get('boot_hdd_name')]
                if not first_hdd:
                    self.module.fail_json(msg="Not found virtual disk with disk label '%s'" % (self.params.get('boot_hdd_name')))
                if first_hdd:
                    boot_order_list.append(vim.vm.BootOptions.BootableDiskDevice(deviceKey=first_hdd[0].key))
            elif device_order == 'ethernet':
                first_ether = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualEthernetCard)]
                if first_ether:
                    boot_order_list.append(vim.vm.BootOptions.BootableEthernetDevice(deviceKey=first_ether[0].key))
            elif device_order == 'floppy':
                first_floppy = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualFloppy)]
                if first_floppy:
                    boot_order_list.append(vim.vm.BootOptions.BootableFloppyDevice())

        # Get previous boot disk name when boot_hdd_name is set
        if self.params.get('boot_hdd_name'):
            for i in range(0, len(self.vm.config.bootOptions.bootOrder)):
                if isinstance(self.vm.config.bootOptions.bootOrder[i], vim.vm.BootOptions.BootableDiskDevice):
                    if self.vm.config.bootOptions.bootOrder[i].deviceKey:
                        # Map the boot entry's deviceKey back to the disk's label.
                        for dev in self.vm.config.hardware.device:
                            if isinstance(dev, vim.vm.device.VirtualDisk) and \
                                    dev.key == self.vm.config.bootOptions.bootOrder[i].deviceKey:
                                previous_boot_disk = dev.deviceInfo.label

        # A length mismatch (including an empty requested order) replaces the whole
        # boot order; otherwise compare entry-by-entry on type and, for disks, key.
        if len(boot_order_list) != len(self.vm.config.bootOptions.bootOrder):
            kwargs.update({'bootOrder': boot_order_list})
            change_needed = True
        else:
            for i in range(0, len(boot_order_list)):
                boot_device_type = type(boot_order_list[i])
                vm_boot_device_type = type(self.vm.config.bootOptions.bootOrder[i])
                if boot_device_type != vm_boot_device_type:
                    kwargs.update({'bootOrder': boot_order_list})
                    change_needed = True
                else:
                    if vm_boot_device_type is vim.vm.BootOptions.BootableDiskDevice and \
                            boot_order_list[i].deviceKey != self.vm.config.bootOptions.bootOrder[i].deviceKey:
                        kwargs.update({'bootOrder': boot_order_list})
                        change_needed = True

        if self.params.get('boot_delay') is not None and \
                self.vm.config.bootOptions.bootDelay != self.params.get('boot_delay'):
            kwargs.update({'bootDelay': self.params.get('boot_delay')})
            change_needed = True

        if self.params.get('enter_bios_setup') is not None and \
                self.vm.config.bootOptions.enterBIOSSetup != self.params.get('enter_bios_setup'):
            kwargs.update({'enterBIOSSetup': self.params.get('enter_bios_setup')})
            change_needed = True

        if self.params.get('boot_retry_enabled') is not None and \
                self.vm.config.bootOptions.bootRetryEnabled != self.params.get('boot_retry_enabled'):
            kwargs.update({'bootRetryEnabled': self.params.get('boot_retry_enabled')})
            change_needed = True

        if self.params.get('boot_retry_delay') is not None and \
                self.vm.config.bootOptions.bootRetryDelay != self.params.get('boot_retry_delay'):
            # A retry delay only takes effect with retries enabled, so enable implicitly.
            if not self.vm.config.bootOptions.bootRetryEnabled:
                kwargs.update({'bootRetryEnabled': True})
            kwargs.update({'bootRetryDelay': self.params.get('boot_retry_delay')})
            change_needed = True

        # Firmware lives on ConfigSpec itself, not on BootOptions, hence the flag.
        boot_firmware_required = False
        if self.params.get('boot_firmware') is not None and self.vm.config.firmware != self.params.get('boot_firmware'):
            change_needed = True
            boot_firmware_required = True

        if self.params.get('secure_boot_enabled') is not None:
            # Secure boot requires EFI firmware, either requested now or already set.
            if self.params.get('secure_boot_enabled') and self.params.get('boot_firmware') == "bios":
                self.module.fail_json(msg="Secure boot cannot be enabled when boot_firmware = bios")
            elif self.params.get('secure_boot_enabled') and \
                    self.params.get('boot_firmware') != 'efi' and \
                    self.vm.config.firmware == 'bios':
                self.module.fail_json(msg="Secure boot cannot be enabled since the VM's boot firmware is currently set to bios")
            elif self.vm.config.bootOptions.efiSecureBootEnabled != self.params.get('secure_boot_enabled'):
                kwargs.update({'efiSecureBootEnabled': self.params.get('secure_boot_enabled')})
                change_needed = True

        changed = False
        # Snapshot the pre-change state for the result payload.
        results = dict(
            previous_boot_order=self.humanize_boot_order(self.vm.config.bootOptions.bootOrder),
            previous_boot_delay=self.vm.config.bootOptions.bootDelay,
            previous_enter_bios_setup=self.vm.config.bootOptions.enterBIOSSetup,
            previous_boot_retry_enabled=self.vm.config.bootOptions.bootRetryEnabled,
            previous_boot_retry_delay=self.vm.config.bootOptions.bootRetryDelay,
            previous_boot_firmware=self.vm.config.firmware,
            previous_secure_boot_enabled=self.vm.config.bootOptions.efiSecureBootEnabled,
            current_boot_order=[]
        )
        if previous_boot_disk:
            results.update({'previous_boot_disk': previous_boot_disk})

        if change_needed:
            vm_conf = vim.vm.ConfigSpec()
            vm_conf.bootOptions = vim.vm.BootOptions(**kwargs)
            if boot_firmware_required:
                vm_conf.firmware = self.params.get('boot_firmware')
            task = self.vm.ReconfigVM_Task(vm_conf)

            try:
                changed, result = wait_for_task(task)
            except TaskError as e:
                self.module.fail_json(msg="Failed to perform reconfigure virtual"
                                          " machine %s for boot order due to: %s" % (self.name or self.uuid,
                                                                                     to_native(e)))

        # Re-read the (possibly updated) live state for the 'current_*' keys;
        # pyVmomi managed-object properties reflect the post-reconfigure values.
        results.update(
            {
                'current_boot_order': self.humanize_boot_order(self.vm.config.bootOptions.bootOrder),
                'current_boot_delay': self.vm.config.bootOptions.bootDelay,
                'current_enter_bios_setup': self.vm.config.bootOptions.enterBIOSSetup,
                'current_boot_retry_enabled': self.vm.config.bootOptions.bootRetryEnabled,
                'current_boot_retry_delay': self.vm.config.bootOptions.bootRetryDelay,
                'current_boot_firmware': self.vm.config.firmware,
                'current_secure_boot_enabled': self.vm.config.bootOptions.efiSecureBootEnabled
            }
        )
        if self.params.get('boot_hdd_name'):
            results.update({'current_boot_disk': self.params.get('boot_hdd_name')})

        self.module.exit_json(changed=changed, vm_boot_status=results)
+
+
def main():
    """Module entry point: declare parameters and reconcile VM boot options."""
    spec = vmware_argument_spec()
    spec.update(
        name=dict(type='str'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        name_match=dict(choices=['first', 'last'], default='first'),
        boot_order=dict(type='list', default=[], elements='str'),
        boot_hdd_name=dict(type='str'),
        boot_delay=dict(type='int'),
        enter_bios_setup=dict(type='bool'),
        boot_retry_enabled=dict(type='bool'),
        boot_retry_delay=dict(type='int'),
        secure_boot_enabled=dict(type='bool'),
        boot_firmware=dict(type='str', choices=['efi', 'bios']),
    )

    # Exactly one of name/uuid/moid identifies the target VM.
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[['name', 'uuid', 'moid']],
        mutually_exclusive=[['name', 'uuid', 'moid']],
    )

    VmBootManager(module).ensure()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_controller.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_controller.py
new file mode 100644
index 000000000..85b7d3438
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_controller.py
@@ -0,0 +1,555 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# Copyright: (c) 2019, VMware, Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_controller
+short_description: Manage disk or USB controllers related to virtual machine in given vCenter infrastructure
+description:
+ - This module can be used to add, remove disk controllers or USB controllers belonging to given virtual machine.
+ - All parameters and VMware object names are case sensitive.
+author:
+ - Diane Wang (@Tomorrow9) <dianew@vmware.com>
+options:
+ name:
+ description:
+ - Name of the virtual machine.
+ - This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
+ type: str
+ uuid:
+ description:
+ - UUID of the instance to gather facts if known, this is VMware's unique identifier.
+ - This is a required parameter, if parameter C(name) or C(moid) is not supplied.
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ type: str
+ folder:
+ description:
+ - Destination folder, absolute or relative path to find an existing guest.
+ - This is a required parameter, only if multiple VMs are found with same name.
+ - The folder should include the datacenter. ESX's datacenter is ha-datacenter
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ type: str
+ datacenter:
+ description:
+ - The datacenter name to which virtual machine belongs to.
+ default: ha-datacenter
+ type: str
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: false
+ type: bool
+ controllers:
+ description:
+ - A list of disk or USB controllers to add or remove.
+ - Total 4 disk controllers with the same type are allowed per VM.
+ - Total 2 USB controllers are allowed per VM, 1 USB 2.0 and 1 USB 3.0 or 3.1.
+ - For specific guest OS, supported controller types please refer to VMware Compatibility Guide.
+ suboptions:
+ controller_number:
+ description:
+ - Disk controller bus number. When C(state) is set to C(absent), this parameter is required.
+ - When C(type) set to C(usb2) or C(usb3), this parameter is not required.
+ type: int
+ choices:
+ - 0
+ - 1
+ - 2
+ - 3
+ type:
+ description:
+ - Type of disk or USB controller.
+ - From vSphere 6.5 and virtual machine with hardware version 13, C(nvme) controller starts to be supported.
+ required: true
+ type: str
+ choices:
+ - buslogic
+ - lsilogic
+ - lsilogicsas
+ - paravirtual
+ - sata
+ - nvme
+ - usb2
+ - usb3
+ state:
+ description:
+ - Add new controller or remove specified existing controller.
+ - If C(state) is set to C(absent), the specified controller will be removed from virtual machine when there is no disk or device attaching to it.
+ - If specified controller is removed or not exist, no action will be taken only warning message.
+ - If C(state) is set to C(present), new controller with specified type will be added.
+ - If the number of controllers with the specified controller type reaches its maximum, no action will be taken, only a warning message.
+ required: true
+ type: str
+ choices:
+ - present
+ - absent
+ bus_sharing:
+ description:
+ - Bus sharing type for SCSI controller.
+ required: false
+ type: str
+ choices: ['noSharing', 'physicalSharing', 'virtualSharing' ]
+ default: 'noSharing'
+ type: list
+ elements: dict
+ gather_disk_controller_facts:
+ description:
+ - Whether to collect existing disk and USB controllers facts only.
+ - When this parameter is set to C(true), C(controllers) parameter will be ignored.
+ type: bool
+ default: false
+ sleep_time:
+ description:
+ - 'The sleep time in seconds after VM reconfigure task completes, used when not get the updated VM controller
+ facts after VM reconfiguration.'
+ - This parameter is not required. Maximum value is 600.
+ default: 10
+ type: int
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Add disk and USB 3.0 controllers for virtual machine located by name
+ community.vmware.vmware_guest_controller:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ name: test_VM
+ controllers:
+ - state: present
+ type: sata
+ - state: present
+ type: nvme
+ - state: present
+ type: usb3
+ delegate_to: localhost
+ register: disk_controller_facts
+
+- name: Remove disk controllers and USB 2.0 from virtual machine located by moid
+ community.vmware.vmware_guest_controller:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ moid: vm-33
+ controllers:
+ - state: absent
+ controller_number: 1
+ type: sata
+ - state: absent
+ controller_number: 0
+ type: nvme
+ - state: absent
+ type: usb2
+ delegate_to: localhost
+ register: disk_controller_facts
+'''
+
+RETURN = r'''
+disk_controller_status:
+ description: metadata about the virtual machine's existing disk controllers or after adding or removing operation
+ returned: always
+ type: dict
+ sample: {
+ "nvme": {
+ "0": {
+ "controller_busnumber": 0,
+ "controller_controllerkey": 100,
+ "controller_devicekey": 31000,
+ "controller_disks_devicekey": [],
+ "controller_label": "NVME controller 0",
+ "controller_summary": "NVME controller 0",
+ "controller_unitnumber": 30
+ }
+ },
+ "sata": {
+ "0": {
+ "controller_busnumber": 0,
+ "controller_controllerkey": 100,
+ "controller_devicekey": 15000,
+ "controller_disks_devicekey": [
+ 16000,
+ 16001
+ ],
+ "controller_label": "SATA controller 0",
+ "controller_summary": "AHCI",
+ "controller_unitnumber": 24
+ }
+ },
+ "scsi": {
+ "0": {
+ "controller_busnumber": 0,
+ "controller_controllerkey": 100,
+ "controller_devicekey": 1000,
+ "controller_disks_devicekey": [
+ 2000
+ ],
+ "controller_label": "SCSI controller 0",
+ "controller_summary": "LSI Logic SAS",
+ "controller_unitnumber": 3,
+ "controller_bus_sharing": 'noSharing'
+ },
+ "1": {
+ "controller_busnumber": 1,
+ "controller_controllerkey": 100,
+ "controller_devicekey": 1001,
+ "controller_disks_devicekey": [],
+ "controller_label": "SCSI controller 1",
+ "controller_summary": "VMware paravirtual SCSI",
+ "controller_unitnumber": 4,
+ "controller_bus_sharing": 'physicalSharing'
+ }
+ },
+ "usb2": {
+ "0": {
+ "controller_busnumber": 0,
+ "controller_controllerkey": 100,
+ "controller_devicekey": 7000,
+ "controller_disks_devicekey": [],
+ "controller_label": "USB Controller",
+ "controller_summary": "Auto connect Disabled",
+ "controller_unitnumber": 22
+ }
+ }
+ }
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+import time
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task, TaskError
+from ansible_collections.community.vmware.plugins.module_utils.vm_device_helper import PyVmomiDeviceHelper
+
+
class PyVmomiHelper(PyVmomi):
    """Add/remove disk (SCSI/SATA/NVMe) and USB controllers on a VM, and gather
    controller facts.

    The workflow is: validate the user-supplied controller list
    (:meth:`sanitize_disk_controller_config`), build a single
    ``vim.vm.ConfigSpec`` with all additions/removals, then apply it with one
    ReconfigVM_Task (:meth:`configure_disk_controllers`).
    """

    def __init__(self, module):
        super(PyVmomiHelper, self).__init__(module)
        self.device_helper = PyVmomiDeviceHelper(self.module)
        # Seconds to wait after a reconfigure so the updated hardware list is
        # visible when facts are gathered; overridable via 'sleep_time' (max 600).
        self.sleep_time = 10
        # Map of controller-type string -> pyVmomi device class for SCSI variants,
        # USB 2/3, SATA and NVMe controllers.
        self.controller_types = self.device_helper.scsi_device_type.copy()
        self.controller_types.update(self.device_helper.usb_device_type)
        self.controller_types.update({'sata': self.device_helper.sata_device_type, 'nvme': self.device_helper.nvme_device_type})
        self.config_spec = vim.vm.ConfigSpec()
        self.config_spec.deviceChange = []
        self.change_detected = False
        # Free bus numbers per disk-controller family; each family allows at most
        # 4 controllers per VM.
        self.disk_ctl_bus_num_list = dict(sata=list(range(0, 4)),
                                          nvme=list(range(0, 4)),
                                          scsi=list(range(0, 4)))

    def get_unused_ctl_bus_number(self):
        """
        Remove the bus numbers already occupied by existing controllers from
        the per-family availability lists in ``self.disk_ctl_bus_num_list``.
        """
        for device in self.current_vm_obj.config.hardware.device:
            if isinstance(device, self.device_helper.sata_device_type):
                if len(self.disk_ctl_bus_num_list['sata']) != 0:
                    self.disk_ctl_bus_num_list['sata'].remove(device.busNumber)
            if isinstance(device, self.device_helper.nvme_device_type):
                if len(self.disk_ctl_bus_num_list['nvme']) != 0:
                    self.disk_ctl_bus_num_list['nvme'].remove(device.busNumber)
            if isinstance(device, tuple(self.device_helper.scsi_device_type.values())):
                if len(self.disk_ctl_bus_num_list['scsi']) != 0:
                    self.disk_ctl_bus_num_list['scsi'].remove(device.busNumber)

    def check_ctl_disk_exist(self, ctl_type=None, bus_number=None):
        """
        Check if a controller of the specified type (and optional bus number)
        exists and whether any device is attached to it.

        Return: (controller device or None, bool: devices attached)
        """
        ctl_specified = None
        disks_attached_exist = False
        if ctl_type is None:
            return ctl_specified, disks_attached_exist

        for device in self.current_vm_obj.config.hardware.device:
            if isinstance(device, self.controller_types.get(ctl_type)):
                if bus_number is not None and device.busNumber != bus_number:
                    continue
                ctl_specified = device
                # device.device lists the keys of attached devices (disks etc.).
                if len(device.device) != 0:
                    disks_attached_exist = True
                break

        return ctl_specified, disks_attached_exist

    def create_controller(self, ctl_type, bus_sharing, bus_number=0):
        """
        Create a new disk or USB controller device spec of the specified type.

        Args:
            ctl_type: controller type string ('sata', 'nvme', scsi variant, 'usb2', 'usb3')
            bus_sharing: noSharing, virtualSharing, physicalSharing (SCSI only)
            bus_number: disk controller bus number (ignored for USB)

        Return: vim.vm.device.VirtualDeviceSpec for the new controller
        """
        if ctl_type == 'sata' or ctl_type == 'nvme' or ctl_type in self.device_helper.scsi_device_type.keys():
            disk_ctl = self.device_helper.create_disk_controller(ctl_type, bus_number, bus_sharing)
        elif ctl_type in self.device_helper.usb_device_type.keys():
            disk_ctl = vim.vm.device.VirtualDeviceSpec()
            disk_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
            disk_ctl.device = self.device_helper.usb_device_type.get(ctl_type)()

            # Fixed device keys vSphere uses for USB controllers: 7000 = USB 2.0,
            # 14000 = USB 3.x.
            if ctl_type == 'usb2':
                disk_ctl.device.key = 7000
            elif ctl_type == 'usb3':
                disk_ctl.device.key = 14000

            disk_ctl.device.deviceInfo = vim.Description()
            disk_ctl.device.busNumber = bus_number

        return disk_ctl

    def gather_disk_controller_facts(self):
        """
        Gather existing controller facts, keyed by family then bus number.

        Return: dict with keys 'scsi', 'sata', 'nvme', 'usb2', 'usb3'
        """
        disk_ctl_facts = dict(
            scsi=dict(),
            sata=dict(),
            nvme=dict(),
            usb2=dict(),
            usb3=dict()
        )
        for device in self.current_vm_obj.config.hardware.device:
            ctl_facts_dict = dict()
            if isinstance(device, tuple(self.controller_types.values())):
                ctl_facts_dict[device.busNumber] = dict(
                    controller_summary=device.deviceInfo.summary,
                    controller_label=device.deviceInfo.label,
                    controller_busnumber=device.busNumber,
                    controller_controllerkey=device.controllerKey,
                    controller_devicekey=device.key,
                    controller_unitnumber=device.unitNumber,
                    controller_disks_devicekey=device.device,
                )
                # Only SCSI controllers expose sharedBus.
                if hasattr(device, 'sharedBus'):
                    ctl_facts_dict[device.busNumber]['controller_bus_sharing'] = device.sharedBus
                if isinstance(device, tuple(self.device_helper.scsi_device_type.values())):
                    disk_ctl_facts['scsi'].update(ctl_facts_dict)
                if isinstance(device, self.device_helper.nvme_device_type):
                    disk_ctl_facts['nvme'].update(ctl_facts_dict)
                if isinstance(device, self.device_helper.sata_device_type):
                    disk_ctl_facts['sata'].update(ctl_facts_dict)
                if isinstance(device, self.device_helper.usb_device_type.get('usb2')):
                    disk_ctl_facts['usb2'].update(ctl_facts_dict)
                if isinstance(device, self.device_helper.usb_device_type.get('usb3')):
                    disk_ctl_facts['usb3'].update(ctl_facts_dict)

        return disk_ctl_facts

    def sanitize_disk_controller_config(self):
        """
        Check correctness of controller configuration provided by the user and
        assign free bus numbers to controllers being added.

        Return: the list of controller config dicts, possibly augmented with
        'controller_number' entries (None when the family is already full).
        """
        if not self.params.get('controllers'):
            self.module.exit_json(changed=False, msg="No controller provided for virtual"
                                                     " machine '%s' for management." % self.current_vm_obj.name)
        # Honor a non-default sleep_time, clamped to [0, 600] per the documented
        # maximum. (The previous check `10 != x <= 300` silently ignored values
        # above 300 and accepted negative ones.)
        sleep_time = self.params.get('sleep_time')
        if sleep_time != 10:
            self.sleep_time = min(max(sleep_time, 0), 600)
        exec_get_unused_ctl_bus_number = False
        controller_config = self.params.get('controllers')
        for ctl_config in controller_config:
            if ctl_config:
                if ctl_config['type'] not in self.device_helper.usb_device_type.keys():
                    if ctl_config['state'] == 'absent' and ctl_config.get('controller_number') is None:
                        self.module.fail_json(msg="Disk controller number is required when removing it.")
                    # Compute occupied bus numbers once, only if something is added.
                    if ctl_config['state'] == 'present' and not exec_get_unused_ctl_bus_number:
                        self.get_unused_ctl_bus_number()
                        exec_get_unused_ctl_bus_number = True
                # starts from hardware version 13 nvme controller supported
                if ctl_config['state'] == 'present' and ctl_config['type'] == 'nvme':
                    # config.version looks like 'vmx-13'.
                    vm_hwv = int(self.current_vm_obj.config.version.split('-')[1])
                    if vm_hwv < 13:
                        self.module.fail_json(msg="Can not create new NVMe disk controller due to VM hardware version"
                                                  " is '%s', not >= 13." % vm_hwv)
        if exec_get_unused_ctl_bus_number:
            for ctl_config in controller_config:
                if ctl_config and ctl_config['state'] == 'present' and ctl_config['type'] not in self.device_helper.usb_device_type.keys():
                    if ctl_config['type'] in self.device_helper.scsi_device_type.keys():
                        if len(self.disk_ctl_bus_num_list['scsi']) != 0:
                            ctl_config['controller_number'] = self.disk_ctl_bus_num_list['scsi'].pop(0)
                        else:
                            # Family full: flagged with None, warned about later.
                            ctl_config['controller_number'] = None

                    elif ctl_config['type'] == 'sata' or ctl_config['type'] == 'nvme':
                        if len(self.disk_ctl_bus_num_list.get(ctl_config['type'])) != 0:
                            ctl_config['controller_number'] = self.disk_ctl_bus_num_list.get(ctl_config['type']).pop(0)
                        else:
                            ctl_config['controller_number'] = None

        return controller_config

    def configure_disk_controllers(self):
        """
        Do disk controller management, add or remove, or gather facts only.

        Return: result dict with 'changed', 'failed' and either
        'disk_controller_data' or an error 'msg'.
        """
        if self.params['gather_disk_controller_facts']:
            results = {'changed': False, 'failed': False, 'disk_controller_data': self.gather_disk_controller_facts()}
            return results

        controller_config = self.sanitize_disk_controller_config()
        for disk_ctl_config in controller_config:
            if disk_ctl_config and disk_ctl_config['state'] == 'present':
                # create new USB controller, bus number is 0
                if disk_ctl_config['type'] in self.device_helper.usb_device_type.keys():
                    usb_exists, has_disks_attached = self.check_ctl_disk_exist(disk_ctl_config['type'])
                    if usb_exists:
                        self.module.warn("'%s' USB controller already exists, can not add more." % disk_ctl_config['type'])
                    else:
                        disk_controller_new = self.create_controller(disk_ctl_config['type'], disk_ctl_config.get('bus_sharing'))
                        self.config_spec.deviceChange.append(disk_controller_new)
                        self.change_detected = True
                # create other disk controller
                else:
                    if disk_ctl_config.get('controller_number') is not None:
                        disk_controller_new = self.create_controller(
                            disk_ctl_config['type'],
                            disk_ctl_config.get('bus_sharing'),
                            disk_ctl_config.get('controller_number')
                        )
                        self.config_spec.deviceChange.append(disk_controller_new)
                        self.change_detected = True
                    else:
                        # controller_number is None when sanitize found no free bus.
                        if disk_ctl_config['type'] in self.device_helper.scsi_device_type.keys():
                            self.module.warn("Already 4 SCSI controllers, can not add new '%s' controller." % disk_ctl_config['type'])
                        else:
                            self.module.warn("Already 4 '%s' controllers, can not add new one." % disk_ctl_config['type'])
            elif disk_ctl_config and disk_ctl_config['state'] == 'absent':
                existing_ctl, has_disks_attached = self.check_ctl_disk_exist(disk_ctl_config['type'], disk_ctl_config.get('controller_number'))
                if existing_ctl is not None:
                    # Only remove a controller with nothing attached to it.
                    if not has_disks_attached:
                        ctl_spec = vim.vm.device.VirtualDeviceSpec()
                        ctl_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
                        ctl_spec.device = existing_ctl
                        self.config_spec.deviceChange.append(ctl_spec)
                        self.change_detected = True
                    else:
                        self.module.warn("Can not remove specified controller, type '%s', bus number '%s',"
                                         " there are disks attaching to it." % (disk_ctl_config['type'], disk_ctl_config.get('controller_number')))
                else:
                    self.module.warn("Can not find specified controller to remove, type '%s', bus number '%s'."
                                     % (disk_ctl_config['type'], disk_ctl_config.get('controller_number')))

        try:
            task = self.current_vm_obj.ReconfigVM_Task(spec=self.config_spec)
            wait_for_task(task)
        except vim.fault.InvalidDeviceSpec as e:
            self.module.fail_json(msg="Failed to configure controller on given virtual machine due to invalid"
                                      " device spec : %s" % to_native(e.msg),
                                  details="Please check ESXi server logs for more details.")
        except vim.fault.RestrictedVersion as e:
            self.module.fail_json(msg="Failed to reconfigure virtual machine due to"
                                      " product versioning restrictions: %s" % to_native(e.msg))
        except TaskError as task_e:
            self.module.fail_json(msg=to_native(task_e))

        if task.info.state == 'error':
            results = {'changed': self.change_detected, 'failed': True, 'msg': task.info.error.msg}
        else:
            # Give vCenter time to reflect the new hardware before gathering facts.
            if self.change_detected:
                time.sleep(self.sleep_time)
            results = {'changed': self.change_detected, 'failed': False, 'disk_controller_data': self.gather_disk_controller_facts()}

        return results
+
+
def main():
    """Module entry point: locate the VM and apply the requested controller changes."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        folder=dict(type='str'),
        datacenter=dict(type='str', default='ha-datacenter'),
        controllers=dict(
            type='list',
            elements='dict',
            required=False,
            options=dict(
                state=dict(type='str', choices=['present', 'absent'], required=True),
                controller_number=dict(type='int', choices=[0, 1, 2, 3], required=False),
                type=dict(
                    type='str',
                    required=True,
                    choices=['sata', 'nvme', 'lsilogic', 'buslogic', 'lsilogicsas', 'paravirtual', 'usb2', 'usb3'],
                ),
                bus_sharing=dict(
                    type='str',
                    required=False,
                    default='noSharing',
                    choices=['noSharing', 'physicalSharing', 'virtualSharing'],
                ),
            ),
        ),
        use_instance_uuid=dict(type='bool', default=False),
        gather_disk_controller_facts=dict(type='bool', default=False),
        sleep_time=dict(type='int', default=10),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[['name', 'uuid', 'moid']],
    )

    if module.params['folder']:
        # FindByInventoryPath() does not require an absolute path,
        # so leave the input folder path unmodified apart from a trailing slash.
        module.params['folder'] = module.params['folder'].rstrip('/')

    pyv = PyVmomiHelper(module)
    # Bail out early when the requested VM does not exist.
    vm = pyv.get_vm()
    if not vm:
        vm_id = module.params.get('name') or module.params.get('uuid') or module.params.get('moid')
        module.fail_json(msg="Unable to manage disk or USB controllers for non-existing virtual machine '%s'." % vm_id)

    result = pyv.configure_disk_controllers()
    if result['failed']:
        module.fail_json(**result)
    module.exit_json(**result)


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_cross_vc_clone.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_cross_vc_clone.py
new file mode 100644
index 000000000..5da157081
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_cross_vc_clone.py
@@ -0,0 +1,418 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Anusha Hegde <anushah@vmware.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: vmware_guest_cross_vc_clone
+
+short_description: Cross-vCenter VM/template clone
+
+
+description:
+ - 'This module can be used for Cross-vCenter vm/template clone'
+
+options:
+ name:
+ description:
+ - Name of the virtual machine or template.
+ - This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
+ type: str
+ uuid:
+ description:
+ - UUID of the vm/template instance to clone from, this is VMware's unique identifier.
+ - This is a required parameter, if parameter C(name) or C(moid) is not supplied.
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the vm/template instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ type: str
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: false
+ type: bool
+ destination_vm_name:
+ description:
+ - The name of the cloned VM.
+ type: str
+ required: true
+ destination_vcenter:
+ description:
+ - The hostname or IP address of the destination VCenter.
+ type: str
+ required: true
+ destination_vcenter_username:
+ description:
+ - The username of the destination VCenter.
+ type: str
+ required: true
+ destination_vcenter_password:
+ description:
+ - The password of the destination VCenter.
+ type: str
+ required: true
+ destination_vcenter_port:
+ description:
+ - The port to establish connection in the destination VCenter.
+ type: int
+ default: 443
+ destination_vcenter_validate_certs:
+ description:
+ - Parameter to indicate if certification validation needs to be done on destination VCenter.
+ type: bool
+ default: false
+ destination_host:
+ description:
+ - The name of the destination host.
+ type: str
+ required: true
+ destination_datastore:
+ description:
+ - The name of the destination datastore or the datastore cluster.
+ - If datastore cluster name is specified, we will find the Storage DRS recommended datastore in that cluster.
+ type: str
+ required: true
+ destination_vm_folder:
+ description:
+ - Destination folder, absolute path to deploy the cloned vm.
+ - This parameter is case sensitive.
+ - 'Examples:'
+ - ' folder: vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ type: str
+ required: true
+ destination_resource_pool:
+ description:
+ - Destination resource pool.
+ - If not provided, the destination host's parent's resource pool will be used.
+ type: str
+ is_template:
+ description:
+ - Specifies whether or not the new virtual machine should be marked as a template.
+ type: bool
+ default: false
+ state:
+ description:
+ - The state of Virtual Machine deployed.
+      - If set to C(present) and VM does not exist, then VM is created.
+      - If set to C(present) and VM exists, no action is taken.
+      - If set to C(poweredon) and VM does not exist, then VM is created with powered on state.
+      - If set to C(poweredon) and VM exists, no action is taken.
+ type: str
+ required: false
+ default: 'present'
+ choices: [ 'present', 'poweredon' ]
+ timeout:
+ description:
+ - The timeout in seconds. When the timeout is reached, the module will fail.
+ type: int
+ default: 3600
+ version_added: '3.5.0'
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+
+author:
+ - Anusha Hegde (@anusha94)
+'''
+
+EXAMPLES = r'''
+# Clone template
+- name: clone a template across VC
+ community.vmware.vmware_guest_cross_vc_clone:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ name: "test_vm1"
+ destination_vm_name: "cloned_vm_from_template"
+ destination_vcenter: '{{ destination_vcenter_hostname }}'
+ destination_vcenter_username: '{{ destination_vcenter_username }}'
+ destination_vcenter_password: '{{ destination_vcenter_password }}'
+ destination_vcenter_port: '{{ destination_vcenter_port }}'
+ destination_vcenter_validate_certs: '{{ destination_vcenter_validate_certs }}'
+ destination_host: '{{ destination_esxi }}'
+ destination_datastore: '{{ destination_datastore }}'
+ destination_vm_folder: '{{ destination_vm_folder }}'
+ state: present
+ register: cross_vc_clone_from_template
+
+- name: clone a VM across VC
+ community.vmware.vmware_guest_cross_vc_clone:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: "{{ vcenter_password }}"
+ name: "test_vm1"
+ destination_vm_name: "cloned_vm_from_vm"
+ destination_vcenter: '{{ destination_vcenter_hostname }}'
+ destination_vcenter_username: '{{ destination_vcenter_username }}'
+ destination_vcenter_password: '{{ destination_vcenter_password }}'
+ destination_host: '{{ destination_esxi }}'
+ destination_datastore: '{{ destination_datastore }}'
+ destination_vm_folder: '{{ destination_vm_folder }}'
+ state: poweredon
+ register: cross_vc_clone_from_vm
+
+- name: check_mode support
+ community.vmware.vmware_guest_cross_vc_clone:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: "{{ vcenter_password }}"
+ name: "test_vm1"
+ destination_vm_name: "cloned_vm_from_vm"
+ destination_vcenter: '{{ destination_vcenter_hostname }}'
+ destination_vcenter_username: '{{ destination_vcenter_username }}'
+ destination_vcenter_password: '{{ destination_vcenter_password }}'
+ destination_host: '{{ destination_esxi }}'
+ destination_datastore: '{{ destination_datastore }}'
+ destination_vm_folder: '{{ destination_vm_folder }}'
+ check_mode: true
+'''
+
+RETURN = r'''
+vm_info:
+ description: metadata about the virtual machine
+ returned: always
+ type: dict
+ sample: {
+ "vm_name": "",
+ "vcenter": "",
+ "host": "",
+ "datastore": "",
+ "vm_folder": "",
+ "power_on": ""
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi,
+ find_hostsystem_by_name,
+ find_datastore_by_name,
+ find_folder_by_name,
+ find_vm_by_name,
+ connect_to_api,
+ vmware_argument_spec,
+ gather_vm_facts,
+ find_obj,
+ find_resource_pool_by_name,
+ wait_for_task,
+)
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+
+class CrossVCCloneManager(PyVmomi):
+ def __init__(self, module):
+ super(CrossVCCloneManager, self).__init__(module)
+ self.config_spec = vim.vm.ConfigSpec()
+ self.clone_spec = vim.vm.CloneSpec()
+ self.relocate_spec = vim.vm.RelocateSpec()
+ self.service_locator = vim.ServiceLocator()
+ self.destination_vcenter = self.params['destination_vcenter']
+ self.destination_vcenter_username = self.params['destination_vcenter_username']
+ self.destination_vcenter_password = self.params['destination_vcenter_password']
+ self.destination_vcenter_port = self.params.get('port', 443)
+ self.destination_vcenter_validate_certs = self.params.get('destination_vcenter_validate_certs', None)
+ self.timeout = self.params.get('timeout')
+
+ def get_new_vm_info(self, vm):
+ # to check if vm has been cloned in the destination vc
+ # connect to destination VC
+ # query for the vm in destination vc
+ # get the host and datastore info
+ # get the power status of the newly cloned vm
+ self.destination_content = connect_to_api(
+ self.module,
+ hostname=self.destination_vcenter,
+ username=self.destination_vcenter_username,
+ password=self.destination_vcenter_password,
+ port=self.destination_vcenter_port,
+ validate_certs=self.destination_vcenter_validate_certs)
+ info = {}
+ vm_obj = find_vm_by_name(content=self.destination_content, vm_name=vm)
+ if vm_obj is None:
+ self.module.fail_json(msg="Newly cloned VM is not found in the destination VCenter")
+ else:
+ vm_facts = gather_vm_facts(self.destination_content, vm_obj)
+ info['vm_name'] = vm
+ info['vcenter'] = self.destination_vcenter
+ info['host'] = vm_facts['hw_esxi_host']
+ info['datastore'] = vm_facts['hw_datastores']
+ info['vm_folder'] = vm_facts['hw_folder']
+ info['power_on'] = vm_facts['hw_power_status']
+ return info
+
+ def clone(self):
+ # clone the vm/template on destination VC
+ vm_folder = find_folder_by_name(content=self.destination_content, folder_name=self.params['destination_vm_folder'])
+ if not vm_folder:
+ self.module.fail_json(msg="Destination folder does not exist. Please refer to the documentation to correctly specify the folder.")
+ vm_name = self.params['destination_vm_name']
+ task = self.vm_obj.Clone(folder=vm_folder, name=vm_name, spec=self.clone_spec)
+ wait_for_task(task, timeout=self.timeout)
+ if task.info.state == 'error':
+ result = {'changed': False, 'failed': True, 'msg': task.info.error.msg}
+ else:
+ vm_info = self.get_new_vm_info(vm_name)
+ result = {'changed': True, 'failed': False, 'vm_info': vm_info}
+ return result
+
+ def sanitize_params(self):
+ '''
+ this method is used to verify user provided parameters
+ '''
+ self.vm_obj = self.get_vm()
+ if self.vm_obj is None:
+ vm_id = self.vm_uuid or self.vm_name or self.moid
+ self.module.fail_json(msg="Failed to find the VM/template with %s" % vm_id)
+
+ # connect to destination VC
+ self.destination_content = connect_to_api(
+ self.module,
+ hostname=self.destination_vcenter,
+ username=self.destination_vcenter_username,
+ password=self.destination_vcenter_password,
+ port=self.destination_vcenter_port,
+ validate_certs=self.destination_vcenter_validate_certs)
+
+ # Check if vm name already exists in the destination VC
+ vm = find_vm_by_name(content=self.destination_content, vm_name=self.params['destination_vm_name'])
+ if vm:
+ self.module.exit_json(changed=False, msg="A VM with the given name already exists")
+
+ datastore_name = self.params['destination_datastore']
+ datastore_cluster = find_obj(self.destination_content, [vim.StoragePod], datastore_name)
+ if datastore_cluster:
+ # If user specified datastore cluster so get recommended datastore
+ datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster)
+ # Check if get_recommended_datastore or user specified datastore exists or not
+ self.destination_datastore = find_datastore_by_name(content=self.destination_content, datastore_name=datastore_name)
+ if self.destination_datastore is None:
+ self.module.fail_json(msg="Destination datastore not found.")
+
+ self.destination_host = find_hostsystem_by_name(content=self.destination_content, hostname=self.params['destination_host'])
+ if self.destination_host is None:
+ self.module.fail_json(msg="Destination host not found.")
+
+ if self.params['destination_resource_pool']:
+ self.destination_resource_pool = find_resource_pool_by_name(
+ content=self.destination_content,
+ resource_pool_name=self.params['destination_resource_pool'])
+ else:
+ self.destination_resource_pool = self.destination_host.parent.resourcePool
+
+ def populate_specs(self):
+ # populate service locator
+ self.service_locator.instanceUuid = self.destination_content.about.instanceUuid
+ self.service_locator.url = "https://" + self.destination_vcenter + ":" + str(self.params['port']) + "/sdk"
+ # If ssl verify is false, we ignore it also in the clone task by fetching thumbprint automatically
+ if not self.destination_vcenter_validate_certs:
+ self.service_locator.sslThumbprint = self.get_cert_fingerprint(
+ self.destination_vcenter,
+ self.destination_vcenter_port,
+ self.module.params['proxy_host'],
+ self.module.params['proxy_port'])
+ creds = vim.ServiceLocatorNamePassword()
+ creds.username = self.destination_vcenter_username
+ creds.password = self.destination_vcenter_password
+ self.service_locator.credential = creds
+
+ # populate relocate spec
+ self.relocate_spec.datastore = self.destination_datastore
+ self.relocate_spec.pool = self.destination_resource_pool
+ self.relocate_spec.service = self.service_locator
+ self.relocate_spec.host = self.destination_host
+
+ # populate clone spec
+ self.clone_spec.config = self.config_spec
+ self.clone_spec.powerOn = True if self.params['state'].lower() == 'poweredon' else False
+ self.clone_spec.location = self.relocate_spec
+ self.clone_spec.template = self.params['is_template']
+
+
+def main():
+    """
+    Main method
+
+    Builds the argument spec, handles check mode by reporting the intended
+    clone operation without contacting the destination vCenter, and otherwise
+    validates parameters, builds the clone specs and runs the clone task.
+    """
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(
+        name=dict(type='str'),
+        uuid=dict(type='str'),
+        moid=dict(type='str'),
+        use_instance_uuid=dict(type='bool', default=False),
+        destination_vm_name=dict(type='str', required=True),
+        destination_datastore=dict(type='str', required=True),
+        destination_host=dict(type='str', required=True),
+        destination_vcenter=dict(type='str', required=True),
+        destination_vcenter_username=dict(type='str', required=True),
+        destination_vcenter_password=dict(type='str', required=True, no_log=True),
+        destination_vcenter_port=dict(type='int', default=443),
+        destination_vcenter_validate_certs=dict(type='bool', default=False),
+        destination_vm_folder=dict(type='str', required=True),
+        destination_resource_pool=dict(type='str', default=None),
+        is_template=dict(type='bool', default=False),
+        state=dict(type='str', default='present',
+                   choices=['present', 'poweredon']),
+        timeout=dict(type='int', default=3600)
+    )
+
+    # Exactly one of name/uuid/moid identifies the source VM/template.
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_one_of=[
+            ['uuid', 'name', 'moid'],
+        ],
+        mutually_exclusive=[
+            ['uuid', 'name', 'moid'],
+        ],
+    )
+    result = {'failed': False, 'changed': False}
+    # In check mode, only report what would be done; no vCenter connection is
+    # made and no existence checks are performed. The two branches are
+    # mutually exclusive because state is restricted to present/poweredon.
+    if module.check_mode:
+        if module.params['state'] in ['present']:
+            result.update(
+                vm_name=module.params['destination_vm_name'],
+                vcenter=module.params['destination_vcenter'],
+                host=module.params['destination_host'],
+                datastore=module.params['destination_datastore'],
+                vm_folder=module.params['destination_vm_folder'],
+                state=module.params['state'],
+                changed=True,
+                desired_operation='Create VM with PowerOff State'
+            )
+        if module.params['state'] == 'poweredon':
+            result.update(
+                vm_name=module.params['destination_vm_name'],
+                vcenter=module.params['destination_vcenter'],
+                host=module.params['destination_host'],
+                datastore=module.params['destination_datastore'],
+                vm_folder=module.params['destination_vm_folder'],
+                state=module.params['state'],
+                changed=True,
+                desired_operation='Create VM with PowerON State'
+            )
+        module.exit_json(**result)
+
+    # Order matters: sanitize_params() resolves the destination objects that
+    # populate_specs() and clone() rely on.
+    clone_manager = CrossVCCloneManager(module)
+    clone_manager.sanitize_params()
+    clone_manager.populate_specs()
+    result = clone_manager.clone()
+
+    if result['failed']:
+        module.fail_json(**result)
+    else:
+        module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_custom_attribute_defs.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_custom_attribute_defs.py
new file mode 100644
index 000000000..c7fdec652
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_custom_attribute_defs.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_custom_attribute_defs
+short_description: Manage custom attributes definitions for virtual machine from VMware
+description:
+ - This module can be used to add and remove custom attributes definitions for the given virtual machine from VMware.
+author:
+ - Jimmy Conner (@cigamit)
+ - Abhijeet Kasurde (@Akasurde)
+options:
+ attribute_key:
+ description:
+ - Name of the custom attribute definition.
+      - This is a required parameter, if C(state) is set to C(present) or C(absent).
+ required: false
+ type: str
+ state:
+ description:
+ - Manage definition of custom attributes.
+ - If set to C(present) and definition not present, then custom attribute definition is created.
+ - If set to C(present) and definition is present, then no action taken.
+ - If set to C(absent) and definition is present, then custom attribute definition is removed.
+ - If set to C(absent) and definition is absent, then no action taken.
+ default: 'present'
+ choices: ['present', 'absent']
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Add VMware Attribute Definition
+ community.vmware.vmware_guest_custom_attribute_defs:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ state: present
+ attribute_key: custom_attr_def_1
+ delegate_to: localhost
+ register: defs
+
+- name: Remove VMware Attribute Definition
+ community.vmware.vmware_guest_custom_attribute_defs:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ state: absent
+ attribute_key: custom_attr_def_1
+ delegate_to: localhost
+ register: defs
+'''
+
+RETURN = r'''
+custom_attribute_defs:
+ description: list of all current attribute definitions
+ returned: always
+ type: list
+ sample: ["sample_5", "sample_4"]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+
+class VmAttributeDefManager(PyVmomi):
+ def __init__(self, module):
+ super(VmAttributeDefManager, self).__init__(module)
+
+ def remove_custom_def(self, field):
+ changed = False
+ f = dict()
+ for x in self.custom_field_mgr:
+ if x.name == field and x.managedObjectType == vim.VirtualMachine:
+ changed = True
+ if not self.module.check_mode:
+ self.content.customFieldsManager.RemoveCustomFieldDef(key=x.key)
+ break
+ f[x.name] = (x.key, x.managedObjectType)
+ return {'changed': changed, 'failed': False, 'custom_attribute_defs': list(f.keys())}
+
+ def add_custom_def(self, field):
+ changed = False
+ found = False
+ f = dict()
+ for x in self.custom_field_mgr:
+ if x.name == field:
+ found = True
+ f[x.name] = (x.key, x.managedObjectType)
+
+ if not found:
+ changed = True
+ if not self.module.check_mode:
+ new_field = self.content.customFieldsManager.AddFieldDefinition(name=field, moType=vim.VirtualMachine)
+ f[new_field.name] = (new_field.key, new_field.type)
+ return {'changed': changed, 'failed': False, 'custom_attribute_defs': list(f.keys())}
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ attribute_key=dict(type='str', no_log=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[
+ ['state', 'present', ['attribute_key']],
+ ['state', 'absent', ['attribute_key']],
+ ]
+ )
+
+ pyv = VmAttributeDefManager(module)
+ results = dict(changed=False, custom_attribute_defs=list())
+ if module.params['state'] == "present":
+ results = pyv.add_custom_def(module.params['attribute_key'])
+ elif module.params['state'] == "absent":
+ results = pyv.remove_custom_def(module.params['attribute_key'])
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_custom_attributes.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_custom_attributes.py
new file mode 100644
index 000000000..1677f9ef2
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_custom_attributes.py
@@ -0,0 +1,358 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright, (c) 2018, Ansible Project
+# Copyright, (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_custom_attributes
+short_description: Manage custom attributes from VMware for the given virtual machine
+description:
+ - This module can be used to add, remove and update custom attributes for the given virtual machine.
+author:
+ - Jimmy Conner (@cigamit)
+ - Abhijeet Kasurde (@Akasurde)
+options:
+ name:
+ description:
+ - Name of the virtual machine to work with.
+      - This is a required parameter, if C(uuid) or C(moid) is not supplied.
+ type: str
+ state:
+ description:
+ - The action to take.
+ - If set to C(present), then custom attribute is added or updated.
+ - If set to C(absent), then custom attribute value is removed.
+ default: 'present'
+ choices: ['present', 'absent']
+ type: str
+ uuid:
+ description:
+ - UUID of the virtual machine to manage if known. This is VMware's unique identifier.
+      - This is a required parameter, if C(name) or C(moid) is not supplied.
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ type: str
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: false
+ type: bool
+ folder:
+ description:
+ - Absolute path to find an existing guest.
+ - This is required parameter, if C(name) is supplied and multiple virtual machines with same name are found.
+ type: str
+ datacenter:
+ description:
+ - Datacenter name where the virtual machine is located in.
+ type: str
+ attributes:
+ description:
+ - A list of name and value of custom attributes that needs to be manage.
+ - Value of custom attribute is not required and will be ignored, if C(state) is set to C(absent).
+ suboptions:
+ name:
+ description:
+ - Name of the attribute.
+ type: str
+ required: true
+ value:
+ description:
+ - Value of the attribute.
+ type: str
+ default: ''
+ default: []
+ type: list
+ elements: dict
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Add virtual machine custom attributes
+ community.vmware.vmware_guest_custom_attributes:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ uuid: 421e4592-c069-924d-ce20-7e7533fab926
+ state: present
+ attributes:
+ - name: MyAttribute
+ value: MyValue
+ delegate_to: localhost
+ register: attributes
+
+- name: Add multiple virtual machine custom attributes
+ community.vmware.vmware_guest_custom_attributes:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ uuid: 421e4592-c069-924d-ce20-7e7533fab926
+ state: present
+ attributes:
+ - name: MyAttribute
+ value: MyValue
+ - name: MyAttribute2
+ value: MyValue2
+ delegate_to: localhost
+ register: attributes
+
+- name: Remove virtual machine Attribute
+ community.vmware.vmware_guest_custom_attributes:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ uuid: 421e4592-c069-924d-ce20-7e7533fab926
+ state: absent
+ attributes:
+ - name: MyAttribute
+ delegate_to: localhost
+ register: attributes
+
+- name: Remove virtual machine Attribute using Virtual Machine MoID
+ community.vmware.vmware_guest_custom_attributes:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ moid: vm-42
+ state: absent
+ attributes:
+ - name: MyAttribute
+ delegate_to: localhost
+ register: attributes
+'''
+
+RETURN = r'''
+custom_attributes:
+ description: metadata about the virtual machine attributes
+ returned: always
+ type: dict
+ sample: {
+ "mycustom": "my_custom_value",
+ "mycustom_2": "my_custom_value_2",
+ "sample_1": "sample_1_value",
+ "sample_2": "sample_2_value",
+ "sample_3": "sample_3_value"
+ }
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+
+
+class VmAttributeManager(PyVmomi):
+    """Manage (set/clear) custom attribute values on a single virtual machine."""
+
+    def __init__(self, module):
+        super(VmAttributeManager, self).__init__(module)
+
+        # Initialize the variables.
+        # Make the diff_config variable to check the difference between a new and existing config.
+        # https://docs.ansible.com/ansible/latest/reference_appendices/common_return_values.html#diff
+        self.diff_config = dict(before={}, after={})
+
+        # result_fields is the variable for the return value after the job finish.
+        self.result_fields = {}
+
+        # update_custom_attributes is the variable for storing the custom attributes to update.
+        self.update_custom_attributes = []
+
+        # changed variable is the flag of whether the target changed.
+        # https://docs.ansible.com/ansible/latest/reference_appendices/common_return_values.html#changed
+        self.changed = False
+
+    def set_custom_field(self, vm, user_fields):
+        """Add or update the custom attribute and value.
+
+        Args:
+            vm (vim.VirtualMachine): The managed object of a virtual machine.
+            user_fields (list): list of the specified custom attributes by user.
+
+        Returns:
+            The dictionary for the ansible return value.
+        """
+        # check_exists() populates self.update_custom_attributes, self.changed
+        # and self.diff_config as side effects; it must run before any write.
+        self.check_exists(vm, user_fields)
+        if self.module.check_mode is True:
+            self.module.exit_json(changed=self.changed, diff=self.diff_config)
+
+        # If update_custom_attributes variable has elements, add or update the custom attributes and values.
+        for field in self.update_custom_attributes:
+            if 'key' in field:
+                # 'key' present means the attribute definition already exists.
+                self.content.customFieldsManager.SetField(entity=vm, key=field['key'], value=field['value'])
+            else:
+                # No 'key': create the definition first, then set its value.
+                field_key = self.content.customFieldsManager.AddFieldDefinition(name=field['name'],
+                                                                               moType=vim.VirtualMachine)
+                self.content.customFieldsManager.SetField(entity=vm, key=field_key.key, value=field['value'])
+
+            # Set result_fields for the return value.
+            self.result_fields[field['name']] = field['value']
+
+        return {'changed': self.changed, 'failed': False, 'custom_attributes': self.result_fields}
+
+    def remove_custom_field(self, vm, user_fields):
+        """Remove the value from the existing custom attribute.
+
+        Args:
+            vm (vim.VirtualMachine): The managed object of a virtual machine.
+            user_fields (list): list of the specified custom attributes by user.
+
+        Returns:
+            The dictionary for the ansible return value.
+        """
+        # All custom attribute values will set blank to remove the value.
+        # NOTE: this mutates the caller-supplied user_fields dicts in place.
+        for v in user_fields:
+            v['value'] = ''
+
+        self.check_exists(vm, user_fields)
+        if self.module.check_mode is True:
+            self.module.exit_json(changed=self.changed, diff=self.diff_config)
+
+        # If update_custom_attributes variable has elements, remove the custom attribute values.
+        # Only entries with an existing definition (and hence a 'key') reach
+        # this point for state=absent, since check_exists() adds keyless
+        # entries only when state is 'present'.
+        for field in self.update_custom_attributes:
+            self.content.customFieldsManager.SetField(entity=vm, key=field['key'], value=field['value'])
+
+            # Set result_fields for the return value.
+            self.result_fields[field['name']] = field['value']
+
+        return {'changed': self.changed, 'failed': False, 'custom_attributes': self.result_fields}
+
+    def check_exists(self, vm, user_fields):
+        """Check the existing custom attributes and values.
+
+        In the function, the below processing is executed.
+
+        Gather the existing custom attributes from the virtual machine and make update_custom_attributes for updating
+        if it has differences between the existing configuration and the user_fields.
+
+        And set diff key for checking between before and after configuration to self.diff_config.
+
+        Args:
+            vm (vim.VirtualMachine): The managed object of a virtual machine.
+            user_fields (list): list of the specified custom attributes by user.
+        """
+        # Gather the available existing custom attributes based on user_fields
+        # (one entry per (definition, matching user field) pair).
+        existing_custom_attributes = []
+        for k, n in [(x.key, x.name) for x in self.custom_field_mgr for v in user_fields if x.name == v['name']]:
+            existing_custom_attributes.append({
+                "key": k,
+                "name": n
+            })
+
+        # Gather the values of set the custom attribute.
+        for e in existing_custom_attributes:
+            for v in vm.customValue:
+                if e['key'] == v.key:
+                    e['value'] = v.value
+
+            # When add custom attribute as a new one, it has not the value key.
+            # Add the value key to avoid unintended behavior in the difference check.
+            if 'value' not in e:
+                e['value'] = ''
+
+        # Select the custom attribute and value to update the configuration.
+        _user_fields_for_diff = []
+        for v in user_fields:
+            for e in existing_custom_attributes:
+                if v['name'] == e['name'] and v['value'] != e['value']:
+                    self.update_custom_attributes.append({
+                        "name": v['name'],
+                        "value": v['value'],
+                        "key": e['key']
+                    })
+
+                if v['name'] == e['name']:
+                    _user_fields_for_diff.append({
+                        "name": v['name'],
+                        "value": v['value']
+                    })
+            # Add the custom attribute as a new one if the state is present and existing_custom_attribute has not the custom attribute name.
+            if v['name'] not in [x['name'] for x in existing_custom_attributes] and self.params['state'] == "present":
+                self.update_custom_attributes.append(v)
+                _user_fields_for_diff.append({
+                    "name": v['name'],
+                    "value": v['value']
+                })
+
+        # If the custom attribute exists to update, the changed is set to True.
+        if self.update_custom_attributes:
+            self.changed = True
+
+        # Add custom_attributes key for the difference between before and after configuration to check.
+        # NOTE(review): x.pop('key', None) both strips the internal 'key'
+        # entry and filters on its truthiness, so entries whose key is falsy
+        # would be dropped from the 'before' diff — confirm this is intended.
+        self.diff_config['before']['custom_attributes'] = sorted(
+            [x for x in existing_custom_attributes if x.pop('key', None)], key=lambda k: k['name']
+        )
+        self.diff_config['after']['custom_attributes'] = sorted(_user_fields_for_diff, key=lambda k: k['name'])
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ datacenter=dict(type='str'),
+ name=dict(type='str'),
+ folder=dict(type='str'),
+ uuid=dict(type='str'),
+ moid=dict(type='str'),
+ use_instance_uuid=dict(type='bool', default=False),
+ state=dict(type='str', default='present',
+ choices=['absent', 'present']),
+ attributes=dict(
+ type='list',
+ default=[],
+ elements='dict',
+ options=dict(
+ name=dict(type='str', required=True),
+ value=dict(type='str', default=''),
+ )
+ ),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['name', 'uuid', 'moid']
+ ],
+ )
+
+ if module.params.get('folder'):
+ # FindByInventoryPath() does not require an absolute path
+ # so we should leave the input folder path unmodified
+ module.params['folder'] = module.params['folder'].rstrip('/')
+
+ pyv = VmAttributeManager(module)
+ results = {'changed': False, 'failed': False, 'instance': dict()}
+
+ # Check if the virtual machine exists before continuing
+ vm = pyv.get_vm()
+
+ if vm:
+ # virtual machine already exists
+ if module.params['state'] == "present":
+ results = pyv.set_custom_field(vm, module.params['attributes'])
+ elif module.params['state'] == "absent":
+ results = pyv.remove_custom_field(vm, module.params['attributes'])
+ module.exit_json(**results)
+ else:
+ # virtual machine does not exists
+ vm_id = (module.params.get('name') or module.params.get('uuid') or module.params.get('moid'))
+ module.fail_json(msg="Unable to manage custom attributes for non-existing"
+ " virtual machine %s" % vm_id)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_customization_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_customization_info.py
new file mode 100644
index 000000000..3f97c533b
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_customization_info.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_customization_info
+short_description: Gather info about VM customization specifications
+description:
+ - This module can be used to gather information about customization specifications.
+ - All parameters and VMware object names are case sensitive.
+author:
+ - Abhijeet Kasurde (@Akasurde)
+options:
+ spec_name:
+ description:
+ - Name of customization specification to find.
+ required: false
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather info about all customization specification
+ community.vmware.vmware_guest_customization_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ delegate_to: localhost
+ register: all_custom_spec_info
+
+- name: Gather info about customization specification with the given name
+ community.vmware.vmware_guest_customization_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ spec_name: custom_linux_spec
+ delegate_to: localhost
+ register: custom_spec_info
+'''
+
+RETURN = r'''
+custom_spec_info:
+ description: metadata about the customization specification
+ returned: always
+ type: dict
+ sample: {
+ "assignip-eee0d684-44b7-457c-8c55-2585590b0d99": {
+ "change_version": "1523438001",
+ "description": "sample description",
+ "dns_server_list": [],
+ "dns_suffix_list": [],
+ "domain": "None",
+ "hostname": "sample1",
+ "hw_clock_utc": null,
+ "last_updated_time": "2018-04-11T09:13:21+00:00",
+ "name": "sample",
+ "nic_setting_map": [
+ {
+ "dns_domain": null,
+ "gateway": [],
+ "ip_address": "192.168.10.10",
+ "net_bios": null,
+ "nic_dns_server_list": [],
+ "primary_wins": null,
+ "secondry_wins": null,
+ "subnet_mask": "255.255.255.0"
+ }
+ ],
+ "time_zone": null,
+ "type": "Linux"
+ },
+ }
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+
+
class VmwareCustomSpecManger(PyVmomi):
    """Gathers information about guest customization specifications from vCenter."""

    def __init__(self, module):
        super(VmwareCustomSpecManger, self).__init__(module)
        # The customization spec manager is a vCenter-side service; bail out
        # early when it is not exposed by the connected endpoint.
        self.cc_mgr = self.content.customizationSpecManager
        if self.cc_mgr is None:
            self.module.fail_json(msg="Failed to get customization spec manager.")

    def gather_custom_spec_info(self):
        """Return a dict mapping spec names to their customization details."""
        spec_name = self.params.get('spec_name', None)

        # Decide which specifications to inspect: the single requested one
        # (failing when it does not exist) or every known specification.
        if spec_name:
            if not self.cc_mgr.DoesCustomizationSpecExist(name=spec_name):
                self.module.fail_json(msg="Unable to find customization specification named '%s'" % spec_name)
            specs_list = [spec_name]
        else:
            specs_list = [available.name for available in self.cc_mgr.info]

        spec_info = dict()
        for spec in specs_list:
            current_spec = self.cc_mgr.GetCustomizationSpec(name=spec)

            adapter_mapping_list = []
            for nic in current_spec.spec.nicSettingMap:
                adapter = nic.adapter
                adapter_mapping_list.append(dict(
                    mac_address=nic.macAddress,
                    ip_address=adapter.ip.ipAddress if hasattr(adapter.ip, 'ipAddress') else None,
                    subnet_mask=adapter.subnetMask,
                    gateway=list(adapter.gateway),
                    nic_dns_server_list=list(adapter.dnsServerList),
                    dns_domain=adapter.dnsDomain,
                    primary_wins=adapter.primaryWINS,
                    # NOTE: 'secondry_wins' is a long-standing typo but part of
                    # the module's public return schema; do not rename.
                    secondry_wins=adapter.secondaryWINS,
                    net_bios=adapter.netBIOS,
                ))

            # Identity data depends on the spec flavour: LinuxPrep carries
            # hostname/domain/clock details; otherwise (SysPrep) only the
            # guiUnattended time zone is read here.
            current_hostname = None
            domain = None
            time_zone = None
            hw_clock = None
            identity = current_spec.spec.identity
            if isinstance(identity, vim.vm.customization.LinuxPrep):
                host_name = identity.hostName
                if isinstance(host_name, vim.vm.customization.PrefixNameGenerator):
                    current_hostname = host_name.base
                elif isinstance(host_name, vim.vm.customization.FixedName):
                    current_hostname = host_name.name
                domain = identity.domain
                time_zone = identity.timeZone
                hw_clock = identity.hwClockUTC
            else:
                time_zone = identity.guiUnattended.timeZone

            spec_info[spec] = dict(
                # Spec
                name=current_spec.info.name,
                description=current_spec.info.description,
                type=current_spec.info.type,
                last_updated_time=current_spec.info.lastUpdateTime,
                change_version=current_spec.info.changeVersion,
                # Identity
                hostname=current_hostname,
                domain=domain,
                time_zone=time_zone,
                hw_clock_utc=hw_clock,
                # global IP Settings
                dns_suffix_list=list(current_spec.spec.globalIPSettings.dnsSuffixList),
                dns_server_list=list(current_spec.spec.globalIPSettings.dnsServerList),
                # NIC setting map
                nic_setting_map=adapter_mapping_list,
            )
        return spec_info
+
+
def main():
    """Entry point: gather customization specification information."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(spec_name=dict(type='str'))

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    manager = VmwareCustomSpecManger(module)
    try:
        info = manager.gather_custom_spec_info()
    except Exception as exc:
        module.fail_json(msg="Failed to gather information with exception : %s" % to_text(exc))
    # exit_json raises SystemExit, which 'except Exception' would not catch
    # anyway, so calling it outside the try block is equivalent.
    module.exit_json(custom_spec_info=info)


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_disk.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_disk.py
new file mode 100644
index 000000000..fd54d41fa
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_disk.py
@@ -0,0 +1,1237 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_disk
+short_description: Manage disks related to virtual machine in given vCenter infrastructure
+description:
+ - This module can be used to add, remove and update disks belonging to given virtual machine.
+ - All parameters and VMware object names are case sensitive.
+ - This module is destructive in nature, please read documentation carefully before proceeding.
+ - Be careful while removing disk specified as this may lead to data loss.
+author:
+ - Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
+options:
+ name:
+ description:
+ - Name of the virtual machine.
+ - This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
+ type: str
+ uuid:
+ description:
+ - UUID of the instance to gather facts if known, this is VMware's unique identifier.
+ - This is a required parameter, if parameter C(name) or C(moid) is not supplied.
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ type: str
+ folder:
+ description:
+ - Destination folder, absolute or relative path to find an existing guest.
+ - This is a required parameter, only if multiple VMs are found with same name.
+ - The folder should include the datacenter. ESX's datacenter is ha-datacenter
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ type: str
+ datacenter:
+ description:
+ - The datacenter name to which virtual machine belongs to.
+ required: true
+ type: str
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: false
+ type: bool
+ disk:
+ description:
+ - A list of disks to add or remove.
+ - The virtual disk related information is provided using this list.
+ - All values and parameters are case sensitive.
+ suboptions:
+ size:
+ description:
+ - Disk storage size.
+ - If size specified then unit must be specified. There is no space allowed in between size number and unit.
+ - Only first occurrence in disk element will be considered, even if there are multiple size* parameters available.
+ type: str
+ size_kb:
+ description: Disk storage size in kb.
+ type: int
+ size_mb:
+ description: Disk storage size in mb.
+ type: int
+ size_gb:
+ description: Disk storage size in gb.
+ type: int
+ size_tb:
+ description: Disk storage size in tb.
+ type: int
+ type:
+ description:
+ - The type of disk, if not specified then use C(thick) type for new disk, no eagerzero.
+ - The disk type C(rdm) is added in version 1.13.0.
+ - The disk type C(vpmemdisk) is added in version 2.7.0.
+ type: str
+ choices: ['thin', 'eagerzeroedthick', 'thick', 'rdm', 'vpmemdisk']
+ disk_mode:
+ description:
+ - Type of disk mode. If not specified then use C(persistent) mode for new disk.
+ - If set to 'persistent' mode, changes are immediately and permanently written to the virtual disk.
+ - If set to 'independent_persistent' mode, same as persistent, but not affected by snapshots.
+ - If set to 'independent_nonpersistent' mode, changes to virtual disk are made to a redo log and discarded
+ at power off, but not affected by snapshots.
+ - Not applicable when disk C(type) is set to C(vpmemdisk).
+ type: str
+ choices: ['persistent', 'independent_persistent', 'independent_nonpersistent']
+ rdm_path:
+ description:
+ - Path of LUN for Raw Device Mapping required for disk type C(rdm).
+ - Only valid if C(type) is set to C(rdm).
+ type: str
+ cluster_disk:
+ description:
+ - This value allows for the sharing of an RDM between two machines.
+ - The primary machine holding the RDM uses the default C(false).
+ - The secondary machine holding the RDM uses C(true).
+ type: bool
+ default: false
+ compatibility_mode:
+ description: Compatibility mode for raw devices. Required when disk type C(type) is set to C(rdm).
+ type: str
+ choices: ['physicalMode','virtualMode']
+ sharing:
+ description:
+ - The sharing mode of the virtual disk.
+ - Setting sharing means that multiple virtual machines can write to the virtual disk.
+ - Sharing can only be set if C(type) is set to C(eagerzeroedthick) or C(rdm).
+ type: bool
+ default: false
+ datastore:
+ description:
+ - Name of datastore or datastore cluster to be used for the disk.
+ - Not applicable when disk C(type) is set to C(vpmemdisk).
+ type: str
+ autoselect_datastore:
+ description:
+ - Select the less used datastore. Specify only if C(datastore) is not specified.
+ - Not applicable when disk C(type) is set to C(vpmemdisk).
+ type: bool
+ scsi_controller:
+ description:
+ - SCSI controller number. Only 4 SCSI controllers are allowed per VM.
+        - Care should be taken when specifying 'scsi_controller' as 0 and 'unit_number' as 0, as this disk may contain the OS.
+ type: int
+ choices: [0, 1, 2, 3]
+ bus_sharing:
+ description:
+ - Only functions with Paravirtual SCSI Controller.
+ - Allows for the sharing of the scsi bus between two virtual machines.
+ type: str
+ choices: ['noSharing', 'physicalSharing', 'virtualSharing']
+ default: 'noSharing'
+ unit_number:
+ description:
+ - Disk Unit Number.
+ - Valid value range from 0 to 15, except 7 for SCSI Controller.
+ - Valid value range from 0 to 64, except 7 for Paravirtual SCSI Controller on Virtual Hardware version 14 or higher.
+ - Valid value range from 0 to 29 for SATA controller.
+ - Valid value range from 0 to 14 for NVME controller.
+ - Valid value range from 0 to 1 for IDE controller.
+ type: int
+ required: true
+ scsi_type:
+ description:
+ - Type of SCSI controller. This value is required only for the first occurrence of SCSI Controller.
+ - This value is ignored, if SCSI Controller is already present or C(state) is C(absent).
+ type: str
+ choices: ['buslogic', 'lsilogic', 'lsilogicsas', 'paravirtual']
+ destroy:
+ description: If C(state) is C(absent), make sure the disk file is deleted from the datastore. Added in version 2.10.
+ type: bool
+ default: true
+ filename:
+ description:
+ - Existing disk image to be used. Filename must already exist on the datastore.
+ - Specify filename string in C([datastore_name] path/to/file.vmdk) format. Added in version 2.10.
+ - Not applicable when disk C(type) is set to C(vpmemdisk).
+ type: str
+ state:
+ description:
+ - State of disk.
+ - If set to 'absent', disk will be removed permanently from virtual machine configuration and from VMware storage.
+ - If set to 'present', disk will be added if not present at given Controller and Unit Number.
+ - or disk exists with different size, disk size is increased, reducing disk size is not allowed.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ controller_type:
+ description:
+ - This parameter is added for managing disks attaching other types of controllers, e.g., SATA or NVMe.
+ - If either C(controller_type) or C(scsi_type) is not specified, then use C(paravirtual) type.
+ type: str
+ choices: ['buslogic', 'lsilogic', 'lsilogicsas', 'paravirtual', 'sata', 'nvme', 'ide']
+ controller_number:
+ description:
+ - This parameter is used with C(controller_type) for specifying controller bus number.
+ - For C(ide) controller type, valid value is 0 or 1.
+ type: int
+ choices: [0, 1, 2, 3]
+ iolimit:
+ description:
+ - Section specifies the shares and limit for storage I/O resource.
+ - Not applicable when disk C(type) is set to C(vpmemdisk).
+ suboptions:
+ limit:
+ description: Section specifies values for limit where the utilization of a virtual machine will not exceed, even if there are available resources.
+ type: int
+ shares:
+ description: Specifies different types of shares user can add for the given disk.
+ suboptions:
+ level:
+ description: Specifies different level for the shares section.
+ type: str
+ choices: ['low', 'normal', 'high', 'custom']
+ level_value:
+ description: Custom value when C(level) is set as C(custom).
+ type: int
+ type: dict
+ type: dict
+ shares:
+ description:
+      - Specifies the different types of shares the user can add for the given disk.
+ - Not applicable when disk C(type) is set to C(vpmemdisk).
+ suboptions:
+ level:
+ description: Tells about different level for the shares section.
+ type: str
+ choices: ['low', 'normal', 'high', 'custom']
+ level_value:
+ description: Custom value when C(level) is set as C(custom).
+ type: int
+ type: dict
+ default: []
+ type: list
+ elements: dict
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: Add disks to virtual machine using UUID
+ community.vmware.vmware_guest_disk:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ uuid: 421e4592-c069-924d-ce20-7e7533fab926
+ disk:
+ - size_mb: 10
+ type: thin
+ datastore: datacluster0
+ state: present
+ scsi_controller: 1
+ unit_number: 1
+ scsi_type: 'paravirtual'
+ disk_mode: 'persistent'
+ - size_gb: 10
+ type: eagerzeroedthick
+ state: present
+ autoselect_datastore: true
+ scsi_controller: 2
+ scsi_type: 'buslogic'
+ unit_number: 12
+ disk_mode: 'independent_persistent'
+ - size: 10Gb
+ type: eagerzeroedthick
+ state: present
+ autoselect_datastore: true
+ scsi_controller: 2
+ scsi_type: 'buslogic'
+ unit_number: 1
+ disk_mode: 'independent_nonpersistent'
+ - filename: "[datastore1] path/to/existing/disk.vmdk"
+ delegate_to: localhost
+ register: disk_facts
+
+- name: Add disks with specified shares to the virtual machine
+ community.vmware.vmware_guest_disk:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ disk:
+ - size_gb: 1
+ type: thin
+ datastore: datacluster0
+ state: present
+ scsi_controller: 1
+ unit_number: 1
+ disk_mode: 'independent_persistent'
+ shares:
+ level: custom
+ level_value: 1300
+ delegate_to: localhost
+ register: test_custom_shares
+
+- name: Add physical raw device mapping to virtual machine using name
+ community.vmware.vmware_guest_disk:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ validate_certs: false
+ name: "Test_VM"
+ disk:
+ - type: rdm
+ state: present
+ scsi_controller: 1
+ unit_number: 5
+ rdm_path: /vmfs/devices/disks/naa.060000003b1234efb453
+ compatibility_mode: 'physicalMode'
+
+- name: Add virtual raw device mapping to virtual machine using name and virtual mode
+ community.vmware.vmware_guest_disk:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ validate_certs: false
+ name: "Test_VM"
+ disk:
+ - type: rdm
+ state: present
+ scsi_controller: 1
+ unit_number: 5
+ rdm_path: /vmfs/devices/disks/naa.060000003b1234efb453
+ compatibility_mode: 'virtualMode'
+ disk_mode: 'persistent'
+
+- name: Add raw device mapping to virtual machine with Physical bus sharing
+ community.vmware.vmware_guest_disk:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ validate_certs: false
+ name: "Test_VM"
+ disk:
+ - type: rdm
+ state: present
+ scsi_controller: 1
+ unit_number: 5
+ rdm_path: /vmfs/devices/disks/naa.060000003b1234efb453
+ compatibility_mode: 'virtualMode'
+ disk_mode: 'persistent'
+ bus_sharing: physicalSharing
+
+- name: Add raw device mapping to virtual machine with Physical bus sharing and clustered disk
+ community.vmware.vmware_guest_disk:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ validate_certs: false
+ name: "Test_VM"
+ disk:
+ - type: rdm
+ state: present
+ scsi_controller: 1
+ unit_number: 5
+ compatibility_mode: 'virtualMode'
+ disk_mode: 'persistent'
+ bus_sharing: physicalSharing
+ filename: "[datastore1] path/to/rdm/disk-marker.vmdk"
+
+- name: create new disk with custom IO limits and shares in IO Limits
+ community.vmware.vmware_guest_disk:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ disk:
+ - size_gb: 1
+ type: thin
+ datastore: datacluster0
+ state: present
+ scsi_controller: 1
+ unit_number: 1
+ disk_mode: 'independent_persistent'
+ iolimit:
+ limit: 1506
+ shares:
+ level: custom
+ level_value: 1305
+ delegate_to: localhost
+ register: test_custom_IoLimit_shares
+
+- name: Remove disks from virtual machine using name
+ community.vmware.vmware_guest_disk:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ name: VM_225
+ disk:
+ - state: absent
+ scsi_controller: 1
+ unit_number: 1
+ delegate_to: localhost
+ register: disk_facts
+
+- name: Remove disk from virtual machine using moid
+ community.vmware.vmware_guest_disk:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ moid: vm-42
+ disk:
+ - state: absent
+ scsi_controller: 1
+ unit_number: 1
+ delegate_to: localhost
+ register: disk_facts
+
+- name: Remove disk from virtual machine but keep the VMDK file on the datastore
+ community.vmware.vmware_guest_disk:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ name: VM_225
+ disk:
+ - state: absent
+ scsi_controller: 1
+ unit_number: 2
+ destroy: false
+ delegate_to: localhost
+ register: disk_facts
+
+- name: Add disks to virtual machine using UUID to SATA and NVMe controller
+ community.vmware.vmware_guest_disk:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ validate_certs: false
+ uuid: 421e4592-c069-924d-ce20-7e7533fab926
+ disk:
+ - size_mb: 256
+ type: thin
+ datastore: datacluster0
+ state: present
+ controller_type: sata
+ controller_number: 1
+ unit_number: 1
+ disk_mode: 'persistent'
+ - size_gb: 1
+ state: present
+ autoselect_datastore: true
+ controller_type: nvme
+ controller_number: 2
+ unit_number: 3
+ disk_mode: 'independent_persistent'
+ delegate_to: localhost
+ register: disk_facts
+
+- name: Add a new vPMem disk to virtual machine to SATA controller
+ community.vmware.vmware_guest_disk:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ validate_certs: false
+ name: VM_226
+ disk:
+ - type: vpmemdisk
+ size_gb: 1
+ state: present
+ controller_type: sata
+ controller_number: 1
+ unit_number: 2
+ delegate_to: localhost
+ register: disk_facts
+'''
+
+RETURN = r'''
+disk_data:
+ description: metadata about the virtual machine's disks after managing them
+ returned: always
+ type: dict
+ sample: {
+ "0": {
+ "backing_datastore": "datastore2",
+ "backing_disk_mode": "persistent",
+ "backing_eagerlyscrub": false,
+ "backing_filename": "[datastore2] VM_225/VM_225.vmdk",
+ "backing_thinprovisioned": false,
+ "backing_writethrough": false,
+ "backing_uuid": "421e4592-c069-924d-ce20-7e7533fab926",
+ "capacity_in_bytes": 10485760,
+ "capacity_in_kb": 10240,
+ "controller_key": 1000,
+ "key": 2000,
+ "label": "Hard disk 1",
+ "summary": "10,240 KB",
+ "unit_number": 0
+ },
+ }
+disk_changes:
+ description: result of each task, key is the 0-based index with the same sequence in which the tasks were defined
+ returned: always
+ type: dict
+ sample: {
+ "0": "Disk deleted.",
+ "1": "Disk created."
+ }
+'''
+
+import re
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from random import randint
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec,\
+ wait_for_task, find_obj, get_all_objs, get_parent_datacenter
+from ansible_collections.community.vmware.plugins.module_utils.vm_device_helper import PyVmomiDeviceHelper
+
+
+class PyVmomiHelper(PyVmomi):
    def __init__(self, module):
        """Set up disk-management state for the targeted virtual machine.

        Args:
            module: AnsibleModule instance carrying the user parameters.
        """
        super(PyVmomiHelper, self).__init__(module)
        # Helper used later to build controller/disk device specs.
        self.device_helper = PyVmomiDeviceHelper(self.module)
        self.desired_disks = self.params['disk']  # Match with vmware_guest parameter
        # Set by ensure_disks() once the VM managed object is known.
        self.vm = None
        # Accumulates device changes; applied via ReconfigVM_Task.
        self.config_spec = vim.vm.ConfigSpec()
        self.config_spec.deviceChange = []
+
+ def find_disk_by_key(self, disk_key, disk_unit_number):
+ found_disk = None
+ for device in self.vm.config.hardware.device:
+ if isinstance(device, vim.vm.device.VirtualDisk) and device.key == disk_key:
+ if device.unitNumber == disk_unit_number:
+ found_disk = device
+ break
+
+ return found_disk
+
+ @staticmethod
+ def create_disk(ctl_key, disk):
+ """
+ Create Virtual Device Spec for virtual disk
+ Args:
+ ctl_key: Unique SCSI Controller Key
+ disk: The disk configurations dict
+
+ Returns: Virtual Device Spec for virtual disk
+
+ """
+ disk_spec = vim.vm.device.VirtualDeviceSpec()
+ disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+ disk_spec.device = vim.vm.device.VirtualDisk()
+ disk_spec.device.key = -randint(20000, 24999)
+
+ # Check if RDM first as changing backing later on will erase some settings like disk_mode
+ if disk['disk_type'] == 'rdm':
+ disk_spec.device.backing = vim.vm.device.VirtualDisk.RawDiskMappingVer1BackingInfo()
+ disk_spec.device.backing.deviceName = disk['rdm_path']
+ disk_spec.device.backing.compatibilityMode = disk['compatibility_mode']
+ elif disk['disk_type'] == 'vpmemdisk':
+ disk_spec.device.backing = vim.vm.device.VirtualDisk.LocalPMemBackingInfo()
+ else:
+ disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
+
+ if disk['disk_type'] != 'vpmemdisk':
+ disk_spec.device.backing.diskMode = disk['disk_mode']
+ disk_spec.device.backing.sharing = disk['sharing']
+
+ if disk['disk_type'] == 'thin':
+ disk_spec.device.backing.thinProvisioned = True
+ elif disk['disk_type'] == 'eagerzeroedthick':
+ disk_spec.device.backing.eagerlyScrub = True
+
+ disk_spec.device.controllerKey = ctl_key
+ disk_spec.device.unitNumber = disk['disk_unit_number']
+
+ return disk_spec
+
+ def reconfigure_vm(self, config_spec, device_type):
+ """
+ Reconfigure virtual machine after modifying device spec
+ Args:
+ config_spec: Config Spec
+ device_type: Type of device being modified
+
+ Returns: Boolean status 'changed' and actual task result
+
+ """
+ changed, results = (False, '')
+ try:
+ # Perform actual VM reconfiguration
+ task = self.vm.ReconfigVM_Task(spec=config_spec)
+ changed, results = wait_for_task(task)
+ except vim.fault.InvalidDeviceSpec as invalid_device_spec:
+ self.module.fail_json(msg="Failed to manage '%s' on given virtual machine due to invalid"
+ " device spec : %s" % (device_type, to_native(invalid_device_spec.msg)),
+ details="Please check ESXi server logs for more details.")
+ except vim.fault.RestrictedVersion as e:
+ self.module.fail_json(msg="Failed to reconfigure virtual machine due to"
+ " product versioning restrictions: %s" % to_native(e.msg))
+
+ return changed, results
+
+ def get_ioandshares_diskconfig(self, disk_spec, disk):
+ io_disk_spec = vim.StorageResourceManager.IOAllocationInfo()
+ if 'iolimit' in disk:
+ io_disk_spec.limit = disk['iolimit']['limit']
+ if 'shares' in disk['iolimit']:
+ shares_spec = vim.SharesInfo()
+ shares_spec.level = disk['iolimit']['shares']['level']
+ if shares_spec.level == 'custom':
+ shares_spec.shares = disk['iolimit']['shares']['level_value']
+ io_disk_spec.shares = shares_spec
+ disk_spec.device.storageIOAllocation = io_disk_spec
+ if 'shares' in disk:
+ shares_spec = vim.SharesInfo()
+ shares_spec.level = disk['shares']['level']
+ if shares_spec.level == 'custom':
+ shares_spec.shares = disk['shares']['level_value']
+ io_disk_spec.shares = shares_spec
+ disk_spec.device.storageIOAllocation = io_disk_spec
+ return disk_spec
+
+ def get_sharing(self, disk, disk_type, disk_index):
+ """
+ Get the sharing mode of the virtual disk
+ Args:
+ disk: Virtual disk data object
+ disk_type: Disk type of the virtual disk
+ disk_index: Disk unit number at which disk needs to be attached
+
+ Returns:
+ sharing_mode: The sharing mode of the virtual disk
+
+ """
+ sharing = disk.get('sharing')
+ if sharing and disk_type != 'eagerzeroedthick' and disk_type != 'rdm':
+ self.module.fail_json(msg="Invalid 'sharing' mode specified for disk index [%s]. 'disk_mode'"
+ " must be 'eagerzeroedthick' or 'rdm' when 'sharing'." % disk_index)
+ if sharing:
+ sharing_mode = 'sharingMultiWriter'
+ else:
+ sharing_mode = 'sharingNone'
+ return sharing_mode
+
+ def ensure_disks(self, vm_obj=None):
+ """
+ Manage internal state of virtual machine disks
+ Args:
+ vm_obj: Managed object of virtual machine
+
+ """
+ # Set vm object
+ self.vm = vm_obj
+ vm_files_datastore = self.vm.config.files.vmPathName.split(' ')[0].strip('[]')
+ # Sanitize user input
+ disk_data = self.sanitize_disk_inputs()
+ ctl_changed = False
+ disk_change_list = list()
+ results = dict(changed=False, disk_data=None, disk_changes=dict())
+ new_added_disk_ctl = list()
+ sharesval = {'low': 500, 'normal': 1000, 'high': 2000}
+
+ # Deal with controller
+ for disk in disk_data:
+ ctl_found = False
+ # check if disk controller is in the new adding queue
+ for new_ctl in new_added_disk_ctl:
+ if new_ctl['controller_type'] == disk['controller_type'] and new_ctl['controller_number'] == disk['controller_number']:
+ ctl_found = True
+ break
+ # check if disk controller already exists
+ if not ctl_found:
+ for device in self.vm.config.hardware.device:
+ if isinstance(device, self.device_helper.disk_ctl_device_type[disk['controller_type']]):
+ if device.busNumber == disk['controller_number']:
+ ctl_found = True
+ break
+ # create disk controller when not found and disk state is present
+ if not ctl_found and disk['state'] == 'present':
+ # Create new controller
+ if disk['controller_type'] in self.device_helper.scsi_device_type.keys():
+ ctl_spec = self.device_helper.create_scsi_controller(disk['controller_type'], disk['controller_number'], disk['bus_sharing'])
+ elif disk['controller_type'] == 'sata':
+ ctl_spec = self.device_helper.create_sata_controller(disk['controller_number'])
+ elif disk['controller_type'] == 'nvme':
+ ctl_spec = self.device_helper.create_nvme_controller(disk['controller_number'])
+ new_added_disk_ctl.append({'controller_type': disk['controller_type'], 'controller_number': disk['controller_number']})
+ ctl_changed = True
+ self.config_spec.deviceChange.append(ctl_spec)
+ elif not ctl_found and disk['state'] == 'absent':
+ self.module.fail_json(msg="Not found 'controller_type': '%s', 'controller_number': '%s', so can not"
+ " remove this disk, please make sure 'controller_type' and"
+ " 'controller_number' are correct." % (disk['controller_type'], disk['controller_number']))
+ if ctl_changed:
+ self.reconfigure_vm(self.config_spec, 'Disk Controller')
+ self.config_spec = vim.vm.ConfigSpec()
+ self.config_spec.deviceChange = []
+
+ # Deal with Disks
+ for disk in disk_data:
+ disk_found = False
+ update_io = False
+ disk_change = False
+ ctl_found = False
+ for device in self.vm.config.hardware.device:
+ if isinstance(device, self.device_helper.disk_ctl_device_type[disk['controller_type']]) and device.busNumber == disk['controller_number']:
+ for disk_key in device.device:
+ disk_device = self.find_disk_by_key(disk_key, disk['disk_unit_number'])
+ if disk_device is not None:
+ disk_found = True
+ if disk['state'] == 'present':
+ disk_spec = vim.vm.device.VirtualDeviceSpec()
+ disk_spec.device = disk_device
+ # Deal with iolimit. Note that if iolimit is set, you HAVE TO both set limit and shares,
+ # or ansible will break with "'NoneType' object is not subscriptable"
+ if 'iolimit' in disk:
+ if disk['iolimit']['limit'] != disk_spec.device.storageIOAllocation.limit:
+ update_io = True
+
+ if 'shares' in disk['iolimit']:
+ # 'low', 'normal' and 'high' values in disk['iolimit']['shares']['level'] are converted to int values on vcenter side
+ if (disk['iolimit']['shares']['level'] != 'custom'
+ and sharesval.get(disk['iolimit']['shares']['level'], 0) != disk_spec.device.storageIOAllocation.shares.shares) or \
+ (disk['iolimit']['shares']['level'] == 'custom'
+ and disk['iolimit']['shares']['level_value'] != disk_spec.device.storageIOAllocation.shares.shares):
+ update_io = True
+
+ if update_io:
+ # set the operation to edit so that it knows to keep other settings
+ disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
+ disk_spec = self.get_ioandshares_diskconfig(disk_spec, disk)
+ disk_change = True
+
+ # If this is an RDM ignore disk size
+ if disk['disk_type'] != 'rdm':
+ if disk['size'] < disk_spec.device.capacityInKB:
+ self.module.fail_json(msg="Given disk size at disk index [%s] is smaller than found"
+ " (%d < %d). Reducing disks is not allowed."
+ % (disk['disk_index'], disk['size'],
+ disk_spec.device.capacityInKB))
+ if disk['size'] != disk_spec.device.capacityInKB:
+ # set the operation to edit so that it knows to keep other settings
+ disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
+ if disk['disk_type'] != 'vpmemdisk':
+ disk_spec = self.get_ioandshares_diskconfig(disk_spec, disk)
+ disk_spec.device.capacityInKB = disk['size']
+ disk_change = True
+
+ if disk_change:
+ self.config_spec.deviceChange.append(disk_spec)
+ disk_change_list.append(disk_change)
+ results['disk_changes'][disk['disk_index']] = "Disk reconfigured."
+
+ elif disk['state'] == 'absent':
+ # Disk already exists, deleting
+ disk_spec = vim.vm.device.VirtualDeviceSpec()
+ disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
+ if disk['destroy'] is True:
+ disk_spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.destroy
+ disk_spec.device = disk_device
+ self.config_spec.deviceChange.append(disk_spec)
+ disk_change = True
+ disk_change_list.append(disk_change)
+ results['disk_changes'][disk['disk_index']] = "Disk deleted."
+ break
+
+ if disk_found:
+ break
+ if not disk_found and disk['state'] == 'present':
+ # Add new disk
+ disk_spec = self.create_disk(device.key, disk)
+ # get Storage DRS recommended datastore from the datastore cluster
+ if disk['disk_type'] == 'rdm':
+ # Since RDMs can be shared between two machines cluster_disk with rdm will
+ # invoke a copy of the existing disk instead of trying to create a new one which causes
+ # file lock issues in VSphere. This ensures we dont add a "create" operation.
+ if disk['filename'] is not None and disk['cluster_disk'] is True:
+ disk_spec.device.backing.fileName = disk['filename']
+ else:
+ disk_spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create
+ else:
+ if disk['filename'] is None:
+ if disk['datastore_cluster'] is not None:
+ datastore_name = self.get_recommended_datastore(datastore_cluster_obj=disk['datastore_cluster'], disk_spec_obj=disk_spec)
+ disk['datastore'] = find_obj(self.content, [vim.Datastore], datastore_name)
+
+ disk_spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create
+ disk_spec.device.capacityInKB = disk['size']
+ # Set backing filename when datastore is configured and not the same as VM datastore
+ # If datastore is not configured or backing filename is not set, default is VM datastore
+ if disk['datastore'] is not None and disk['datastore'].name != vm_files_datastore:
+ disk_spec.device.backing.datastore = disk['datastore']
+ disk_spec.device.backing.fileName = "[%s] %s/%s_%s_%s_%s.vmdk" % (disk['datastore'].name,
+ self.vm.name,
+ self.vm.name,
+ device.key,
+ str(disk['disk_unit_number']),
+ str(randint(1, 10000)))
+ elif disk['filename'] is not None:
+ disk_spec.device.backing.fileName = disk['filename']
+ disk_spec = self.get_ioandshares_diskconfig(disk_spec, disk)
+
+ self.config_spec.deviceChange.append(disk_spec)
+ disk_change = True
+ disk_change_list.append(disk_change)
+ results['disk_changes'][disk['disk_index']] = "Disk created."
+ break
+ if not disk_found and disk['state'] == 'absent':
+ self.module.fail_json(msg="Not found disk with 'controller_type': '%s',"
+ " 'controller_number': '%s', 'unit_number': '%s' to remove."
+ % (disk['controller_type'], disk['controller_number'], disk['disk_unit_number']))
+ if disk_change:
+ # Adding multiple disks in a single attempt raises weird errors
+ # So adding single disk at a time.
+ self.reconfigure_vm(self.config_spec, 'disks')
+ self.config_spec = vim.vm.ConfigSpec()
+ self.config_spec.deviceChange = []
+ if any(disk_change_list):
+ results['changed'] = True
+ results['disk_data'] = self.device_helper.gather_disk_info(self.vm)
+ self.module.exit_json(**results)
+
+    def sanitize_disk_inputs(self):
+        """
+        Check correctness of disk input provided by user
+
+        Normalizes every entry of self.desired_disks into a dictionary with
+        defaulted keys (state, controller type/number, unit number, datastore
+        selection, size, sharing and RDM-specific options). Invalid input
+        aborts the module via fail_json(); an empty disk list exits
+        immediately via exit_json().
+
+        Returns: A list of dictionary containing disk information
+
+        """
+        disks_data = list()
+        if not self.desired_disks:
+            self.module.exit_json(changed=False, msg="No disks provided for virtual machine '%s' for management."
+                                  % self.vm.name)
+
+        for disk_index, disk in enumerate(self.desired_disks):
+            # Initialize default value for disk
+            current_disk = dict(disk_index=disk_index,
+                                state='present',
+                                destroy=True,
+                                filename=None,
+                                datastore_cluster=None,
+                                datastore=None,
+                                autoselect_datastore=True,
+                                disk_unit_number=0,
+                                controller_number=0,
+                                disk_mode='persistent',
+                                disk_type='thick',
+                                sharing=False,
+                                bus_sharing='noSharing',
+                                cluster_disk=False)
+            # Type of Disk
+            if disk['type'] is not None:
+                current_disk['disk_type'] = disk['type']
+                if current_disk['disk_type'] == 'vpmemdisk':
+                    if self.vm.runtime.powerState != vim.VirtualMachinePowerState.poweredOff:
+                        self.module.fail_json(msg="Please make sure VM is in powered off state before doing vPMem disk"
+                                                  " reconfiguration.")
+                    # vPMem disks carry no datastore/filename/mode semantics; blank them
+                    # out so the branches below skip datastore and mode handling.
+                    disk['datastore'] = None
+                    disk['autoselect_datastore'] = None
+                    disk['filename'] = None
+                    disk['disk_mode'] = None
+
+            # Check state
+            if disk['state'] is not None:
+                current_disk['state'] = disk['state']
+
+            # Check controller type
+            if disk['scsi_type'] is not None and disk['controller_type'] is None:
+                current_disk['controller_type'] = disk['scsi_type']
+            elif disk['scsi_type'] is None and disk['controller_type'] is None:
+                current_disk['controller_type'] = 'paravirtual'
+            elif disk['controller_type'] is not None and disk['scsi_type'] is None:
+                current_disk['controller_type'] = disk['controller_type']
+            else:
+                self.module.fail_json(msg="Please specify either 'scsi_type' or 'controller_type' for disk index [%s]."
+                                          % disk_index)
+            if current_disk['controller_type'] == 'ide':
+                if self.vm.runtime.powerState != vim.VirtualMachinePowerState.poweredOff:
+                    self.module.fail_json(msg="Please make sure VM is in powered off state before doing IDE disk"
+                                              " reconfiguration.")
+
+            # Check controller bus number
+            if disk['scsi_controller'] is not None and disk['controller_number'] is None and disk['controller_type'] is None:
+                temp_disk_controller = disk['scsi_controller']
+            elif disk['controller_number'] is not None and disk['scsi_controller'] is None and disk['scsi_type'] is None:
+                temp_disk_controller = disk['controller_number']
+            else:
+                self.module.fail_json(msg="Please specify 'scsi_controller' with 'scsi_type', or 'controller_number'"
+                                          " with 'controller_type' under disk parameter for disk index [%s], which is"
+                                          " required while creating or configuring disk." % disk_index)
+            try:
+                disk_controller = int(temp_disk_controller)
+            except ValueError:
+                self.module.fail_json(msg="Invalid controller bus number '%s' specified"
+                                          " for disk index [%s]" % (temp_disk_controller, disk_index))
+            if current_disk['controller_type'] == 'ide' and disk_controller not in [0, 1]:
+                self.module.fail_json(msg="Invalid controller bus number '%s' specified"
+                                          " for disk index [%s], valid value is 0 or 1" % (disk_controller, disk_index))
+
+            current_disk['controller_number'] = disk_controller
+
+            try:
+                temp_disk_unit_number = int(disk['unit_number'])
+            except ValueError:
+                self.module.fail_json(msg="Invalid Disk unit number ID '%s' specified at index [%s]."
+                                          % (disk['unit_number'], disk_index))
+            if current_disk['controller_type'] in self.device_helper.scsi_device_type.keys():
+                # the Paravirtual SCSI Controller Supports up to 64 disks in vSphere 6.7. Using hardware
+                # version 14 or higher from the vm config should catch this appropriately.
+                hw_version = int(self.vm.config.version.split('-')[1])
+                if current_disk['controller_type'] == 'paravirtual' and hw_version >= 14:
+                    if temp_disk_unit_number not in range(0, 64):
+                        self.module.fail_json(msg="Invalid Disk unit number ID specified for disk [%s] at index [%s],"
+                                                  " please specify value between 0 to 64 only (excluding 7)."
+                                                  % (temp_disk_unit_number, disk_index))
+                    if temp_disk_unit_number == 7:
+                        self.module.fail_json(msg="Invalid Disk unit number ID specified for disk at index [%s], please"
+                                                  " specify value other than 7 as it is reserved for SCSI Controller."
+                                                  % disk_index)
+
+                else:
+                    if temp_disk_unit_number not in range(0, 16):
+                        self.module.fail_json(msg="Invalid Disk unit number ID specified for disk [%s] at index [%s],"
+                                                  " please specify value between 0 to 15 only (excluding 7)."
+                                                  % (temp_disk_unit_number, disk_index))
+                    if temp_disk_unit_number == 7:
+                        self.module.fail_json(msg="Invalid Disk unit number ID specified for disk at index [%s], please"
+                                                  " specify value other than 7 as it is reserved for SCSI Controller."
+                                                  % disk_index)
+            elif current_disk['controller_type'] == 'sata' and temp_disk_unit_number not in range(0, 30):
+                self.module.fail_json(msg="Invalid Disk unit number ID specified for SATA disk [%s] at index [%s],"
+                                          " please specify value between 0 to 29" % (temp_disk_unit_number, disk_index))
+            elif current_disk['controller_type'] == 'nvme' and temp_disk_unit_number not in range(0, 15):
+                self.module.fail_json(msg="Invalid Disk unit number ID specified for NVMe disk [%s] at index [%s],"
+                                          " please specify value between 0 to 14" % (temp_disk_unit_number, disk_index))
+            elif current_disk['controller_type'] == 'ide' and temp_disk_unit_number not in [0, 1]:
+                self.module.fail_json(msg="Invalid Disk unit number ID specified for IDE disk [%s] at index [%s],"
+                                          " please specify value 0 or 1" % (temp_disk_unit_number, disk_index))
+            current_disk['disk_unit_number'] = temp_disk_unit_number
+
+            # By default destroy file from datastore if 'destroy' parameter is not provided
+            if current_disk['state'] == 'absent':
+                current_disk['destroy'] = disk.get('destroy', True)
+            elif current_disk['state'] == 'present':
+                # Select datastore or datastore cluster
+                if disk['datastore'] is not None:
+                    if disk['autoselect_datastore'] is not None:
+                        self.module.fail_json(msg="Please specify either 'datastore' or 'autoselect_datastore' for"
+                                                  " disk index [%s]" % disk_index)
+                    # Check if given value is datastore or datastore cluster
+                    datastore_name = disk['datastore']
+                    datastore_cluster = find_obj(self.content, [vim.StoragePod], datastore_name)
+                    datastore = find_obj(self.content, [vim.Datastore], datastore_name)
+                    if datastore is None and datastore_cluster is None:
+                        self.module.fail_json(msg="Failed to find datastore or datastore cluster named '%s' "
+                                                  "in given configuration." % disk['datastore'])
+                    if datastore_cluster:
+                        # If user specified datastore cluster, keep track of that for determining datastore later
+                        current_disk['datastore_cluster'] = datastore_cluster
+                    elif datastore:
+                        ds_datacenter = get_parent_datacenter(datastore)
+                        if ds_datacenter.name != self.module.params['datacenter']:
+                            self.module.fail_json(msg="Get datastore '%s' in datacenter '%s', not the configured"
+                                                      " datacenter '%s'" % (datastore.name, ds_datacenter.name,
+                                                                            self.module.params['datacenter']))
+                        current_disk['datastore'] = datastore
+                    current_disk['autoselect_datastore'] = False
+                elif disk['autoselect_datastore'] is not None:
+                    # Find datastore which fits requirement
+                    datastores = get_all_objs(self.content, [vim.Datastore])
+                    if not datastores:
+                        self.module.fail_json(msg="Failed to gather information about available datastores in given"
+                                                  " datacenter '%s'." % self.module.params['datacenter'])
+                    datastore = None
+                    datastore_freespace = 0
+                    for ds in datastores:
+                        if ds.summary.freeSpace > datastore_freespace:
+                            # If datastore field is provided, filter destination datastores
+                            datastore = ds
+                            datastore_freespace = ds.summary.freeSpace
+                    current_disk['datastore'] = datastore
+                else:
+                    if current_disk['disk_type'] == 'vpmemdisk':
+                        current_disk['datastore'] = None
+                        current_disk['autoselect_datastore'] = False
+
+                if disk['filename'] is not None:
+                    current_disk['filename'] = disk['filename']
+
+                if [x for x in disk.keys() if ((x.startswith('size_') or x == 'size') and disk[x] is not None)]:
+                    # size, size_tb, size_gb, size_mb, size_kb
+                    disk_size_parse_failed = False
+                    if disk['size'] is not None:
+                        size_regex = re.compile(r'(\d+(?:\.\d+)?)([tgmkTGMK][bB])')
+                        disk_size_m = size_regex.match(disk['size'])
+                        if disk_size_m:
+                            expected = disk_size_m.group(1)
+                            unit = disk_size_m.group(2)
+                        else:
+                            disk_size_parse_failed = True
+                        # NOTE(review): if the regex above did not match, 'expected' is
+                        # unbound here; the NameError is caught below and treated as a
+                        # parse failure, so this is intentional but fragile.
+                        try:
+                            if re.match(r'\d+\.\d+', expected):
+                                # We found float value in string, let's typecast it
+                                expected = float(expected)
+                            else:
+                                # We found int value in string, let's typecast it
+                                expected = int(expected)
+                        except (TypeError, ValueError, NameError):
+                            disk_size_parse_failed = True
+                    else:
+                        # Even multiple size_ parameter provided by user,
+                        # consider first value only
+                        param = [x for x in disk.keys() if (x.startswith('size_') and disk[x] is not None)][0]
+                        unit = param.split('_')[-1]
+                        disk_size = disk[param]
+                        if isinstance(disk_size, (float, int)):
+                            disk_size = str(disk_size)
+
+                        try:
+                            if re.match(r'\d+\.\d+', disk_size):
+                                # We found float value in string, let's typecast it
+                                expected = float(disk_size)
+                            else:
+                                # We found int value in string, let's typecast it
+                                expected = int(disk_size)
+                        except (TypeError, ValueError, NameError):
+                            disk_size_parse_failed = True
+
+                    if disk_size_parse_failed:
+                        # Common failure
+                        self.module.fail_json(msg="Failed to parse disk size for disk index [%s],"
+                                                  " please review value provided"
+                                                  " using documentation." % disk_index)
+
+                    disk_units = dict(tb=3, gb=2, mb=1, kb=0)
+                    unit = unit.lower()
+                    if unit in disk_units:
+                        current_disk['size'] = expected * (1024 ** disk_units[unit])
+                    else:
+                        self.module.fail_json(msg="%s is not a supported unit for disk size for disk index [%s]."
+                                                  " Supported units are ['%s']." % (unit, disk_index, "', '".join(disk_units.keys())))
+                elif current_disk['filename'] is None and disk['type'] != 'rdm':
+                    # No size found but disk, fail. Excepting RDMs because the cluster_disk will need a filename.
+                    self.module.fail_json(msg="No size, size_kb, size_mb, size_gb or size_tb"
+                                              " attribute found into disk index [%s] configuration." % disk_index)
+
+            # Mode of Disk
+            if disk['disk_mode'] is not None:
+                current_disk['disk_mode'] = disk['disk_mode']
+
+            if current_disk['disk_type'] != 'vpmemdisk':
+                # Sharing mode of disk
+                current_disk['sharing'] = self.get_sharing(disk, current_disk['disk_type'], disk_index)
+
+                if disk['shares'] is not None:
+                    current_disk['shares'] = disk['shares']
+                if disk['iolimit'] is not None:
+                    current_disk['iolimit'] = disk['iolimit']
+
+            # Deal with RDM disk needs. RDMS require some different values compared to Virtual Disks
+            if disk['type'] == 'rdm':
+                compatibility_mode = disk.get('compatibility_mode', 'physicalMode')
+                if compatibility_mode not in ['physicalMode', 'virtualMode']:
+                    self.module.fail_json(msg="Invalid 'compatibility_mode' specified for disk index [%s]. Please specify"
+                                              "'compatibility_mode' value from ['physicalMode', 'virtualMode']." % disk_index)
+                current_disk['compatibility_mode'] = compatibility_mode
+
+                # RDMs need a path
+                # NOTE(review): suboption keys declared in argument_spec are always present
+                # in disk (value None when unset), so this membership test may never fire;
+                # checking disk['rdm_path'] is None was likely intended -- confirm upstream.
+                # Also note the user-facing message reads "needs must" -- likely a typo.
+                if 'rdm_path' not in disk and 'filename' not in disk:
+                    self.module.fail_json(msg="rdm_path and/or 'filename' needs must be specified when using disk type 'rdm'"
+                                              "for disk index [%s]" % disk_index)
+                else:
+                    current_disk['rdm_path'] = disk.get('rdm_path')
+
+                # A bare 'filename' RDM (no rdm_path) is only valid for shared cluster disks.
+                if disk['filename'] and disk['rdm_path'] is None and disk['cluster_disk'] is False:
+                    self.module.fail_json(msg=" 'filename' requires setting 'cluster_disk' to True when using disk type 'rdm' without a"
+                                              "'rdm_path' for disk index [%s]" % disk_index)
+                else:
+                    current_disk['cluster_disk'] = disk.get('cluster_disk')
+
+            # Enable Physical or virtual SCSI Bus Sharing
+            # NOTE(review): 'bus_sharing' defaults to 'noSharing' (truthy), so this guard
+            # is always taken; the message typo "specied" is in a runtime string and is
+            # left untouched here.
+            if disk['bus_sharing']:
+                bus_sharing = disk.get('bus_sharing', 'noSharing')
+                if bus_sharing not in ['noSharing', 'physicalSharing', 'virtualSharing']:
+                    self.module.fail_json(msg="Invalid SCSI 'bus_sharing' specied for disk index [%s]. Please "
+                                              "specify 'bus_sharing' value from "
+                                              "['noSharing', 'physicalSharing', 'virtualSharing']." % disk_index)
+                current_disk['bus_sharing'] = bus_sharing
+
+            disks_data.append(current_disk)
+
+        return disks_data
+
+    def get_recommended_datastore(self, datastore_cluster_obj, disk_spec_obj):
+        """
+        Return Storage DRS recommended datastore from datastore cluster
+        Args:
+            datastore_cluster_obj: datastore cluster managed object
+            disk_spec_obj: virtual disk device spec used to ask SDRS for a
+                placement recommendation for this particular disk
+
+        Returns: Name of recommended datastore from the given datastore cluster,
+        Returns None if no datastore recommendation found.
+
+        """
+        # Check if Datastore Cluster provided by user is SDRS ready
+        sdrs_status = datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled
+        if sdrs_status:
+            # We can get storage recommendation only if SDRS is enabled on given datastorage cluster
+            disk_loc = vim.storageDrs.PodSelectionSpec.DiskLocator()
+            pod_config = vim.storageDrs.PodSelectionSpec.VmPodConfig()
+            pod_config.storagePod = datastore_cluster_obj
+            pod_config.disk = [disk_loc]
+            pod_sel_spec = vim.storageDrs.PodSelectionSpec()
+            pod_sel_spec.initialVmConfig = [pod_config]
+            storage_spec = vim.storageDrs.StoragePlacementSpec()
+            storage_spec.configSpec = vim.vm.ConfigSpec()
+            storage_spec.configSpec.deviceChange.append(disk_spec_obj)
+            storage_spec.resourcePool = self.vm.resourcePool
+            storage_spec.podSelectionSpec = pod_sel_spec
+            storage_spec.vm = self.vm
+            storage_spec.type = 'reconfigure'
+
+            try:
+                # Ask vCenter's Storage DRS for a placement recommendation and
+                # use the first action of the first recommendation.
+                rec = self.content.storageResourceManager.RecommendDatastores(storageSpec=storage_spec)
+                rec_action = rec.recommendations[0].action[0]
+                return rec_action.destination.name
+            except Exception:
+                # There is some error so we fall back to general workflow
+                pass
+        # Fallback: pick the datastore with the most free space, skipping any
+        # datastore that is currently in maintenance mode.
+        datastore = None
+        datastore_freespace = 0
+        for ds in datastore_cluster_obj.childEntity:
+            if ds.summary.maintenanceMode == "inMaintenance":
+                continue
+            if ds.summary.freeSpace > datastore_freespace:
+                # If datastore field is provided, filter destination datastores
+                datastore = ds
+                datastore_freespace = ds.summary.freeSpace
+        if datastore:
+            return datastore.name
+        return None
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ name=dict(type='str'),
+ uuid=dict(type='str'),
+ moid=dict(type='str'),
+ folder=dict(type='str'),
+ datacenter=dict(type='str', required=True),
+ use_instance_uuid=dict(type='bool', default=False),
+ disk=dict(
+ type='list',
+ default=[],
+ elements='dict',
+ options=dict(
+ size=dict(type='str'),
+ size_kb=dict(type='int'),
+ size_mb=dict(type='int'),
+ size_gb=dict(type='int'),
+ size_tb=dict(type='int'),
+ type=dict(type='str', choices=['thin', 'eagerzeroedthick', 'thick', 'rdm', 'vpmemdisk']),
+ disk_mode=dict(type='str', choices=['persistent', 'independent_persistent', 'independent_nonpersistent']),
+ compatibility_mode=dict(type='str', choices=['physicalMode', 'virtualMode']),
+ rdm_path=dict(type='str'),
+ sharing=dict(type='bool', default=False),
+ datastore=dict(type='str'),
+ autoselect_datastore=dict(type='bool'),
+ scsi_controller=dict(type='int', choices=[0, 1, 2, 3]),
+ unit_number=dict(type='int', required=True),
+ scsi_type=dict(type='str', choices=['buslogic', 'lsilogic', 'paravirtual', 'lsilogicsas']),
+ destroy=dict(type='bool', default=True),
+ filename=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ controller_type=dict(type='str', choices=['buslogic', 'lsilogic', 'paravirtual', 'lsilogicsas', 'sata', 'nvme', 'ide']),
+ controller_number=dict(type='int', choices=[0, 1, 2, 3]),
+ bus_sharing=dict(type='str', choices=['noSharing', 'physicalSharing', 'virtualSharing'], default='noSharing'),
+ cluster_disk=dict(type='bool', default=False),
+ iolimit=dict(
+ type='dict',
+ options=dict(
+ limit=dict(type='int'),
+ shares=dict(
+ type='dict',
+ options=dict(
+ level=dict(type='str', choices=['low', 'high', 'normal', 'custom']),
+ level_value=dict(type='int'),
+ ),
+ ),
+ )),
+ shares=dict(
+ type='dict',
+ options=dict(
+ level=dict(type='str', choices=['low', 'high', 'normal', 'custom']),
+ level_value=dict(type='int'),
+ ),
+ ),
+ ),
+ ),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=[['name', 'uuid', 'moid']],
+ )
+
+ if module.params['folder']:
+ # FindByInventoryPath() does not require an absolute path
+ # so we should leave the input folder path unmodified
+ module.params['folder'] = module.params['folder'].rstrip('/')
+
+ pyv = PyVmomiHelper(module)
+ # Check if the VM exists before continuing
+ vm = pyv.get_vm()
+
+ if not vm:
+ # We unable to find the virtual machine user specified
+ # Bail out
+ vm_id = (module.params.get('name') or module.params.get('uuid') or module.params.get('moid'))
+ module.fail_json(msg="Unable to manage disks for non-existing"
+ " virtual machine '%s'." % vm_id)
+
+ # VM exists
+ try:
+ pyv.ensure_disks(vm_obj=vm)
+ except Exception as exc:
+ module.fail_json(msg="Failed to manage disks for virtual machine"
+ " '%s' with exception : %s" % (vm.name,
+ to_native(exc)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_disk_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_disk_info.py
new file mode 100644
index 000000000..1d12b2573
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_disk_info.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, NAER William Leemans (@bushvin) <willie@elaba.net>
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_disk_info
+short_description: Gather info about disks of given virtual machine
+description:
+ - This module can be used to gather information about disks belonging to given virtual machine.
+ - All parameters and VMware object names are case sensitive.
+author:
+ - Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
+options:
+ name:
+ description:
+ - Name of the virtual machine.
+    - This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
+ type: str
+ uuid:
+ description:
+ - UUID of the instance to gather information if known, this is VMware's unique identifier.
+    - This is a required parameter, if parameter C(name) or C(moid) is not supplied.
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ type: str
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: false
+ type: bool
+ folder:
+ description:
+ - Destination folder, absolute or relative path to find an existing guest.
+    - This is a required parameter, only if multiple VMs are found with the same name.
+    - The folder should include the datacenter. ESX's datacenter is ha-datacenter.
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ type: str
+ datacenter:
+ description:
+ - The datacenter name to which virtual machine belongs to.
+ required: true
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather disk info from virtual machine using UUID
+ community.vmware.vmware_guest_disk_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: ha-datacenter
+ uuid: 421e4592-c069-924d-ce20-7e7533fab926
+ delegate_to: localhost
+ register: disk_info
+
+- name: Gather disk info from virtual machine using name
+ community.vmware.vmware_guest_disk_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: ha-datacenter
+ name: VM_225
+ delegate_to: localhost
+ register: disk_info
+
+- name: Gather disk info from virtual machine using moid
+ community.vmware.vmware_guest_disk_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: ha-datacenter
+ moid: vm-42
+ delegate_to: localhost
+ register: disk_info
+'''
+
+RETURN = r'''
+guest_disk_info:
+ description: metadata about the virtual machine's disks
+ returned: always
+ type: dict
+ sample: {
+ "0": {
+ "backing_datastore": "datastore2",
+ "backing_disk_mode": "persistent",
+ "backing_diskmode": "persistent",
+ "backing_eagerlyscrub": false,
+ "backing_filename": "[datastore2] VM_225/VM_225.vmdk",
+ "backing_thinprovisioned": false,
+ "backing_type": "FlatVer2",
+ "backing_writethrough": false,
+ "backing_uuid": "200C3A00-f82a-97af-02ff-62a595f0020a",
+ "capacity_in_bytes": 10485760,
+ "capacity_in_kb": 10240,
+ "controller_bus_number": 0,
+ "controller_key": 1000,
+ "controller_type": "paravirtual",
+ "key": 2000,
+ "label": "Hard disk 1",
+ "summary": "10,240 KB",
+ "unit_number": 0
+ },
+ "1": {
+ "backing_datastore": "datastore3",
+ "backing_devicename": "vml.012345678901234567890123456789012345678901234567890123",
+ "backing_disk_mode": "independent_persistent",
+ "backing_diskmode": "independent_persistent",
+ "backing_filename": "[datastore3] VM_226/VM_226.vmdk",
+ "backing_lunuuid": "012345678901234567890123456789012345678901234567890123",
+ "backing_type": "RawDiskMappingVer1",
+ "backing_uuid": null,
+ "capacity_in_bytes": 15728640,
+ "capacity_in_kb": 15360,
+ "controller_bus_number": 0,
+ "controller_key": 1000,
+ "controller_type": "paravirtual",
+ "key": 2001,
+ "label": "Hard disk 3",
+ "summary": "15,360 KB",
+ "unit_number": 1
+ },
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+from ansible_collections.community.vmware.plugins.module_utils.vm_device_helper import PyVmomiDeviceHelper
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ name=dict(type='str'),
+ uuid=dict(type='str'),
+ moid=dict(type='str'),
+ use_instance_uuid=dict(type='bool', default=False),
+ folder=dict(type='str'),
+ datacenter=dict(type='str', required=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=[
+ ['name', 'uuid', 'moid']
+ ],
+ supports_check_mode=True,
+ )
+
+ if module.params['folder']:
+ # FindByInventoryPath() does not require an absolute path
+ # so we should leave the input folder path unmodified
+ module.params['folder'] = module.params['folder'].rstrip('/')
+
+ pyv = PyVmomi(module)
+ device_helper = PyVmomiDeviceHelper(module)
+ # Check if the VM exists before continuing
+ vm = pyv.get_vm()
+
+ if vm:
+ # VM exists
+ try:
+ module.exit_json(guest_disk_info=device_helper.gather_disk_info(vm))
+ except Exception as exc:
+ module.fail_json(msg="Failed to gather information with exception : %s" % to_text(exc))
+ else:
+ # We unable to find the virtual machine user specified
+ # Bail out
+ vm_id = (module.params.get('uuid') or module.params.get('moid') or module.params.get('name'))
+ module.fail_json(msg="Unable to gather disk information for non-existing VM %s" % vm_id)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_file_operation.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_file_operation.py
new file mode 100644
index 000000000..69a0a112a
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_file_operation.py
@@ -0,0 +1,520 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Stéphane Travassac <stravassac@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_file_operation
+short_description: Files operation in a VMware guest operating system without network
+description:
+ - Module to copy a file to a VM, fetch a file from a VM and create or delete a directory in the guest OS.
+author:
+ - Stéphane Travassac (@stravassac)
+notes:
+ - Only the first match against vm_id is used, even if there are multiple matches
+options:
+ datacenter:
+ description:
+ - The datacenter hosting the virtual machine.
+ - If set, it will help to speed up virtual machine search.
+ type: str
+ cluster:
+ description:
+ - The cluster hosting the virtual machine.
+ - If set, it will help to speed up virtual machine search.
+ type: str
+ folder:
+ description:
+ - Destination folder, absolute path to find an existing guest or create the new guest.
+ - The folder should include the datacenter. ESX's datacenter is ha-datacenter
+ - Used only if C(vm_id_type) is C(inventory_path).
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ - ' folder: vm/folder2'
+ - ' folder: folder2'
+ type: str
+ vm_id:
+ description:
+ - Name of the virtual machine to work with.
+ required: true
+ type: str
+ vm_id_type:
+ description:
+ - The VMware identification method by which the virtual machine will be identified.
+ default: vm_name
+ choices:
+ - 'uuid'
+ - 'instance_uuid'
+ - 'dns_name'
+ - 'inventory_path'
+ - 'vm_name'
+ type: str
+ vm_username:
+ description:
+ - The user to login in to the virtual machine.
+ required: true
+ type: str
+ vm_password:
+ description:
+      - The password used to log in to the virtual machine.
+ required: true
+ type: str
+ directory:
+ description:
+ - Create or delete a directory.
+ - Can be used to create temp directory inside guest using mktemp operation.
+ - mktemp sets variable C(dir) in the result with the name of the new directory.
+ - mktemp operation option is added in version 2.8.
+ suboptions:
+ operation:
+ description:
+ - Operation to perform.
+ type: str
+ required: true
+ choices: [ 'create', 'delete', 'mktemp' ]
+ path:
+ type: str
+ description:
+ - Directory path.
+          - Required for C(create) or C(delete).
+ prefix:
+ description:
+ - Temporary directory prefix.
+ - Required for C(mktemp).
+ type: str
+ suffix:
+ type: str
+ description:
+ - Temporary directory suffix.
+ - Required for C(mktemp).
+ recurse:
+ type: bool
+ description:
+ - Not required.
+ default: false
+ required: false
+ type: dict
+ copy:
+ description:
+ - Copy file to vm without requiring network.
+ suboptions:
+ src:
+ description:
+ - File source absolute or relative.
+ required: true
+ type: str
+ dest:
+ description:
+          - File destination; the destination path must already exist.
+ required: true
+ type: str
+ overwrite:
+ description:
+ - Overwrite or not.
+ type: bool
+ default: false
+ required: false
+ type: dict
+ fetch:
+ description:
+ - Get file from virtual machine without requiring network.
+ suboptions:
+ src:
+ description:
+ - The file on the remote system to fetch.
+ - This I(must) be a file, not a directory.
+ required: true
+ type: str
+ dest:
+ description:
+          - File destination on localhost; the destination path must already exist.
+ required: true
+ type: str
+ required: false
+ type: dict
+ timeout:
+ description:
+ - Timeout seconds for fetching or copying a file.
+ type: int
+ default: 100
+ version_added: '3.1.0'
+
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Create directory inside a vm
+ community.vmware.vmware_guest_file_operation:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ vm_id: "{{ guest_name }}"
+ vm_username: "{{ guest_username }}"
+ vm_password: "{{ guest_userpassword }}"
+ directory:
+ path: "/test"
+ operation: create
+ recurse: false
+ delegate_to: localhost
+
+- name: copy file to vm
+ community.vmware.vmware_guest_file_operation:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ vm_id: "{{ guest_name }}"
+ vm_username: "{{ guest_username }}"
+ vm_password: "{{ guest_userpassword }}"
+ copy:
+ src: "files/test.zip"
+ dest: "/root/test.zip"
+ overwrite: false
+ delegate_to: localhost
+
+- name: fetch file from vm
+ community.vmware.vmware_guest_file_operation:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ vm_id: "{{ guest_name }}"
+ vm_username: "{{ guest_username }}"
+ vm_password: "{{ guest_userpassword }}"
+ fetch:
+ src: "/root/test.zip"
+ dest: "files/test.zip"
+ delegate_to: localhost
+
+- name: If a timeout error occurs, specify a high(er) timeout value
+ community.vmware.vmware_guest_file_operation:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ vm_id: "{{ guest_name }}"
+ vm_username: "{{ guest_username }}"
+ vm_password: "{{ guest_userpassword }}"
+ timeout: 10000
+ copy:
+ src: "files/test.zip"
+ dest: "/root/test.zip"
+ overwrite: false
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils import urls
+from ansible.module_utils._text import to_bytes, to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi, find_cluster_by_name, find_datacenter_by_name,
+ find_vm_by_id, vmware_argument_spec)
+
+
+class VmwareGuestFileManager(PyVmomi):
+    """Run a single guest file operation (directory/copy/fetch) via VMware Tools.
+
+    The constructor performs the whole workflow: it resolves the target VM,
+    dispatches to the requested operation, and exits the module itself via
+    module.exit_json()/fail_json() — instantiating this class never returns
+    control to the caller on the happy path.
+    """
+
+    def __init__(self, module):
+        super(VmwareGuestFileManager, self).__init__(module)
+        datacenter_name = module.params['datacenter']
+        cluster_name = module.params['cluster']
+        folder = module.params['folder']
+        self.timeout = module.params['timeout']
+
+        # Datacenter and cluster are optional and only used to narrow the VM search.
+        datacenter = None
+        if datacenter_name:
+            datacenter = find_datacenter_by_name(self.content, datacenter_name)
+            if not datacenter:
+                module.fail_json(msg="Unable to find %(datacenter)s datacenter" % module.params)
+
+        cluster = None
+        if cluster_name:
+            cluster = find_cluster_by_name(self.content, cluster_name, datacenter)
+            if not cluster:
+                module.fail_json(msg="Unable to find %(cluster)s cluster" % module.params)
+
+        # inventory_path lookups need the folder; all other id types search by
+        # the id itself, optionally scoped to datacenter/cluster.
+        if module.params['vm_id_type'] == 'inventory_path':
+            vm = find_vm_by_id(self.content, vm_id=module.params['vm_id'], vm_id_type="inventory_path", folder=folder)
+        else:
+            vm = find_vm_by_id(self.content,
+                               vm_id=module.params['vm_id'],
+                               vm_id_type=module.params['vm_id_type'],
+                               datacenter=datacenter,
+                               cluster=cluster)
+
+        if not vm:
+            module.fail_json(msg='Unable to find virtual machine.')
+
+        self.vm = vm
+        try:
+            result = dict(changed=False)
+            # The three operations are mutually exclusive (enforced by the
+            # AnsibleModule spec), so at most one of these branches runs.
+            if module.params['directory']:
+                result = self.directory()
+            if module.params['copy']:
+                result = self.copy()
+            if module.params['fetch']:
+                result = self.fetch()
+            module.exit_json(**result)
+        except vmodl.RuntimeFault as runtime_fault:
+            module.fail_json(msg=to_native(runtime_fault.msg))
+        except vmodl.MethodFault as method_fault:
+            module.fail_json(msg=to_native(method_fault.msg))
+        except Exception as e:
+            module.fail_json(msg=to_native(e))
+
+    def directory(self):
+        """Create, delete, or mktemp a directory inside the guest OS.
+
+        Returns a result dict for exit_json(); 'changed' is reset to False
+        when the directory already exists (create) or is missing (delete).
+        For mktemp, the generated directory name is returned under 'dir'.
+        """
+        result = dict(changed=True, uuid=self.vm.summary.config.uuid)
+        vm_username = self.module.params['vm_username']
+        vm_password = self.module.params['vm_password']
+
+        recurse = bool(self.module.params['directory']['recurse'])
+        operation = self.module.params['directory']['operation']
+        path = self.module.params['directory']['path']
+        prefix = self.module.params['directory']['prefix']
+        suffix = self.module.params['directory']['suffix']
+        creds = vim.vm.guest.NamePasswordAuthentication(username=vm_username, password=vm_password)
+        file_manager = self.content.guestOperationsManager.fileManager
+        if operation in ("create", "mktemp"):
+            try:
+                if operation == "create":
+                    file_manager.MakeDirectoryInGuest(vm=self.vm,
+                                                      auth=creds,
+                                                      directoryPath=path,
+                                                      createParentDirectories=recurse)
+                else:
+                    newdir = file_manager.CreateTemporaryDirectoryInGuest(vm=self.vm, auth=creds,
+                                                                          prefix=prefix, suffix=suffix)
+                    # Expose the generated temp directory name to the caller.
+                    result['dir'] = newdir
+            except vim.fault.FileAlreadyExists as file_already_exists:
+                result['changed'] = False
+                result['msg'] = "Guest directory %s already exist: %s" % (path,
+                                                                         to_native(file_already_exists.msg))
+            except vim.fault.GuestPermissionDenied as permission_denied:
+                self.module.fail_json(msg="Permission denied for path %s : %s" % (path,
+                                                                                 to_native(permission_denied.msg)),
+                                      uuid=self.vm.summary.config.uuid)
+            except vim.fault.InvalidGuestLogin as invalid_guest_login:
+                self.module.fail_json(msg="Invalid guest login for user %s : %s" % (vm_username,
+                                                                                   to_native(invalid_guest_login.msg)),
+                                      uuid=self.vm.summary.config.uuid)
+            # other exceptions
+            except Exception as e:
+                self.module.fail_json(msg="Failed to Create directory into VM VMware exception : %s" % to_native(e),
+                                      uuid=self.vm.summary.config.uuid)
+
+        if operation == "delete":
+            try:
+                file_manager.DeleteDirectoryInGuest(vm=self.vm, auth=creds, directoryPath=path,
+                                                    recursive=recurse)
+            except vim.fault.FileNotFound as file_not_found:
+                result['changed'] = False
+                result['msg'] = "Guest directory %s not exists %s" % (path,
+                                                                     to_native(file_not_found.msg))
+            except vim.fault.FileFault as e:
+                self.module.fail_json(msg="FileFault : %s" % e.msg,
+                                      uuid=self.vm.summary.config.uuid)
+            except vim.fault.GuestPermissionDenied as permission_denied:
+                self.module.fail_json(msg="Permission denied for path %s : %s" % (path,
+                                                                                 to_native(permission_denied.msg)),
+                                      uuid=self.vm.summary.config.uuid)
+            except vim.fault.InvalidGuestLogin as invalid_guest_login:
+                self.module.fail_json(msg="Invalid guest login for user %s : %s" % (vm_username,
+                                                                                   to_native(invalid_guest_login.msg)),
+                                      uuid=self.vm.summary.config.uuid)
+            # other exceptions
+            except Exception as e:
+                self.module.fail_json(msg="Failed to Delete directory into Vm VMware exception : %s" % to_native(e),
+                                      uuid=self.vm.summary.config.uuid)
+
+        return result
+
+    def fetch(self):
+        """Download a file from the guest to the controller-local 'dest' path."""
+        result = dict(changed=True, uuid=self.vm.summary.config.uuid)
+        vm_username = self.module.params['vm_username']
+        vm_password = self.module.params['vm_password']
+        hostname = self.module.params['hostname']
+        dest = self.module.params["fetch"]['dest']
+        src = self.module.params['fetch']['src']
+        creds = vim.vm.guest.NamePasswordAuthentication(username=vm_username, password=vm_password)
+        file_manager = self.content.guestOperationsManager.fileManager
+
+        try:
+            fileTransferInfo = file_manager.InitiateFileTransferFromGuest(vm=self.vm, auth=creds,
+                                                                         guestFilePath=src)
+            url = fileTransferInfo.url
+            # The returned transfer URL may carry a '*' placeholder for the
+            # host part; substitute the hostname the module was pointed at.
+            url = url.replace("*", hostname)
+            resp, info = urls.fetch_url(self.module, url, method="GET", timeout=self.timeout)
+            if info.get('status') != 200 or not resp:
+                self.module.fail_json(msg="Failed to fetch file : %s" % info.get('msg', ''), body=info.get('body', ''))
+            try:
+                with open(dest, "wb") as local_file:
+                    local_file.write(resp.read())
+            except Exception as e:
+                self.module.fail_json(msg="local file write exception : %s" % to_native(e),
+                                      uuid=self.vm.summary.config.uuid)
+        except vim.fault.FileNotFound as file_not_found:
+            self.module.fail_json(msg="Guest file %s does not exist : %s" % (src, to_native(file_not_found.msg)),
+                                  uuid=self.vm.summary.config.uuid)
+        except vim.fault.FileFault as e:
+            self.module.fail_json(msg="FileFault : %s" % to_native(e.msg),
+                                  uuid=self.vm.summary.config.uuid)
+        except vim.fault.GuestPermissionDenied:
+            self.module.fail_json(msg="Permission denied to fetch file %s" % src,
+                                  uuid=self.vm.summary.config.uuid)
+        except vim.fault.InvalidGuestLogin:
+            self.module.fail_json(msg="Invalid guest login for user %s" % vm_username,
+                                  uuid=self.vm.summary.config.uuid)
+        # other exceptions
+        except Exception as e:
+            self.module.fail_json(msg="Failed to Fetch file from Vm VMware exception : %s" % to_native(e),
+                                  uuid=self.vm.summary.config.uuid)
+
+        return result
+
+    def copy(self):
+        """Upload a local file to the 'dest' path inside the guest OS."""
+        result = dict(changed=True, uuid=self.vm.summary.config.uuid)
+        vm_username = self.module.params['vm_username']
+        vm_password = self.module.params['vm_password']
+        hostname = self.module.params['hostname']
+        overwrite = self.module.params["copy"]["overwrite"]
+        dest = self.module.params["copy"]['dest']
+        src = self.module.params['copy']['src']
+        b_src = to_bytes(src, errors='surrogate_or_strict')
+
+        # Validate the local source before talking to vCenter.
+        if not os.path.exists(b_src):
+            self.module.fail_json(msg="Source %s not found" % src)
+        if not os.access(b_src, os.R_OK):
+            self.module.fail_json(msg="Source %s not readable" % src)
+        if os.path.isdir(b_src):
+            self.module.fail_json(msg="copy does not support copy of directory: %s" % src)
+
+        # NOTE(review): the whole source file is read into memory before the
+        # PUT; very large files could be streamed instead.
+        data = None
+        with open(b_src, "rb") as local_file:
+            data = local_file.read()
+        file_size = os.path.getsize(b_src)
+
+        creds = vim.vm.guest.NamePasswordAuthentication(username=vm_username, password=vm_password)
+        file_attributes = vim.vm.guest.FileManager.FileAttributes()
+        file_manager = self.content.guestOperationsManager.fileManager
+        try:
+            url = file_manager.InitiateFileTransferToGuest(vm=self.vm, auth=creds, guestFilePath=dest,
+                                                           fileAttributes=file_attributes, overwrite=overwrite,
+                                                           fileSize=file_size)
+            # The returned transfer URL may carry a '*' placeholder for the
+            # host part; substitute the hostname the module was pointed at.
+            url = url.replace("*", hostname)
+            resp, info = urls.fetch_url(self.module, url, data=data, method="PUT", timeout=self.timeout)
+
+            status_code = info["status"]
+            if status_code != 200:
+                self.module.fail_json(msg='problem during file transfer, http message:%s' % info,
+                                      uuid=self.vm.summary.config.uuid)
+        except vim.fault.FileAlreadyExists:
+            result['changed'] = False
+            result['msg'] = "Guest file %s already exists" % dest
+            return result
+        except vim.fault.FileFault as e:
+            self.module.fail_json(msg="FileFault:%s" % to_native(e.msg),
+                                  uuid=self.vm.summary.config.uuid)
+        except vim.fault.GuestPermissionDenied as permission_denied:
+            self.module.fail_json(msg="Permission denied to copy file into "
+                                      "destination %s : %s" % (dest, to_native(permission_denied.msg)),
+                                  uuid=self.vm.summary.config.uuid)
+        except vim.fault.InvalidGuestLogin as invalid_guest_login:
+            self.module.fail_json(msg="Invalid guest login for user"
+                                      " %s : %s" % (vm_username, to_native(invalid_guest_login.msg)))
+        # other exceptions
+        except Exception as e:
+            self.module.fail_json(msg="Failed to Copy file to Vm VMware exception : %s" % to_native(e),
+                                  uuid=self.vm.summary.config.uuid)
+        return result
+
+
+def main():
+    """Entry point: validate options and run one guest file operation."""
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(dict(
+        datacenter=dict(type='str'),
+        cluster=dict(type='str'),
+        folder=dict(type='str'),
+        vm_id=dict(type='str', required=True),
+        vm_id_type=dict(
+            default='vm_name',
+            type='str',
+            choices=['inventory_path', 'uuid', 'instance_uuid', 'dns_name', 'vm_name']),
+        vm_username=dict(type='str', required=True),
+        vm_password=dict(type='str', no_log=True, required=True),
+        directory=dict(
+            type='dict',
+            default=None,
+            options=dict(
+                operation=dict(required=True, type='str', choices=['create', 'delete', 'mktemp']),
+                path=dict(required=False, type='str'),
+                prefix=dict(required=False, type='str'),
+                suffix=dict(required=False, type='str'),
+                recurse=dict(required=False, type='bool', default=False)
+            )
+        ),
+        copy=dict(
+            type='dict',
+            default=None,
+            options=dict(
+                src=dict(required=True, type='str'),
+                dest=dict(required=True, type='str'),
+                overwrite=dict(required=False, type='bool', default=False)
+            )
+        ),
+        fetch=dict(
+            type='dict',
+            default=None,
+            options=dict(
+                src=dict(required=True, type='str'),
+                dest=dict(required=True, type='str'),
+            )
+        ),
+        timeout=dict(type='int', default=100)
+    )
+    )
+
+    # Exactly one of directory/copy/fetch must be given; check mode is not
+    # supported because every operation talks to the live guest.
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
+                           required_if=[['vm_id_type', 'inventory_path', ['folder']]],
+                           mutually_exclusive=[['directory', 'copy', 'fetch']],
+                           required_one_of=[['directory', 'copy', 'fetch']],
+                           )
+
+    # Cross-field validation that the argument spec cannot express:
+    # which directory sub-options are required depends on the operation.
+    if module.params['directory']:
+        if module.params['directory']['operation'] in ('create', 'delete') and not module.params['directory']['path']:
+            module.fail_json(msg='directory.path is required when operation is "create" or "delete"')
+        if module.params['directory']['operation'] == 'mktemp' and not (module.params['directory']['prefix'] and module.params['directory']['suffix']):
+            module.fail_json(msg='directory.prefix and directory.suffix are required when operation is "mktemp"')
+
+    # NOTE(review): redundant with the required_if rule above; kept as a
+    # defensive check.
+    if module.params['vm_id_type'] == 'inventory_path' and not module.params['folder']:
+        module.fail_json(msg='Folder is required parameter when vm_id_type is inventory_path')
+
+    # The constructor runs the requested operation and exits the module itself.
+    VmwareGuestFileManager(module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_find.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_find.py
new file mode 100644
index 000000000..7197613b7
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_find.py
@@ -0,0 +1,145 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_find
+short_description: Find the folder path(s) for a virtual machine by name or UUID
+description:
+ - Find the folder path(s) for a virtual machine by name or UUID
+author:
+ - Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
+options:
+ name:
+ description:
+ - Name of the VM to work with.
+ - This is required if C(uuid) parameter is not supplied.
+ type: str
+ uuid:
+ description:
+ - UUID of the instance to manage if known, this is VMware's BIOS UUID by default.
+ - This is required if C(name) parameter is not supplied.
+ type: str
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: false
+ type: bool
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Find Guest's Folder using name
+ community.vmware.vmware_guest_find:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: testvm
+ delegate_to: localhost
+ register: vm_folder
+
+- name: Find Guest's Folder using UUID
+ community.vmware.vmware_guest_find:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ uuid: 38c4c89c-b3d7-4ae6-ae4e-43c5118eae49
+ delegate_to: localhost
+ register: vm_folder
+'''
+
+RETURN = r'''
+folders:
+ description: List of folders for user specified virtual machine
+ returned: on success
+ type: list
+ sample: [
+ '/DC0/vm',
+ ]
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, find_vm_by_id
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+
+class PyVmomiHelper(PyVmomi):
+    """Helper that resolves VM(s) by name or UUID and reports their folder paths."""
+
+    def __init__(self, module):
+        super(PyVmomiHelper, self).__init__(module)
+        # Cached lookup parameters; name and uuid are mutually exclusive
+        # (enforced by the AnsibleModule spec in main()).
+        self.name = self.params['name']
+        self.uuid = self.params['uuid']
+        self.use_instance_uuid = self.params['use_instance_uuid']
+
+    def getvm_folder_paths(self):
+        """Return a list of inventory folder paths for the matching VM(s).
+
+        A UUID lookup yields at most one VM (and fails the module when the
+        UUID is unknown); a name lookup may match several VMs, and the
+        folder path of each match is returned.
+        """
+        results = []
+        vms = []
+
+        if self.uuid:
+            if self.use_instance_uuid:
+                vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="instance_uuid")
+            else:
+                vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="uuid")
+            if vm_obj is None:
+                self.module.fail_json(msg="Failed to find the virtual machine with UUID : %s" % self.uuid)
+            vms = [vm_obj]
+
+        elif self.name:
+            # Scan all VMs (name property only) and collect every exact match.
+            objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
+            for temp_vm_object in objects:
+                if temp_vm_object.obj.name == self.name:
+                    vms.append(temp_vm_object.obj)
+
+        for vm in vms:
+            folder_path = self.get_vm_path(self.content, vm)
+            results.append(folder_path)
+
+        return results
+
+
+def main():
+    """Entry point: find the folder path(s) of a VM by name or UUID."""
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(
+        name=dict(type='str'),
+        uuid=dict(type='str'),
+        use_instance_uuid=dict(type='bool', default=False),
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           required_one_of=[['name', 'uuid']],
+                           mutually_exclusive=[['name', 'uuid']],
+                           )
+
+    pyv = PyVmomiHelper(module)
+    # Check if the VM exists before continuing
+    folders = pyv.getvm_folder_paths()
+
+    # At least one matching VM was found; return its folder path(s).
+    if folders:
+        try:
+            module.exit_json(folders=folders)
+        except Exception as exc:
+            module.fail_json(msg="Folder enumeration failed with exception %s" % to_native(exc))
+    else:
+        module.fail_json(msg="Unable to find folders for virtual machine %s" % (
+            module.params.get('name')
+            or module.params.get('uuid')))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_info.py
new file mode 100644
index 000000000..fb4735625
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_info.py
@@ -0,0 +1,320 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This module is also sponsored by E.T.A.I. (www.etai.fr)
+# Copyright (C) 2018 James E. King III (@jeking3) <jking@apache.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_info
+short_description: Gather info about a single VM
+description:
+ - Gather information about a single VM on a VMware ESX cluster.
+author:
+ - Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
+options:
+ name:
+ description:
+ - Name of the VM to work with
+ - This is required if C(uuid) or C(moid) is not supplied.
+ type: str
+ name_match:
+ description:
+      - If multiple VMs match the name, use the first or last found.
+ default: 'first'
+ choices: ['first', 'last']
+ type: str
+ uuid:
+ description:
+ - UUID of the instance to manage if known, this is VMware's unique identifier.
+ - This is required if C(name) or C(moid) is not supplied.
+ type: str
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: false
+ type: bool
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ type: str
+ folder:
+ description:
+ - Destination folder, absolute or relative path to find an existing guest.
+ - This is required if name is supplied.
+ - The folder should include the datacenter. ESX's datacenter is ha-datacenter
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ type: str
+ datacenter:
+ description:
+ - Destination datacenter for the deploy operation
+ required: true
+ type: str
+ tags:
+ description:
+ - Whether to show tags or not.
+ - If set C(true), shows tags information. Returns a list of tag names.
+ - If set C(false), hides tags information.
+ - vSphere Automation SDK is required.
+ default: false
+ type: bool
+ tag_details:
+ description:
+ - If set C(true), detail information about 'tags' returned.
+ - Without this flag, the 'tags' returns a list of tag names.
+ - With this flag, the 'tags' returns a list of dict about tag information with additional details like category name, category id, and tag id.
+      - This parameter is added to maintain backward compatibility.
+ default: false
+ type: bool
+ schema:
+ description:
+ - Specify the output schema desired.
+ - The 'summary' output schema is the legacy output from the module
+ - The 'vsphere' output schema is the vSphere API class definition
+ which requires pyvmomi>6.7.1
+ choices: ['summary', 'vsphere']
+ default: 'summary'
+ type: str
+ properties:
+ description:
+ - Specify the properties to retrieve.
+ - If not specified, all properties are retrieved (deeply).
+ - Results are returned in a structure identical to the vsphere API.
+ - 'Example:'
+ - ' properties: ['
+ - ' "config.hardware.memoryMB",'
+ - ' "config.hardware.numCPU",'
+ - ' "guest.disk",'
+ - ' "overallStatus"'
+ - ' ]'
+ - Only valid when C(schema) is C(vsphere).
+ type: list
+ elements: str
+ required: false
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather info from standalone ESXi server having datacenter as 'ha-datacenter'
+ community.vmware.vmware_guest_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: ha-datacenter
+ uuid: 421e4592-c069-924d-ce20-7e7533fab926
+ delegate_to: localhost
+ register: info
+
+- name: Gather some info from a guest using the vSphere API output schema
+ community.vmware.vmware_guest_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ name: "{{ vm_name }}"
+ schema: "vsphere"
+ properties: ["config.hardware.memoryMB", "guest.disk", "overallStatus"]
+ delegate_to: localhost
+ register: info
+
+- name: Gather some information about a guest using MoID
+ community.vmware.vmware_guest_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ moid: vm-42
+ schema: "vsphere"
+ properties: ["config.hardware.memoryMB", "guest.disk", "overallStatus"]
+ delegate_to: localhost
+ register: vm_moid_info
+
+- name: Gather Managed object ID (moid) from a guest using the vSphere API output schema for REST Calls
+ community.vmware.vmware_guest_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ name: "{{ vm_name }}"
+ schema: "vsphere"
+ properties:
+ - _moId
+ delegate_to: localhost
+ register: moid_info
+
+- name: Gather detailed information about tags and category associated with the given VM
+ community.vmware.vmware_guest_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ name: "{{ vm_name }}"
+ tags: true
+ tag_details: true
+ register: detailed_tag_info
+'''
+
+RETURN = r'''
+instance:
+ description: metadata about the virtual machine
+ returned: always
+ type: dict
+ sample: {
+ "advanced_settings": {},
+ "annotation": "",
+ "current_snapshot": null,
+ "customvalues": {},
+ "guest_consolidation_needed": false,
+ "guest_question": null,
+ "guest_tools_status": "guestToolsNotRunning",
+ "guest_tools_version": "10247",
+ "hw_cores_per_socket": 1,
+ "hw_datastores": [
+ "ds_226_3"
+ ],
+ "hw_esxi_host": "10.76.33.226",
+ "hw_eth0": {
+ "addresstype": "assigned",
+ "ipaddresses": null,
+ "label": "Network adapter 1",
+ "macaddress": "00:50:56:87:a5:9a",
+ "macaddress_dash": "00-50-56-87-a5-9a",
+ "portgroup_key": null,
+ "portgroup_portkey": null,
+ "summary": "VM Network"
+ },
+ "hw_files": [
+ "[ds_226_3] ubuntu_t/ubuntu_t.vmx",
+ "[ds_226_3] ubuntu_t/ubuntu_t.nvram",
+ "[ds_226_3] ubuntu_t/ubuntu_t.vmsd",
+ "[ds_226_3] ubuntu_t/vmware.log",
+ "[ds_226_3] u0001/u0001.vmdk"
+ ],
+ "hw_folder": "/DC0/vm/Discovered virtual machine",
+ "hw_guest_full_name": null,
+ "hw_guest_ha_state": null,
+ "hw_guest_id": null,
+ "hw_interfaces": [
+ "eth0"
+ ],
+ "hw_is_template": false,
+ "hw_memtotal_mb": 1024,
+ "hw_name": "ubuntu_t",
+ "hw_power_status": "poweredOff",
+ "hw_processor_count": 1,
+ "hw_product_uuid": "4207072c-edd8-3bd5-64dc-903fd3a0db04",
+ "hw_version": "vmx-13",
+ "instance_uuid": "5007769d-add3-1e12-f1fe-225ae2a07caf",
+ "ipv4": null,
+ "ipv6": null,
+ "module_hw": true,
+ "snapshots": [],
+ "tags": [
+ "backup"
+ ],
+ "vnc": {},
+ "moid": "vm-42",
+ "vimref": "vim.VirtualMachine:vm-42"
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+from ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient
+try:
+ from com.vmware.vapi.std_client import DynamicID # noqa: F401
+ HAS_VSPHERE = True
+except ImportError:
+ HAS_VSPHERE = False
+
+
+class VmwareTag(VmwareRestClient):
+    """Thin REST-client wrapper exposing the vSphere tagging service handles."""
+
+    def __init__(self, module):
+        super(VmwareTag, self).__init__(module)
+        # Handles to the vAPI tagging services used for tag lookups in main().
+        self.tag_service = self.api_client.tagging.Tag
+        self.tag_association_svc = self.api_client.tagging.TagAssociation
+
+
+def main():
+    """Entry point: gather facts (and optionally tags) for a single VM."""
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(
+        name=dict(type='str'),
+        name_match=dict(type='str', choices=['first', 'last'], default='first'),
+        uuid=dict(type='str'),
+        use_instance_uuid=dict(type='bool', default=False),
+        moid=dict(type='str'),
+        folder=dict(type='str'),
+        datacenter=dict(type='str', required=True),
+        tags=dict(type='bool', default=False),
+        schema=dict(type='str', choices=['summary', 'vsphere'], default='summary'),
+        properties=dict(type='list', elements='str'),
+        tag_details=dict(type='bool', default=False),
+    )
+    module = AnsibleModule(argument_spec=argument_spec,
+                           required_one_of=[['name', 'uuid', 'moid']],
+                           supports_check_mode=True)
+
+    if module.params.get('folder'):
+        # FindByInventoryPath() does not require an absolute path
+        # so we should leave the input folder path unmodified
+        module.params['folder'] = module.params['folder'].rstrip('/')
+
+    # 'properties' only makes sense with the raw vSphere output schema.
+    if module.params['schema'] != 'vsphere' and module.params.get('properties'):
+        module.fail_json(msg="The option 'properties' is only valid when the schema is 'vsphere'")
+
+    pyv = PyVmomi(module)
+    # Check if the VM exists before continuing
+    vm = pyv.get_vm()
+
+    # VM already exists
+    if vm:
+        try:
+            if module.params['schema'] == 'summary':
+                instance = pyv.gather_facts(vm)
+            else:
+                instance = pyv.to_json(vm, module.params['properties'])
+            if module.params.get('tags'):
+                # Tag lookups go through the REST API and need the vSphere
+                # Automation SDK in addition to pyVmomi.
+                if not HAS_VSPHERE:
+                    module.fail_json(msg="Unable to find 'vCloud Suite SDK' Python library which is required."
+                                         " Please refer this URL for installation steps"
+                                         " - https://code.vmware.com/web/sdk/vsphere-automation-python")
+
+                vm_rest_client = VmwareTag(module)
+                tags = []
+                # tag_details returns dicts with category info; otherwise a
+                # plain list of tag names is returned.
+                if module.params.get('tag_details'):
+                    tags = vm_rest_client.get_tags_for_vm(vm_mid=vm._moId)
+                else:
+                    dynamic_obj = DynamicID(type='VirtualMachine', id=vm._moId)
+                    tags = vm_rest_client.get_vm_tags(vm_rest_client.tag_service,
+                                                      vm_rest_client.tag_association_svc,
+                                                      vm_mid=dynamic_obj)
+                instance.update(tags=tags)
+            module.exit_json(instance=instance)
+        except Exception as exc:
+            module.fail_json(msg="Information gathering failed with exception %s" % to_text(exc))
+    else:
+        vm_id = (module.params.get('uuid') or module.params.get('name') or module.params.get('moid'))
+        module.fail_json(msg="Unable to gather information for non-existing VM %s" % vm_id)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_instant_clone.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_instant_clone.py
new file mode 100644
index 000000000..3e487ca12
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_instant_clone.py
@@ -0,0 +1,591 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2021, Ansible Project
+# Copyright: (c) 2021, Anant Chopra <chopraan@vmware.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_instant_clone
+short_description: Instant Clone VM
+
+description:
+- This module can be used for Creating a powered-on Instant Clone of a virtual machine.
+- All variables and VMware object names are case sensitive.
+- M(community.vmware.vmware_guest) module is needed for creating a VM with poweredon state which would be used as a parent VM.
+- M(community.vmware.vmware_guest_powerstate) module is also needed to power off the instant cloned VM.
+- The powered off VM would in turn be deleted by again using M(community.vmware.vmware_guest) module.
+- Thus M(community.vmware.vmware_guest) module is necessary for removing Instant Cloned VM when VMs being created in testing environment.
+- Also GuestOS Customization has now been added with guestinfo_vars parameter.
+- The Parent VM must have The Guest customization Engine for instant Clone to customize Guest OS.
+- Only a Linux OS in the Parent VM enables support for native vSphere Guest Customization for Instant Clone in vSphere 7.
+options:
+ name:
+ description:
+ - Name of the Cloned virtual machine.
+ type: str
+ aliases: ['vm_name']
+ required: true
+ parent_vm:
+ description:
+ - Name of the parent virtual machine.
+ - This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
+ type: str
+ uuid:
+ description:
+ - UUID of the vm instance to clone from, this is VMware's unique identifier.
+ - This is a required parameter, if parameter C(parent_vm) or C(moid) is not supplied.
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the vm instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(parent_vm) or C(uuid) is not supplied.
+ type: str
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: false
+ type: bool
+ host:
+ description:
+ - Name of the ESX Host in datacenter in which to place cloned VM.
+ - The host has to be a member of the cluster that contains the resource pool.
+ - Required with I(resource_pool) to find resource pool details. This will be used as additional information when there are resource pools with same name.
+ type: str
+ aliases: ['esxi_hostname']
+ required: true
+ datastore:
+ description:
+ - The name of the datastore or the datastore cluster.
+ - If datastore cluster name is specified, module will find the Storage DRS recommended datastore in that cluster.
+ type: str
+ required: true
+ datacenter:
+ description:
+ - Name of the datacenter, where VM to be deployed.
+ type: str
+ required: true
+ folder:
+ description:
+ - Destination folder, absolute path to deploy the cloned vm.
+ - This parameter is case sensitive.
+ - 'Examples:'
+ - 'folder: ha-datacenter/vm'
+ - 'folder: /datacenter1/vm'
+ type: str
+ resource_pool:
+ description:
+ - Name of the resource pool in datacenter in which to place deployed VM.
+ - Required if I(cluster) is not specified.
+ - For default or non-unique resource pool names, specify I(host) and I(cluster).
+ - C(Resources) is the default name of resource pool.
+ type: str
+ required: false
+ vm_username:
+ description:
+ - The user to login-in to the virtual machine.
+ - Only required when using guest customization feature.
+ required: false
+ type: str
+ vm_password:
+ description:
+ - The password used to login-in to the virtual machine.
+ - Only required when using guest customization feature.
+ required: false
+ type: str
+ guestinfo_vars:
+ description:
+ - Provides GuestOS Customization functionality in instant cloned VM.
+ - A list of key value pairs that will be passed to the destination VM.
+ - These pairs should be used to provide user-defined customization to differentiate the destination VM from the source VM.
+ suboptions:
+ hostname:
+ description:
+ - hostname is used to obtain the DNS(Domain Name System) name and set the Guest system's hostname.
+ type: str
+ ipaddress:
+ description:
+ - ipaddress is used to set the ipaddress in Instant Cloned Guest Operating System.
+ type: str
+ netmask:
+ description:
+ - netmask is used to set the netmask in Instant Cloned Guest Operating System.
+ type: str
+ gateway:
+ description:
+        - gateway is used to set the gateway in Instant Cloned Guest Operating System.
+ type: str
+ dns:
+ description:
+        - dns is used to set the dns in Instant Cloned Guest Operating System.
+ type: str
+ domain:
+ description:
+ - domain is used to set A fully qualified domain name (FQDN) or complete domain name for Instant Cloned Guest operating System.
+ type: str
+ type: list
+ elements: dict
+ wait_vm_tools:
+ description:
+ - Whether waiting until vm tools start after rebooting an instant clone vm.
+ type: bool
+ default: true
+ wait_vm_tools_timeout:
+ description:
+    - Define a timeout (in seconds) for the I(wait_vm_tools) parameter.
+ type: int
+ default: 300
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+author:
+- Anant Chopra (@Anant99-sys)
+
+'''
+
+EXAMPLES = r'''
+- name: Instant Clone a VM
+ community.vmware.vmware_guest_instant_clone:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ folder: "{{ f0 }}"
+ datastore: "{{ rw_datastore }}"
+ datacenter: "{{ dc1 }}"
+ host: "{{ esxi1 }}"
+ name: "{{ Clone_vm }}"
+ parent_vm: "{{ testvm_1 }}"
+ resource_pool: "{{ test_resource_001 }}"
+ register: vm_clone
+ delegate_to: localhost
+
+- name: set state to poweroff the Cloned VM
+ community.vmware.vmware_guest_powerstate:
+ validate_certs: false
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: "cloned_vm_from_vm_cluster"
+ folder: "{{ f0 }}"
+ state: powered-off
+ register: poweroff_instant_clone_from_vm_when_cluster
+ delegate_to: localhost
+
+- name: Clean VM
+ community.vmware.vmware_guest:
+ validate_certs: false
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: "cloned_vm_from_vm_cluster"
+ datacenter: "{{ dc1 }}"
+ state: absent
+ register: delete_instant_clone_from_vm_when_cluster
+ ignore_errors: true
+ delegate_to: localhost
+
+- name: Instant Clone a VM with guest_customization
+ community.vmware.vmware_guest_instant_clone:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ vm_username: "root"
+ vm_password: "SuperSecret"
+ validate_certs: false
+ folder: "{{ f0 }}"
+ datastore: "{{ rw_datastore }}"
+ datacenter: "{{ dc1 }}"
+ host: "{{ esxi1 }}"
+ guestinfo_vars:
+ - hostname: "{{ guestinfo.ic.hostname }}"
+ ipaddress: "{{ guestinfo.ic.ipaddress }}"
+ netmask: "{{ guestinfo.ic.netmask }}"
+ gateway: "{{ guestinfo.ic.gateway }}"
+ dns: "{{ guestinfo.ic.dns }}"
+ domain: "{{ guestinfo.ic.domain }}"
+ name: "Instant_clone_guest_customize"
+ parent_vm: "test_vm1"
+ resource_pool: DC0_C0_RP1
+ register: Instant_cloned_guest_customize
+ delegate_to: localhost
+
+- name: Instant Clone a VM when skipping optional params
+ community.vmware.vmware_guest_instant_clone:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ name: "{{ Clone_vm }}"
+ parent_vm: "{{ testvm_1 }}"
+ datacenter: "{{ dc1 }}"
+ datastore: "{{ rw_datastore }}"
+ host: "{{ esxi1 }}"
+ register: VM_clone_optional_arguments
+ delegate_to: localhost
+
+- name: Instant clone in check mode
+ community.vmware.vmware_guest_instant_clone:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ folder: "{{ f0 }}"
+ datastore: "{{ rw_datastore }}"
+ datacenter: "{{ dc1 }}"
+ host: "{{ esx1 }}"
+ name: "{{ Clone_vm }}"
+ parent_vm: "{{ testvm_2 }}"
+ resource_pool: "{{ test_resource_001 }}"
+ check_mode: true
+ register: check_mode_clone
+ delegate_to: localhost
+- debug:
+ var: check_mode_clone
+
+'''
+
+RETURN = r'''
+vm_info:
+ description:
+ - metadata about the virtual machine
+ - added instance_uuid from version 1.12.0
+ returned: always
+ type: dict
+ sample: {
+ "vm_name": "",
+ "vcenter": "",
+ "host": "",
+ "datastore": "",
+ "vm_folder": "",
+ "instance_uuid": ""
+ }
+'''
+
+import time
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi,
+ TaskError,
+ find_vm_by_name,
+ find_vm_by_id,
+ connect_to_api,
+ vmware_argument_spec,
+ find_obj,
+ wait_for_task,
+ set_vm_power_state
+)
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+
+class VmwareGuestInstantClone(PyVmomi):
+ def __init__(self, module):
+ """Constructor."""
+ super().__init__(module)
+ self.instant_clone_spec = vim.vm.InstantCloneSpec()
+ self.relocate_spec = vim.vm.RelocateSpec()
+ self.vm_name = self.params.get('name')
+ self.parent_vm = self.params.get('parent_vm')
+ self.datacenter = self.params.get('datacenter')
+ self.datastore = self.params.get('datastore')
+ self.hostname = self.params.get('hostname')
+ self.folder = self.params.get('folder')
+ self.resource_pool = self.params.get('resource_pool')
+ self.host = self.params.get('host')
+ self.username = self.params.get('username')
+ self.password = self.params.get('password')
+ self.validate_certs = self.params.get('validate_certs')
+ self.moid = self.params.get('moid')
+ self.uuid = self.params.get('uuid')
+ self.port = self.params.get('port')
+ self.use_instance_uuid = self.params.get('use_instance_uuid')
+ self.wait_vm_tools = self.params.get('wait_vm_tools')
+ self.wait_vm_tools_timeout = self.params.get('wait_vm_tools_timeout')
+ self.guestinfo_vars = self.params.get('guestinfo_vars')
+
+ def get_new_vm_info(self, vm):
+ # to check if vm has been cloned in the destination vc
+ # query for the vm in destination vc
+ # get the host and datastore info
+ info = {}
+ vm_obj = find_vm_by_name(content=self.destination_content, vm_name=vm)
+ if vm_obj is None:
+ self.module.fail_json(msg="Newly Instant cloned VM is not found in the VCenter")
+
+ vm_facts = self.gather_facts(vm_obj)
+ info['vm_name'] = vm
+ info['vcenter'] = self.hostname
+ info['host'] = vm_facts['hw_esxi_host']
+ info['datastore'] = vm_facts['hw_datastores']
+ info['vm_folder'] = vm_facts['hw_folder']
+ info['instance_uuid'] = vm_facts['instance_uuid']
+ return info
+
+ def Instant_clone(self):
+ # clone the vm on VC
+ if self.vm_obj is None:
+ vm_id = self.parent_vm or self.uuid or self.moid
+ self.module.fail_json(msg="Failed to find the VM/template with %s" % vm_id)
+ try:
+ task = self.vm_obj.InstantClone_Task(spec=self.instant_clone_spec)
+ wait_for_task(task)
+ vm_info = self.get_new_vm_info(self.vm_name)
+ result = {'changed': True, 'failed': False, 'vm_info': vm_info}
+ except TaskError as task_e:
+ self.module.fail_json(msg=to_native(task_e))
+
+ self.destination_content = connect_to_api(
+ self.module,
+ hostname=self.hostname,
+ username=self.username,
+ password=self.password,
+ port=self.port,
+ validate_certs=self.validate_certs)
+
+ vm_IC = find_vm_by_name(content=self.destination_content, vm_name=self.params['name'])
+ if vm_IC and self.params.get('guestinfo_vars'):
+ guest_custom_mng = self.destination_content.guestCustomizationManager
+ # Make an object for authentication in a guest OS
+ auth_obj = vim.vm.guest.NamePasswordAuthentication()
+
+ guest_user = self.params.get('vm_username')
+ guest_password = self.params.get('vm_password')
+ auth_obj.username = guest_user
+ auth_obj.password = guest_password
+
+ guestinfo_vars = self.params.get('guestinfo_vars')
+ # Make a spec object to customize Guest OS
+ customization_spec = vim.vm.customization.Specification()
+ customization_spec.globalIPSettings = vim.vm.customization.GlobalIPSettings()
+ customization_spec.globalIPSettings.dnsServerList = [guestinfo_vars[0]['dns']]
+ # Make an identity object to do linux prep
+ # The params are reflected the specified following after rebooting OS
+ customization_spec.identity = vim.vm.customization.LinuxPrep()
+ customization_spec.identity.domain = guestinfo_vars[0]['domain']
+ customization_spec.identity.hostName = vim.vm.customization.FixedName()
+ customization_spec.identity.hostName.name = guestinfo_vars[0]['hostname']
+
+ customization_spec.nicSettingMap = []
+ adapter_mapping_obj = vim.vm.customization.AdapterMapping()
+ adapter_mapping_obj.adapter = vim.vm.customization.IPSettings()
+ adapter_mapping_obj.adapter.ip = vim.vm.customization.FixedIp()
+ adapter_mapping_obj.adapter.ip.ipAddress = guestinfo_vars[0]['ipaddress']
+ adapter_mapping_obj.adapter.subnetMask = guestinfo_vars[0]['netmask']
+ adapter_mapping_obj.adapter.gateway = [guestinfo_vars[0]['gateway']]
+
+ customization_spec.nicSettingMap.append(adapter_mapping_obj)
+
+ try:
+ task_guest = guest_custom_mng.CustomizeGuest_Task(vm_IC, auth_obj, customization_spec)
+ wait_for_task(task_guest)
+ vm_info = self.get_new_vm_info(self.vm_name)
+ result = {'changed': True, 'failed': False, 'vm_info': vm_info}
+ except TaskError as task_e:
+ self.module.fail_json(msg=to_native(task_e))
+
+ # Should require rebooting to reflect customization parameters to instant clone vm.
+ instant_vm_obj = find_vm_by_id(content=self.content, vm_id=vm_info['instance_uuid'], vm_id_type='instance_uuid')
+ set_vm_power_state(content=self.content, vm=instant_vm_obj, state='rebootguest', force=False)
+
+ if self.wait_vm_tools:
+ interval = 15
+ # Wait vm tools is started after rebooting.
+ while self.wait_vm_tools_timeout > 0:
+ if instant_vm_obj.guest.toolsRunningStatus != 'guestToolsRunning':
+ break
+ self.wait_vm_tools_timeout -= interval
+ time.sleep(interval)
+
+ while self.wait_vm_tools_timeout > 0:
+ if instant_vm_obj.guest.toolsRunningStatus == 'guestToolsRunning':
+ break
+ self.wait_vm_tools_timeout -= interval
+ time.sleep(interval)
+
+ if self.wait_vm_tools_timeout <= 0:
+ self.module.fail_json(msg="Timeout has been reached for waiting to start the vm tools.")
+
+ return result
+
+ def sanitize_params(self):
+ '''
+ Verify user-provided parameters
+ '''
+ # connect to host/VC
+ self.destination_content = connect_to_api(
+ self.module,
+ hostname=self.hostname,
+ username=self.username,
+ password=self.password,
+ port=self.port,
+ validate_certs=self.validate_certs)
+
+ use_instance_uuid = self.params.get('use_instance_uuid') or False
+
+ if 'parent_vm' in self.params and self.params['parent_vm']:
+ self.vm_obj = find_vm_by_name(content=self.destination_content, vm_name=self.parent_vm)
+
+ elif 'uuid' in self.params and self.params['uuid']:
+ if not use_instance_uuid:
+ self.vm_obj = find_vm_by_id(content=self.destination_content, vm_id=self.params['uuid'], vm_id_type="uuid")
+ elif use_instance_uuid:
+ self.vm_obj = find_vm_by_id(content=self.destination_content,
+ vm_id=self.params['uuid'],
+ vm_id_type="instance_uuid")
+
+ elif 'moid' in self.params and self.params['moid']:
+ self.vm_obj = vim.VirtualMachine(self.params['moid'], self.si._stub)
+
+ if self.vm_obj is None:
+ vm_id = self.parent_vm or self.uuid or self.moid
+ self.module.fail_json(msg="Failed to find the VM/template with %s" % vm_id)
+
+ vm = find_vm_by_name(content=self.destination_content, vm_name=self.params['name'])
+ if vm:
+ self.module.exit_json(changed=False, msg="A VM with the given name already exists")
+
+ self.datacenter = self.find_datacenter_by_name(self.params['datacenter'])
+
+ # datacentre check
+ if self.datacenter is None:
+ self.module.fail_json(msg="Datacenter not found.")
+
+ datastore_name = self.params['datastore']
+ datastore_cluster = find_obj(self.destination_content, [vim.StoragePod], datastore_name)
+
+ if datastore_cluster:
+ # If user specified datastore cluster so get recommended datastore
+ datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster)
+ # Check if get_recommended_datastore or user specified datastore exists or not
+ # datastore check
+ self.datastore = self.find_datastore_by_name(datastore_name=datastore_name)
+
+ if self.datastore is None:
+ self.module.fail_json(msg="Datastore not found.")
+
+ if self.params['folder']:
+ self.folder = self.find_folder_by_fqpn(folder_name=self.params['folder'], datacenter_name=self.params['datacenter'], folder_type='vm')
+ if self.folder is None:
+ self.module.fail_json(msg="Folder not found.")
+ else:
+ self.folder = self.datacenter.vmFolder
+
+ self.host = self.find_hostsystem_by_name(host_name=self.params['host'])
+ if self.host is None:
+ self.module.fail_json(msg="Host not found.")
+
+ if self.params['resource_pool']:
+ self.resource_pool = self.find_resource_pool_by_name(resource_pool_name=self.params['resource_pool'])
+ if self.resource_pool is None:
+ self.module.fail_json(msg="Resource Pool not found.")
+ else:
+ self.resource_pool = self.host.parent.resourcePool
+
+ if self.params['guestinfo_vars']:
+ self.guestinfo_vars = self.dict_to_optionvalues()
+ else:
+ self.guestinfo_vars = None
+
+ def dict_to_optionvalues(self):
+ optionvalues = []
+ for dictionary in self.params['guestinfo_vars']:
+ for key, value in dictionary.items():
+ opt = vim.option.OptionValue()
+ (opt.key, opt.value) = ("guestinfo.ic." + key, value)
+ optionvalues.append(opt)
+
+ return optionvalues
+
+ def populate_specs(self):
+
+ # populate relocate spec
+ self.relocate_spec.datastore = self.datastore
+ self.relocate_spec.pool = self.resource_pool
+ self.relocate_spec.folder = self.folder
+ # populate Instant clone spec
+ self.instant_clone_spec.name = self.vm_name
+ self.instant_clone_spec.location = self.relocate_spec
+ self.instant_clone_spec.config = self.guestinfo_vars
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ name=dict(type='str', required=True, aliases=['vm_name']),
+ uuid=dict(type='str'),
+ moid=dict(type='str'),
+ vm_username=dict(type='str', required=False),
+ vm_password=dict(type='str', no_log=True, required=False),
+ datacenter=dict(type='str', required=True),
+ datastore=dict(type='str', required=True),
+ use_instance_uuid=dict(type='bool', default=False),
+ host=dict(type='str', required=True, aliases=['esxi_hostname']),
+ folder=dict(type='str', required=False),
+ resource_pool=dict(type='str', required=False),
+ parent_vm=dict(type='str'),
+ wait_vm_tools=dict(type='bool', default=True),
+ wait_vm_tools_timeout=dict(type='int', default=300),
+ guestinfo_vars=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ ipaddress=dict(type='str'),
+ netmask=dict(type='str'),
+ gateway=dict(type='str'),
+ dns=dict(type='str'),
+ domain=dict(type='str'),
+ hostname=dict(type='str'),
+ ),
+ ),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['uuid', 'parent_vm', 'moid'],
+ ],
+ mutually_exclusive=[
+ ['uuid', 'parent_vm', 'moid'],
+ ],
+ required_together=[
+ ['vm_username', 'vm_password', 'guestinfo_vars']
+ ]
+ )
+ result = {'failed': False, 'changed': False}
+
+ if module.check_mode:
+ result.update(
+ vm_name=module.params['name'],
+ host=module.params['hostname'],
+ datastore=module.params['datastore'],
+ vm_folder=module.params['folder'],
+ changed=True,
+ desired_operation='Create VM with check mode'
+ )
+ module.exit_json(**result)
+
+ clone_manager = VmwareGuestInstantClone(module)
+ clone_manager.sanitize_params()
+ clone_manager.populate_specs()
+ result = clone_manager.Instant_clone()
+
+ if result['failed']:
+ module.fail_json(**result)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_move.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_move.py
new file mode 100644
index 000000000..cef4e8276
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_move.py
@@ -0,0 +1,246 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Jose Angel Munoz <josea.munoz () gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_move
+short_description: Moves virtual machines in vCenter
+description:
+ - This module can be used to move virtual machines between folders.
+author:
+ - Jose Angel Munoz (@imjoseangel)
+options:
+ name:
+ description:
+ - Name of the existing virtual machine to move.
+ - This is required if C(uuid) or C(moid) is not supplied.
+ type: str
+ uuid:
+ description:
+ - UUID of the virtual machine to manage if known, this is VMware's unique identifier.
+ - This is required if C(name) or C(moid) is not supplied.
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ type: str
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: false
+ type: bool
+ name_match:
+ description:
+ - If multiple virtual machines matching the name, use the first or last found.
+ default: 'first'
+ choices: [ first, last ]
+ type: str
+ dest_folder:
+ description:
+ - Absolute path to move an existing guest
+ - The dest_folder should include the datacenter. ESX's datacenter is ha-datacenter.
+ - This parameter is case sensitive.
+ - 'Examples:'
+ - ' dest_folder: /ha-datacenter/vm'
+ - ' dest_folder: ha-datacenter/vm'
+ - ' dest_folder: /datacenter1/vm'
+ - ' dest_folder: datacenter1/vm'
+ - ' dest_folder: /datacenter1/vm/folder1'
+ - ' dest_folder: datacenter1/vm/folder1'
+ - ' dest_folder: /folder1/datacenter1/vm'
+ - ' dest_folder: folder1/datacenter1/vm'
+ - ' dest_folder: /folder1/datacenter1/vm/folder2'
+ required: true
+ type: str
+ datacenter:
+ description:
+ - Destination datacenter for the move operation
+ required: true
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Move Virtual Machine
+ community.vmware.vmware_guest_move:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: datacenter
+ name: testvm-1
+ dest_folder: "/{{ datacenter }}/vm"
+ delegate_to: localhost
+
+- name: Move Virtual Machine using MoID
+ community.vmware.vmware_guest_move:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: datacenter
+ moid: vm-42
+ dest_folder: "/{{ datacenter }}/vm"
+ delegate_to: localhost
+
+- name: Get VM UUID
+ vmware_guest_facts:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter }}"
+ folder: "/{{ datacenter }}/vm"
+ name: "{{ vm_name }}"
+ delegate_to: localhost
+ register: vm_facts
+
+- name: Get UUID from previous task and pass it to this task
+ community.vmware.vmware_guest_move:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter }}"
+ uuid: "{{ vm_facts.instance.hw_product_uuid }}"
+ dest_folder: "/DataCenter/vm/path/to/new/folder/where/we/want"
+ delegate_to: localhost
+ register: facts
+'''
+
+RETURN = r'''
+instance:
+ description: metadata about the virtual machine
+ returned: always
+ type: dict
+ sample: {
+ "annotation": null,
+ "current_snapshot": null,
+ "customvalues": {},
+ "guest_consolidation_needed": false,
+ "guest_question": null,
+ "guest_tools_status": null,
+ "guest_tools_version": "0",
+ "hw_cores_per_socket": 1,
+ "hw_datastores": [
+ "LocalDS_0"
+ ],
+ "hw_esxi_host": "DC0_H0",
+ "hw_eth0": {
+ "addresstype": "generated",
+ "ipaddresses": null,
+ "label": "ethernet-0",
+ "macaddress": "00:0c:29:6b:34:2c",
+ "macaddress_dash": "00-0c-29-6b-34-2c",
+ "summary": "DVSwitch: 43cdd1db-1ef7-4016-9bbe-d96395616199"
+ },
+ "hw_files": [
+ "[LocalDS_0] DC0_H0_VM0/DC0_H0_VM0.vmx"
+ ],
+ "hw_folder": "/F0/DC0/vm/F0",
+ "hw_guest_full_name": null,
+ "hw_guest_ha_state": null,
+ "hw_guest_id": "otherGuest",
+ "hw_interfaces": [
+ "eth0"
+ ],
+ "hw_is_template": false,
+ "hw_memtotal_mb": 32,
+ "hw_name": "DC0_H0_VM0",
+ "hw_power_status": "poweredOn",
+ "hw_processor_count": 1,
+ "hw_product_uuid": "581c2808-64fb-45ee-871f-6a745525cb29",
+ "instance_uuid": "8bcb0b6e-3a7d-4513-bf6a-051d15344352",
+ "ipv4": null,
+ "ipv6": null,
+ "module_hw": true,
+ "snapshots": []
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task
+
+
+class PyVmomiHelper(PyVmomi):
+ def __init__(self, module):
+ super(PyVmomiHelper, self).__init__(module)
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ name=dict(type='str'),
+ name_match=dict(
+ type='str', choices=['first', 'last'], default='first'),
+ uuid=dict(type='str'),
+ moid=dict(type='str'),
+ use_instance_uuid=dict(type='bool', default=False),
+ dest_folder=dict(type='str', required=True),
+ datacenter=dict(type='str', required=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=[
+ ['name', 'uuid', 'moid']
+ ],
+ mutually_exclusive=[
+ ['name', 'uuid', 'moid']
+ ],
+ supports_check_mode=True
+ )
+
+ # FindByInventoryPath() does not require an absolute path
+ # so we should leave the input folder path unmodified
+ module.params['dest_folder'] = module.params['dest_folder'].rstrip('/')
+ pyv = PyVmomiHelper(module)
+ search_index = pyv.content.searchIndex
+
+ # Check if the VM exists before continuing
+ vm = pyv.get_vm()
+
+ # VM exists
+ if vm:
+ try:
+ vm_path = pyv.get_vm_path(pyv.content, vm).lstrip('/')
+ if module.params['name']:
+ vm_name = module.params['name']
+ else:
+ vm_name = vm.name
+
+ vm_full = vm_path + '/' + vm_name
+ folder = search_index.FindByInventoryPath(module.params['dest_folder'])
+ if folder is None:
+ module.fail_json(msg="Folder name and/or path does not exist")
+ vm_to_move = search_index.FindByInventoryPath(vm_full)
+ if module.check_mode:
+ module.exit_json(changed=True, instance=pyv.gather_facts(vm))
+ if vm_path != module.params['dest_folder'].lstrip('/'):
+ move_task = folder.MoveInto([vm_to_move])
+ changed, err = wait_for_task(move_task)
+ if changed:
+ module.exit_json(
+ changed=True, instance=pyv.gather_facts(vm))
+ else:
+ module.exit_json(instance=pyv.gather_facts(vm))
+ except Exception as exc:
+ module.fail_json(msg="Failed to move VM with exception %s" %
+ to_native(exc))
+ else:
+ if module.check_mode:
+ module.exit_json(changed=False)
+ vm_id = (module.params.get('uuid') or module.params.get('name') or module.params.get('moid'))
+ module.fail_json(msg="Unable to find VM %s to move to %s" % (vm_id, module.params.get('dest_folder')))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_network.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_network.py
new file mode 100644
index 000000000..5cde25c03
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_network.py
@@ -0,0 +1,829 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2020, Ansible Project
+# Copyright: (c) 2019, Diane Wang <dianew@vmware.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_network
+short_description: Manage network adapters of specified virtual machine in given vCenter infrastructure
+description:
+ - This module is used to add, reconfigure, remove network adapter of given virtual machine.
+author:
+ - Diane Wang (@Tomorrow9) <dianew@vmware.com>
+notes:
+ - For backwards compatibility network_data is returned when using the gather_network_info parameter
+options:
+ name:
+ description:
+ - Name of virtual machine
+ - Required if C(uuid) or C(moid) is not supplied.
+ type: str
+ uuid:
+ description:
+ - vm uuid
+ - Required if C(name) or C(moid) is not supplied.
+ type: str
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: false
+ type: bool
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - Required if C(uuid) or C(name) is not supplied.
+ type: str
+ folder:
+ description:
+ - Folder location of given VM, this is only required when there's multiple VM's with the same name.
+ type: str
+ datacenter:
+ default: ha-datacenter
+ description:
+ - Datacenter the VM belongs to.
+ type: str
+ cluster:
+ description:
+ - Name of cluster where VM belongs to.
+ type: str
+ esxi_hostname:
+ description:
+ - The hostname of the ESXi host where the VM belongs to.
+ type: str
+ mac_address:
+ description:
+ - MAC address of the NIC that should be altered, if a MAC address is not supplied a new nic will be created.
+ - Required when I(state=absent).
+ type: str
+ label:
+ description:
+ - 'Label of the NIC that should be altered. C(mac_address) or C(label) should be set to get the corresponding
+ device to reconfigure.'
+ - Alter the name of the network adapter.
+ type: str
+ vlan_id:
+ description:
+ - VLAN id associated with the network.
+ type: int
+ network_name:
+ description:
+ - Name of network in vSphere.
+ type: str
+ device_type:
+ default: vmxnet3
+ description:
+ - Type of virtual network device.
+ - 'Valid choices are - C(e1000), C(e1000e), C(pcnet32), C(vmxnet2), C(vmxnet3) (default), C(sriov), C(pvrdma).'
+ type: str
+ pvrdma_device_protocol:
+ version_added: '3.3.0'
+ description:
+ - The PVRDMA device protocol used. Valid choices are - C(rocev1), C(rocev2).
+ - This parameter is only used on the VM with hardware version >=14 and <= 19.
+ type: str
+ switch:
+ description:
+ - Name of the (dv)switch for destination network, this is only required for dvswitches.
+ type: str
+ guest_control:
+ default: true
+ description:
+ - Enables guest control over whether the connectable device is connected.
+ type: bool
+ state:
+ default: present
+ choices: ['present', 'absent']
+ description:
+ - NIC state.
+      - When C(state=present), a nic will be added if a mac address or label does not previously exist or is unset.
+ - When C(state=absent), the I(mac_address) parameter has to be set.
+ type: str
+ start_connected:
+ default: true
+ description:
+ - If NIC should be connected to network on startup.
+ type: bool
+ wake_onlan:
+ default: false
+ description:
+ - Enable wake on LAN.
+ type: bool
+ connected:
+ default: true
+ description:
+ - If NIC should be connected to the network.
+ type: bool
+ directpath_io:
+ default: false
+ description:
+ - Enable Universal Pass-through (UPT).
+ - Only compatible with the C(vmxnet3) device type.
+ type: bool
+ physical_function_backing:
+ version_added: '2.3.0'
+ type: str
+ description:
+ - If set, specifies the PCI ID of the physical function to use as backing for a SR-IOV network adapter.
+ - This option is only compatible for SR-IOV network adapters.
+ virtual_function_backing:
+ version_added: '2.3.0'
+ type: str
+ description:
+      - If set, specifies the PCI ID of the virtual function to use as backing for a SR-IOV network adapter.
+ - This option is only compatible for SR-IOV network adapters.
+ allow_guest_os_mtu_change:
+ version_added: '2.3.0'
+ default: true
+ type: bool
+ description:
+ - Allows the guest OS to change the MTU on a SR-IOV network adapter.
+ - This option is only compatible for SR-IOV network adapters.
+ force:
+ default: false
+ description:
+ - Force adapter creation even if an existing adapter is attached to the same network.
+ type: bool
+ gather_network_info:
+ aliases:
+ - gather_network_facts
+ default: false
+ description:
+ - Return information about current guest network adapters.
+ type: bool
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: change network for 00:50:56:11:22:33 on vm01.domain.fake
+ community.vmware.vmware_guest_network:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ name: vm01.domain.fake
+ mac_address: 00:50:56:11:22:33
+ network_name: admin-network
+ state: present
+
+- name: add a nic on network with vlan id 2001 for 422d000d-2000-ffff-0000-b00000000000
+ community.vmware.vmware_guest_network:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ uuid: 422d000d-2000-ffff-0000-b00000000000
+ vlan_id: 2001
+
+- name: remove nic with mac 00:50:56:11:22:33 from vm01.domain.fake
+ community.vmware.vmware_guest_network:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ mac_address: 00:50:56:11:22:33
+ name: vm01.domain.fake
+ state: absent
+
+- name: add multiple nics to vm01.domain.fake
+ community.vmware.vmware_guest_network:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ name: vm01.domain.fake
+ state: present
+ vlan_id: "{{ item.vlan_id | default(omit) }}"
+ network_name: "{{ item.network_name | default(omit) }}"
+ connected: "{{ item.connected | default(omit) }}"
+ loop:
+ - vlan_id: 2000
+ connected: false
+ - network_name: guest-net
+ connected: true
+'''
+
+RETURN = r'''
+network_info:
+ description: metadata about the virtual machine network adapters
+ returned: always
+ type: list
+ sample:
+ "network_info": [
+ {
+ "mac_address": "00:50:56:AA:AA:AA",
+ "allow_guest_ctl": true,
+ "connected": true,
+ "device_type": "vmxnet3",
+ "label": "Network adapter 2",
+ "network_name": "admin-net",
+ "start_connected": true,
+ "switch": "vSwitch0",
+ "unit_number": 8,
+ "vlan_id": 10,
+ "wake_onlan": false
+ },
+ {
+ "mac_address": "00:50:56:BB:BB:BB",
+ "allow_guest_ctl": true,
+ "connected": true,
+ "device_type": "vmxnet3",
+ "label": "Network adapter 1",
+ "network_name": "guest-net",
+ "start_connected": true,
+ "switch": "vSwitch0",
+ "unit_number": 7,
+ "vlan_id": 10,
+ "wake_onlan": true
+ }
+ ]
+network_data:
+ description: For backwards compatibility, metadata about the virtual machine network adapters
+ returned: when using gather_network_info parameter
+ type: dict
+ sample:
+ "network_data": {
+ '0': {
+ "mac_addr": "00:50:56:AA:AA:AA",
+ "mac_address": "00:50:56:AA:AA:AA",
+ "allow_guest_ctl": true,
+ "connected": true,
+ "device_type": "vmxnet3",
+ "label": "Network adapter 2",
+ "name": "admin-net",
+ "network_name": "admin-net",
+ "start_connected": true,
+ "switch": "vSwitch0",
+ "unit_number": 8,
+ "vlan_id": 10,
+ "wake_onlan": false
+ },
+ '1': {
+ "mac_addr": "00:50:56:BB:BB:BB",
+ "mac_address": "00:50:56:BB:BB:BB",
+ "allow_guest_ctl": true,
+ "connected": true,
+ "device_type": "vmxnet3",
+ "label": "Network adapter 1",
+ "name": "guest-net",
+ "network_name": "guest-net",
+ "start_connected": true,
+ "switch": "vSwitch0",
+ "unit_number": 7,
+ "vlan_id": 10,
+ "wake_onlan": true
+ }
+ }
+
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+import copy
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, TaskError, vmware_argument_spec, wait_for_task
+from ansible_collections.community.vmware.plugins.module_utils.vm_device_helper import PyVmomiDeviceHelper
+
+
class PyVmomiHelper(PyVmomi):
    """vSphere helper for adding, reconfiguring and removing VM network adapters."""

    def __init__(self, module):
        super(PyVmomiHelper, self).__init__(module)
        # Tracks whether a reconfiguration was detected/applied.
        self.change_detected = False
        # Exposes the NIC device-type lookup table (nic_device_type).
        self.device_helper = PyVmomiDeviceHelper(self.module)
+
+ def _get_network_object(self, vm_obj):
+ '''
+ return network object matching given parameters
+ :param vm_obj: vm object
+ :return: network object
+ :rtype: object
+ '''
+ if not self.params['esxi_hostname'] or not self.params['cluster']:
+ compute_resource = vm_obj.runtime.host
+ else:
+ compute_resource = self._get_compute_resource_by_name()
+
+ pg_lookup = {}
+ vlan_id = self.params['vlan_id']
+ network_name = self.params['network_name']
+ switch_name = self.params['switch']
+
+ for pg in vm_obj.runtime.host.config.network.portgroup:
+ pg_lookup[pg.spec.name] = {'switch': pg.spec.vswitchName, 'vlan_id': pg.spec.vlanId}
+
+ if compute_resource:
+ for network in compute_resource.network:
+ if isinstance(network, vim.dvs.DistributedVirtualPortgroup):
+ dvs = network.config.distributedVirtualSwitch
+ if (switch_name and dvs.config.name == switch_name) or not switch_name:
+ if network.config.name == network_name:
+ return network
+ if hasattr(network.config.defaultPortConfig.vlan, 'vlanId') and \
+ network.config.defaultPortConfig.vlan.vlanId == vlan_id:
+ return network
+ if hasattr(network.config.defaultPortConfig.vlan, 'pvlanId') and \
+ network.config.defaultPortConfig.vlan.pvlanId == vlan_id:
+ return network
+ elif isinstance(network, vim.Network):
+ if network_name and network_name == network.name:
+ return network
+ if vlan_id:
+ for k in pg_lookup.keys():
+ if vlan_id == pg_lookup[k]['vlan_id']:
+ if k == network.name:
+ return network
+ break
+ return None
+
+ def _get_vlanid_from_network(self, network):
+ '''
+ get the vlan id from network object
+ :param network: network object to expect, either vim.Network or vim.dvs.DistributedVirtualPortgroup
+ :return: vlan id as an integer
+ :rtype: integer
+ '''
+ vlan_id = None
+ if isinstance(network, vim.dvs.DistributedVirtualPortgroup):
+ vlan_id = network.config.defaultPortConfig.vlan.vlanId
+
+ if isinstance(network, vim.Network) and hasattr(network, 'host'):
+ for host in network.host:
+ for pg in host.config.network.portgroup:
+ if pg.spec.name == network.name:
+ vlan_id = pg.spec.vlanId
+ return vlan_id
+
+ return vlan_id
+
    def _get_nics_from_vm(self, vm_obj):
        '''
        Return a list of dictionaries containing vm nic info and
        a list with the nic object(s).

        :param vm_obj: object containing virtual machine
        :return: list of dicts and list with nic object(s)
        :rtype: list, list
        '''
        nic_info_lst = []
        nics = [nic for nic in vm_obj.config.hardware.device if isinstance(nic, vim.vm.device.VirtualEthernetCard)]
        for nic in nics:
            # common items of nic parameters
            d_item = dict(
                mac_address=nic.macAddress,
                label=nic.deviceInfo.label,
                unit_number=nic.unitNumber,
                wake_onlan=nic.wakeOnLanEnabled,
                allow_guest_ctl=nic.connectable.allowGuestControl,
                connected=nic.connectable.connected,
                start_connected=nic.connectable.startConnected,
            )
            # If NIC is a SR-IOV adapter, expose its backing/MTU settings too.
            if isinstance(nic, vim.vm.device.VirtualSriovEthernetCard):
                d_item['allow_guest_os_mtu_change'] = nic.allowGuestOSMtuChange
                if isinstance(nic.sriovBacking, vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo):
                    if isinstance(nic.sriovBacking.physicalFunctionBacking, vim.vm.device.VirtualPCIPassthrough):
                        d_item['physical_function_backing'] = nic.sriovBacking.physicalFunctionBacking.id
                    if isinstance(nic.sriovBacking.virtualFunctionBacking, vim.vm.device.VirtualPCIPassthrough):
                        d_item['virtual_function_backing'] = nic.sriovBacking.virtualFunctionBacking.id
            # If a distributed port group specified
            if isinstance(nic.backing, vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo):
                key = nic.backing.port.portgroupKey
                for portgroup in vm_obj.network:
                    if hasattr(portgroup, 'key') and portgroup.key == key:
                        d_item['network_name'] = portgroup.name
                        d_item['switch'] = portgroup.config.distributedVirtualSwitch.name
                        break
            # If an NSX-T port group specified
            elif isinstance(nic.backing, vim.vm.device.VirtualEthernetCard.OpaqueNetworkBackingInfo):
                d_item['network_name'] = nic.backing.opaqueNetworkId
                d_item['switch'] = nic.backing.opaqueNetworkType
            # If a port group specified
            elif isinstance(nic.backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):
                d_item['network_name'] = nic.backing.network.name
                d_item['vlan_id'] = self._get_vlanid_from_network(nic.backing.network)
                if isinstance(nic.backing.network, vim.Network):
                    # Resolve the standard vSwitch the backing portgroup lives on.
                    for pg in vm_obj.runtime.host.config.network.portgroup:
                        if pg.spec.name == nic.backing.network.name:
                            d_item['switch'] = pg.spec.vswitchName
                            break

            # Map the pyVmomi device class back to the module's device_type name.
            # NOTE(review): assumes every NIC class has an entry in
            # nic_device_type — a class missing from that table would raise
            # KeyError on the 'device_type' lookup below; confirm.
            for k in self.device_helper.nic_device_type:
                if isinstance(nic, self.device_helper.nic_device_type[k]):
                    d_item['device_type'] = k
                    # VirtualVmxnet3Vrdma extends VirtualVmxnet3, so keep
                    # scanning past 'vmxnet3' to let 'pvrdma' override it.
                    if k == 'vmxnet3':
                        continue
                    else:
                        break
            if d_item['device_type'] == 'pvrdma':
                d_item['device_protocol'] = nic.deviceProtocol

            nic_info_lst.append(d_item)

        # Stable ordering by MAC; None sorts as the all-zero address.
        nic_info_lst = sorted(nic_info_lst, key=lambda d: d['mac_address'] if (d['mac_address'] is not None) else '00:00:00:00:00:00')
        return nic_info_lst, nics
+
+ def _get_compute_resource_by_name(self, recurse=True):
+ '''
+ get compute resource object with matching name of esxi_hostname or cluster
+ parameters.
+ :param recurse: recurse vmware content folder, default is True
+ :return: object matching vim.ComputeResource or None if no match
+ :rtype: object
+ '''
+ resource_name = None
+ if self.params['esxi_hostname']:
+ resource_name = self.params['esxi_hostname']
+
+ if self.params['cluster']:
+ resource_name = self.params['cluster']
+
+ container = self.content.viewManager.CreateContainerView(self.content.rootFolder, [vim.ComputeResource], recurse)
+ for obj in container.view:
+ if self.params['esxi_hostname'] and isinstance(obj, vim.ClusterComputeResource) and hasattr(obj, 'host'):
+ for host in obj.host:
+ if host.name == resource_name:
+ return obj
+
+ if obj.name == resource_name:
+ return obj
+
+ return None
+
+ def _new_nic_spec(self, vm_obj, nic_obj=None, network_params=None):
+ network = self._get_network_object(vm_obj)
+
+ if network_params:
+ connected = network_params['connected']
+ device_type = network_params['device_type'].lower()
+ directpath_io = network_params['directpath_io']
+ guest_control = network_params['guest_control']
+ label = network_params['label']
+ mac_address = network_params['mac_address']
+ start_connected = network_params['start_connected']
+ wake_onlan = network_params['wake_onlan']
+ pf_backing = network_params['physical_function_backing']
+ vf_backing = network_params['virtual_function_backing']
+ allow_guest_os_mtu_change = network_params['allow_guest_os_mtu_change']
+ else:
+ connected = self.params['connected']
+ device_type = self.params['device_type'].lower()
+ directpath_io = self.params['directpath_io']
+ guest_control = self.params['guest_control']
+ label = self.params['label']
+ mac_address = self.params['mac_address']
+ start_connected = self.params['start_connected']
+ wake_onlan = self.params['wake_onlan']
+ pf_backing = self.params['physical_function_backing']
+ vf_backing = self.params['virtual_function_backing']
+ allow_guest_os_mtu_change = self.params['allow_guest_os_mtu_change']
+ pvrdma_device_protocol = self.params['pvrdma_device_protocol']
+
+ if not nic_obj:
+ device_obj = self.device_helper.nic_device_type[device_type]
+ nic_spec = vim.vm.device.VirtualDeviceSpec(
+ device=device_obj()
+ )
+ if mac_address:
+ nic_spec.device.addressType = 'manual'
+ nic_spec.device.macAddress = mac_address
+
+ if label:
+ nic_spec.device.deviceInfo = vim.Description(
+ label=label
+ )
+
+ if device_type == 'pvrdma' and pvrdma_device_protocol:
+ nic_spec.device.deviceProtocol = pvrdma_device_protocol
+ else:
+ nic_spec = vim.vm.device.VirtualDeviceSpec(
+ operation=vim.vm.device.VirtualDeviceSpec.Operation.edit,
+ device=nic_obj
+ )
+ if label and label != nic_obj.deviceInfo.label:
+ nic_spec.device.deviceInfo = vim.Description(
+ label=label
+ )
+ if mac_address and mac_address != nic_obj.macAddress:
+ nic_spec.device.addressType = 'manual'
+ nic_spec.device.macAddress = mac_address
+
+ nic_spec.device.backing = self._nic_backing_from_obj(network)
+ nic_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo(
+ startConnected=start_connected,
+ allowGuestControl=guest_control,
+ connected=connected
+ )
+ nic_spec.device.wakeOnLanEnabled = wake_onlan
+
+ if (pf_backing is not None or vf_backing is not None) and not isinstance(nic_spec.device, vim.vm.device.VirtualSriovEthernetCard):
+ self.module_fail_json(msg='physical_function_backing, virtual_function_backing can only be used with the sriov device type')
+
+ if isinstance(nic_spec.device, vim.vm.device.VirtualSriovEthernetCard):
+ nic_spec.device.allowGuestOSMtuChange = allow_guest_os_mtu_change
+ nic_spec.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
+ if pf_backing is not None:
+ nic_spec.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
+ nic_spec.device.sriovBacking.physicalFunctionBacking.id = pf_backing
+ if vf_backing is not None:
+ nic_spec.device.sriovBacking.virtualFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
+ nic_spec.device.sriovBacking.virtualFunctionBacking.id = vf_backing
+
+ if directpath_io and not isinstance(nic_spec.device, vim.vm.device.VirtualVmxnet3):
+ self.module.fail_json(msg='directpath_io can only be used with the vmxnet3 device type')
+
+ if directpath_io and isinstance(nic_spec.device, vim.vm.device.VirtualVmxnet3):
+ nic_spec.device.uptCompatibilityEnabled = True
+ return nic_spec
+
+ def _nic_backing_from_obj(self, network_obj):
+ rv = None
+ if isinstance(network_obj, vim.dvs.DistributedVirtualPortgroup):
+ rv = vim.VirtualEthernetCardDistributedVirtualPortBackingInfo(
+ port=vim.DistributedVirtualSwitchPortConnection(
+ portgroupKey=network_obj.key,
+ switchUuid=network_obj.config.distributedVirtualSwitch.uuid
+ )
+ )
+ elif isinstance(network_obj, vim.OpaqueNetwork):
+ rv = vim.vm.device.VirtualEthernetCard.OpaqueNetworkBackingInfo(
+ opaqueNetworkType='nsx.LogicalSwitch',
+ opaqueNetworkId=network_obj.summary.opaqueNetworkId
+ )
+ elif isinstance(network_obj, vim.Network):
+ rv = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo(
+ deviceName=network_obj.name,
+ network=network_obj
+ )
+ return rv
+
    def _nic_absent(self, network_params=None):
        '''
        Remove the NIC identified by mac_address from the VM.

        :param network_params: optional dict overriding self.params
            (only 'mac_address' is consulted here)
        :return: diff dict (before/after keyed by MAC address), changed flag,
            and the list of remaining NIC info dicts
        :rtype: dict, bool, list
        '''
        changed = False
        diff = {'before': {}, 'after': {}}
        if network_params:
            mac_address = network_params['mac_address']
        else:
            mac_address = self.params['mac_address']

        device_spec = None
        vm_obj = self.get_vm()
        if not vm_obj:
            self.module.fail_json(msg='could not find vm: {0}'.format(self.params['name']))
        nic_info, nic_obj_lst = self._get_nics_from_vm(vm_obj)

        # Record the pre-change state for the diff.
        for nic in nic_info:
            diff['before'].update({nic['mac_address']: copy.copy(nic)})

        network_info = copy.deepcopy(nic_info)

        for nic_obj in nic_obj_lst:
            if nic_obj.macAddress == mac_address:
                if self.module.check_mode:
                    # Fabricate the post-removal state without reconfiguring.
                    changed = True
                    for nic in nic_info:
                        if nic.get('mac_address') != nic_obj.macAddress:
                            diff['after'].update({nic['mac_address']: copy.copy(nic)})
                    network_info = [nic for nic in nic_info if nic.get('mac_address') != nic_obj.macAddress]
                    return diff, changed, network_info
                device_spec = vim.vm.device.VirtualDeviceSpec(
                    device=nic_obj,
                    operation=vim.vm.device.VirtualDeviceSpec.Operation.remove
                )
                break

        if not device_spec:
            # No NIC with that MAC address: nothing to do, report no change.
            diff['after'] = diff['before']
            return diff, changed, network_info

        try:
            task = vm_obj.ReconfigVM_Task(vim.vm.ConfigSpec(deviceChange=[device_spec]))
            wait_for_task(task)
        except (vim.fault.InvalidDeviceSpec, vim.fault.RestrictedVersion) as e:
            self.module.fail_json(msg='failed to reconfigure guest', detail=e.msg)

        if task.info.state == 'error':
            self.module.fail_json(msg='failed to reconfigure guest', detail=task.info.error.msg)

        # Re-read the VM to build the post-change state.
        vm_obj = self.get_vm()
        nic_info, nic_obj_lst = self._get_nics_from_vm(vm_obj)

        for nic in nic_info:
            diff['after'].update({nic.get('mac_address'): copy.copy(nic)})

        network_info = nic_info
        if diff['after'] != diff['before']:
            changed = True

        return diff, changed, network_info
+
+ def _get_nic_info(self):
+ rv = {'network_info': []}
+ vm_obj = self.get_vm()
+ nic_info, nic_obj_lst = self._get_nics_from_vm(vm_obj)
+
+ rv['network_info'] = nic_info
+ return rv
+
+ def _nic_present(self):
+ changed = False
+ diff = {'before': {}, 'after': {}}
+ force = self.params['force']
+ label = self.params['label']
+ mac_address = self.params['mac_address']
+ network_name = self.params['network_name']
+ switch = self.params['switch']
+ vlan_id = self.params['vlan_id']
+
+ vm_obj = self.get_vm()
+ if not vm_obj:
+ self.module.fail_json(msg='could not find vm: {0}'.format(self.params['name']))
+
+ if self.params['device_type'] == 'pvrdma':
+ if int(vm_obj.config.version.split('vmx-')[-1]) > 19 or int(vm_obj.config.version.split('vmx-')[-1]) == 13:
+ self.params['pvrdma_device_protocol'] = None
+ else:
+ if self.params['pvrdma_device_protocol'] and self.params['pvrdma_device_protocol'] not in ['rocev1', 'rocev2']:
+ self.module.fail_json(msg="Valid values of parameter 'pvrdma_device_protocol' are 'rocev1',"
+ " 'rocev2' for VM with hardware version >= 14 and <= 19.")
+ if self.params['pvrdma_device_protocol'] is None:
+ self.params['pvrdma_device_protocol'] = 'rocev2'
+
+ network_obj = self._get_network_object(vm_obj)
+ nic_info, nic_obj_lst = self._get_nics_from_vm(vm_obj)
+ label_lst = [d.get('label') for d in nic_info]
+ mac_addr_lst = [d.get('mac_address') for d in nic_info]
+ vlan_id_lst = [d.get('vlan_id') for d in nic_info]
+ network_name_lst = [d.get('network_name') for d in nic_info]
+
+ # TODO: make checks below less inelegant
+ if ((vlan_id and vlan_id in vlan_id_lst) or (network_name and network_name in network_name_lst)
+ and not mac_address
+ and not label
+ and not force):
+ for nic in nic_info:
+ diff['before'].update({nic.get('mac_address'): copy.copy(nic)})
+ diff['after'].update({nic.get('mac_address'): copy.copy(nic)})
+ return diff, changed, nic_info
+
+ if not network_obj and (network_name or vlan_id):
+ self.module.fail_json(
+ msg='unable to find specified network_name/vlan_id ({0}), check parameters'.format(
+ network_name or vlan_id
+ )
+ )
+
+ for nic in nic_info:
+ diff['before'].update({nic.get('mac_address'): copy.copy(nic)})
+
+ if (mac_address and mac_address in mac_addr_lst) or (label and label in label_lst):
+ for nic_obj in nic_obj_lst:
+ if (mac_address and nic_obj.macAddress == mac_address) or (label and label == nic_obj.deviceInfo.label):
+ device_spec = self._new_nic_spec(vm_obj, nic_obj)
+
+ # fabricate diff for check_mode
+ if self.module.check_mode:
+ for nic in nic_info:
+ nic_mac = nic.get('mac_address')
+ nic_label = nic.get('label')
+ if nic_mac == mac_address or nic_label == label:
+ diff['after'][nic_mac] = copy.deepcopy(nic)
+ diff['after'][nic_mac].update({'switch': switch or nic['switch']})
+ if network_obj:
+ diff['after'][nic_mac].update(
+ {
+ 'vlan_id': self._get_vlanid_from_network(network_obj),
+ 'network_name': network_obj.name
+ }
+ )
+ else:
+ diff['after'].update({nic_mac: copy.deepcopy(nic)})
+
+ if (not mac_address or mac_address not in mac_addr_lst) and (not label or label not in label_lst):
+ device_spec = self._new_nic_spec(vm_obj, None)
+ device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+ if self.module.check_mode:
+ # fabricate diff/returns for checkmode
+ diff['after'] = copy.deepcopy(diff['before'])
+ nic_mac = mac_address
+ if not nic_mac:
+ nic_mac = 'AA:BB:CC:DD:EE:FF'
+ if not label:
+ label = 'check_mode_adapter'
+ diff['after'].update(
+ {
+ nic_mac: {
+ 'vlan_id': self._get_vlanid_from_network(network_obj),
+ 'network_name': network_obj.name,
+ 'label': label,
+ 'mac_address': nic_mac,
+ 'unit_number': 40000
+ }
+ }
+ )
+
+ if self.module.check_mode:
+ network_info = [diff['after'][i] for i in diff['after']]
+ if diff['after'] != diff['before']:
+ changed = True
+ return diff, changed, network_info
+
+ if not self.module.check_mode:
+ try:
+ task = vm_obj.ReconfigVM_Task(vim.vm.ConfigSpec(deviceChange=[device_spec]))
+ wait_for_task(task)
+ except (vim.fault.InvalidDeviceSpec, vim.fault.RestrictedVersion) as e:
+ self.module.fail_json(msg='failed to reconfigure guest', detail=e.msg)
+ except TaskError as task_e:
+ self.module.fail_json(msg=to_native(task_e))
+
+ if task.info.state == 'error':
+ self.module.fail_json(msg='failed to reconfigure guest', detail=task.info.error.msg)
+
+ vm_obj = self.get_vm()
+ network_info, nic_obj_lst = self._get_nics_from_vm(vm_obj)
+ for nic in network_info:
+ diff['after'].update({nic.get('mac_address'): copy.copy(nic)})
+
+ if diff['after'] != diff['before']:
+ changed = True
+ return diff, changed, network_info
+
+
def main():
    '''Module entry point: gather NIC info or converge the requested NIC state.'''
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        uuid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        moid=dict(type='str'),
        folder=dict(type='str'),
        datacenter=dict(type='str', default='ha-datacenter'),
        esxi_hostname=dict(type='str'),
        cluster=dict(type='str'),
        mac_address=dict(type='str'),
        vlan_id=dict(type='int'),
        network_name=dict(type='str'),
        device_type=dict(type='str', default='vmxnet3'),
        pvrdma_device_protocol=dict(type='str'),
        label=dict(type='str'),
        switch=dict(type='str'),
        connected=dict(type='bool', default=True),
        start_connected=dict(type='bool', default=True),
        wake_onlan=dict(type='bool', default=False),
        directpath_io=dict(type='bool', default=False),
        physical_function_backing=dict(type='str'),
        virtual_function_backing=dict(type='str'),
        allow_guest_os_mtu_change=dict(type='bool', default=True),
        force=dict(type='bool', default=False),
        gather_network_info=dict(type='bool', default=False, aliases=['gather_network_facts']),
        guest_control=dict(type='bool', default=True),
        state=dict(type='str', default='present', choices=['absent', 'present'])
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['vlan_id', 'network_name']],
        required_one_of=[['name', 'uuid', 'moid']],
        supports_check_mode=True
    )

    pyv = PyVmomiHelper(module)

    if module.params['gather_network_info']:
        nics = pyv._get_nic_info()
        network_info = nics.get('network_info')
        # Legacy 'network_data' dict keyed by adapter position, kept for
        # backwards compatibility with the old facts-style return format.
        network_data = {}
        for idx, item in enumerate(sorted(network_info, key=lambda k: k['unit_number'])):
            item.update({'mac_addr': item['mac_address'], 'name': item['network_name']})
            network_data['{0}'.format(idx)] = item

        module.exit_json(network_info=network_info, network_data=network_data, changed=False)

    state = module.params['state']
    if state == 'present':
        diff, changed, network_info = pyv._nic_present()

    if state == 'absent':
        # A MAC address is the only supported selector for removal.
        if not module.params['mac_address']:
            module.fail_json(msg='parameter mac_address required when removing nics')
        diff, changed, network_info = pyv._nic_absent()

    module.exit_json(changed=changed, network_info=network_info, diff=diff)


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_powerstate.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_powerstate.py
new file mode 100644
index 000000000..8b0f546a4
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_powerstate.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r"""
+---
+module: vmware_guest_powerstate
+short_description: Manages power states of virtual machines in vCenter
+description:
+- Power on / Power off / Restart a virtual machine.
+author:
+- Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
+options:
+ datacenter:
+ description:
+ - The I(datacenter) where the VM you'd like to operate the power.
+ - This parameter is case sensitive.
+ default: ha-datacenter
+ type: str
+ state:
+ description:
+ - Set the state of the virtual machine.
+ choices: [ powered-off, powered-on, reboot-guest, restarted, shutdown-guest, suspended, present]
+ default: present
+ type: str
+ name:
+ description:
+ - Name of the virtual machine to work with.
+ - Virtual machine names in vCenter are not necessarily unique, which may be problematic, see C(name_match).
+ type: str
+ name_match:
+ description:
+ - If multiple virtual machines match the name, use the first or last found.
+ default: first
+ choices: [ first, last ]
+ type: str
+ uuid:
+ description:
+ - UUID of the instance to manage if known, this is VMware's unique identifier.
+ - This is required if C(name) or C(moid) is not supplied.
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ type: str
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: false
+ type: bool
+ folder:
+ description:
+ - Destination folder, absolute or relative path to find an existing guest.
+ - The folder should include the datacenter. ESX's datacenter is ha-datacenter
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ type: str
+ scheduled_at:
+ description:
+ - Date and time in string format at which specified task needs to be performed.
+ - "The required format for date and time - 'dd/mm/yyyy hh:mm'."
+ - Scheduling task requires vCenter server. A standalone ESXi server does not support this option.
+ type: str
+ schedule_task_name:
+ description:
+ - Name of schedule task.
+ - Valid only if C(scheduled_at) is specified.
+ type: str
+ required: false
+ schedule_task_description:
+ description:
+ - Description of schedule task.
+ - Valid only if C(scheduled_at) is specified.
+ type: str
+ required: false
+ schedule_task_enabled:
+ description:
+ - Flag to indicate whether the scheduled task is enabled or disabled.
+ type: bool
+ required: false
+ default: true
+ force:
+ description:
+ - Ignore warnings and complete the actions.
+ - This parameter is useful while forcing virtual machine state.
+ default: false
+ type: bool
+ state_change_timeout:
+ description:
+ - If the C(state) is set to C(shutdown-guest), by default the module will return immediately after sending the shutdown signal.
+ - If this argument is set to a positive integer, the module will instead wait for the VM to reach the poweredoff state.
+ - The value sets a timeout in seconds for the module to wait for the state change.
+ default: 0
+ type: int
+ answer:
+ description:
+ - A list of questions to answer, should one or more arise while waiting for the task to complete.
+ - Some common uses are to allow a cdrom to be changed even if locked, or to answer the question as to whether a VM was copied or moved.
+ - The I(answer) can be used if I(state) is C(powered-on).
+ suboptions:
+ question:
+ description:
+ - The message id, for example C(msg.uuid.altered).
+ type: str
+ required: true
+ response:
+ description:
+ - The choice key, for example C(button.uuid.copiedTheVM).
+ type: str
+ required: true
+ type: list
+ elements: dict
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+"""
+
+EXAMPLES = r"""
+- name: Set the state of a virtual machine to poweroff
+ community.vmware.vmware_guest_powerstate:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: "/{{ datacenter_name }}/vm/my_folder"
+ name: "{{ guest_name }}"
+ state: powered-off
+ delegate_to: localhost
+ register: deploy
+
+- name: Set the state of a virtual machine to poweron using MoID
+ community.vmware.vmware_guest_powerstate:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: "/{{ datacenter_name }}/vm/my_folder"
+ moid: vm-42
+ state: powered-on
+ delegate_to: localhost
+ register: deploy
+
+- name: Set the state of a virtual machine to poweroff at given scheduled time
+ community.vmware.vmware_guest_powerstate:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: "/{{ datacenter_name }}/vm/my_folder"
+ name: "{{ guest_name }}"
+ state: powered-off
+ scheduled_at: "09/01/2018 10:18"
+ schedule_task_name: "task_00001"
+ schedule_task_description: "Sample task to poweroff VM"
+ schedule_task_enabled: true
+ delegate_to: localhost
+ register: deploy_at_schedule_datetime
+
+- name: Wait for the virtual machine to shutdown
+ community.vmware.vmware_guest_powerstate:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: "{{ guest_name }}"
+ state: shutdown-guest
+ state_change_timeout: 200
+ delegate_to: localhost
+ register: deploy
+
+- name: Automatically answer if a question locked a virtual machine
+ block:
+ - name: Power on a virtual machine without the answer param
+ community.vmware.vmware_guest_powerstate:
+ hostname: "{{ esxi_hostname }}"
+ username: "{{ esxi_username }}"
+ password: "{{ esxi_password }}"
+ validate_certs: false
+ folder: "{{ f1 }}"
+ name: "{{ vm_name }}"
+ state: powered-on
+ rescue:
+ - name: Power on a virtual machine with the answer param
+ community.vmware.vmware_guest_powerstate:
+ hostname: "{{ esxi_hostname }}"
+ username: "{{ esxi_username }}"
+ password: "{{ esxi_password }}"
+ validate_certs: false
+ folder: "{{ f1 }}"
+ name: "{{ vm_name }}"
+ answer:
+ - question: "msg.uuid.altered"
+ response: "button.uuid.copiedTheVM"
+ state: powered-on
+"""
+
+RETURN = r""" # """
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from random import randint
+from datetime import datetime
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, set_vm_power_state, vmware_argument_spec, \
+ check_answer_question_status, make_answer_response, answer_question, gather_vm_facts
+from ansible.module_utils._text import to_native
+
+
def main():
    """Entry point: set, or schedule a change of, the power state of a virtual machine.

    The target VM is located by name, UUID or MoID. When ``scheduled_at`` is
    given, a vCenter scheduled task is created instead of changing the state
    immediately; otherwise the state change is performed right away, optionally
    answering a blocking VM question first (``answer``).
    """
    import time  # local import: only needed for the question-unlock polling loop

    argument_spec = vmware_argument_spec()
    argument_spec.update(
        datacenter=dict(type='str', default='ha-datacenter'),
        state=dict(type='str', default='present',
                   choices=['present', 'powered-off', 'powered-on', 'reboot-guest', 'restarted', 'shutdown-guest', 'suspended']),
        name=dict(type='str'),
        name_match=dict(type='str', choices=['first', 'last'], default='first'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        folder=dict(type='str'),
        force=dict(type='bool', default=False),
        scheduled_at=dict(type='str'),
        schedule_task_name=dict(),
        schedule_task_description=dict(),
        schedule_task_enabled=dict(type='bool', default=True),
        state_change_timeout=dict(type='int', default=0),
        answer=dict(type='list',
                    elements='dict',
                    options=dict(
                        question=dict(type='str', required=True),
                        response=dict(type='str', required=True)
                    ))
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=False,
        mutually_exclusive=[
            ['name', 'uuid', 'moid'],
            ['scheduled_at', 'answer']
        ],
    )

    result = dict(changed=False)

    if module.params['folder']:
        module.params['folder'] = module.params['folder'].rstrip('/')

    pyv = PyVmomi(module)

    # The VM must exist before we can operate on its power state.
    vm = pyv.get_vm()
    if not vm:
        # avoid shadowing the builtin 'id'
        vm_id = module.params.get('uuid') or module.params.get('moid') or module.params.get('name')
        module.fail_json(msg="Unable to set power state for non-existing virtual machine : '%s'" % vm_id)

    scheduled_at = module.params.get('scheduled_at')
    if scheduled_at:
        # Scheduled tasks are a vCenter-only feature.
        if not pyv.is_vcenter():
            module.fail_json(msg="Scheduling task requires vCenter, hostname %s "
                                 "is an ESXi server." % module.params.get('hostname'))
        # Map module states to the vSphere API methods invoked by the scheduled task.
        powerstate = {
            'present': vim.VirtualMachine.PowerOn,
            'powered-off': vim.VirtualMachine.PowerOff,
            'powered-on': vim.VirtualMachine.PowerOn,
            'reboot-guest': vim.VirtualMachine.RebootGuest,
            'restarted': vim.VirtualMachine.Reset,
            'shutdown-guest': vim.VirtualMachine.ShutdownGuest,
            'suspended': vim.VirtualMachine.Suspend,
        }
        try:
            dt = datetime.strptime(scheduled_at, '%d/%m/%Y %H:%M')
        except ValueError as e:
            # Note the trailing space before the implicit string concatenation:
            # without it the message reads "object,please specify".
            module.fail_json(msg="Failed to convert given date and time string to Python datetime object, "
                                 "please specify string in 'dd/mm/yyyy hh:mm' format: %s" % to_native(e))
        schedule_task_spec = vim.scheduler.ScheduledTaskSpec()
        schedule_task_name = module.params['schedule_task_name'] or 'task_%s' % str(randint(10000, 99999))
        schedule_task_desc = module.params['schedule_task_description']
        if schedule_task_desc is None:
            schedule_task_desc = 'Schedule task for vm %s for ' \
                                 'operation %s at %s' % (vm.name, module.params['state'], scheduled_at)
        schedule_task_spec.name = schedule_task_name
        schedule_task_spec.description = schedule_task_desc
        schedule_task_spec.scheduler = vim.scheduler.OnceTaskScheduler()
        schedule_task_spec.scheduler.runAt = dt
        schedule_task_spec.action = vim.action.MethodAction()
        schedule_task_spec.action.name = powerstate[module.params['state']]
        schedule_task_spec.enabled = module.params['schedule_task_enabled']

        try:
            pyv.content.scheduledTaskManager.CreateScheduledTask(vm, schedule_task_spec)
            # Task creation is asynchronous; creating it is the change we report.
            module.exit_json(changed=True)
        except vim.fault.InvalidName as e:
            module.fail_json(msg="Failed to create scheduled task %s for %s : %s" % (module.params.get('state'),
                                                                                     vm.name,
                                                                                     to_native(e.msg)))
        except vim.fault.DuplicateName as e:
            # A task of the same name already exists -- treat as an idempotent no-op.
            module.exit_json(changed=False, details=to_native(e.msg))
        except vmodl.fault.InvalidArgument as e:
            module.fail_json(msg="Failed to create scheduled task %s as specifications "
                                 "given are invalid: %s" % (module.params.get('state'),
                                                            to_native(e.msg)))
    else:
        # If the VM is blocked by a pending question, answer it when instructed to.
        if check_answer_question_status(vm) and module.params['answer']:
            try:
                responses = make_answer_response(vm, module.params['answer'])
                answer_question(vm, responses)
            except Exception as e:
                module.fail_json(msg="%s" % e)

            # Wait until the question is cleared; poll once per second instead of
            # busy-spinning the CPU in a tight loop.
            while check_answer_question_status(vm):
                time.sleep(1)

            result['changed'] = True
            result['instance'] = gather_vm_facts(pyv.content, vm)
        else:
            result = set_vm_power_state(pyv.content, vm, module.params['state'], module.params['force'],
                                        module.params['state_change_timeout'], module.params['answer'])
            result['answer'] = module.params['answer']

    if result.get('failed') is True:
        module.fail_json(**result)

    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_register_operation.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_register_operation.py
new file mode 100644
index 000000000..4aa7e84a9
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_register_operation.py
@@ -0,0 +1,283 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, sky-joker
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: vmware_guest_register_operation
+short_description: VM inventory registration operation
+author:
+ - sky-joker (@sky-joker)
+description:
+ - This module can register or unregister VMs to the inventory.
+options:
+ datacenter:
+ description:
+ - Destination datacenter for the register/unregister operation.
+ - This parameter is case sensitive.
+ type: str
+ default: ha-datacenter
+ cluster:
+ description:
+ - Specify a cluster name to register VM.
+ type: str
+ folder:
+ description:
+ - Destination folder, absolute path of the target folder.
+ - The folder should include the datacenter. ESX's datacenter is ha-datacenter.
+ - This parameter is case sensitive.
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ type: str
+ name:
+ description:
+ - Specify VM name to be registered in the inventory.
+ required: true
+ type: str
+ uuid:
+ description:
+ - UUID of the virtual machine to manage if known, this is VMware's unique identifier.
+ - If virtual machine does not exist, then this parameter is ignored.
+ type: str
+ esxi_hostname:
+ description:
+ - The ESXi hostname where the virtual machine will run.
+ - This parameter is case sensitive.
+ type: str
+ template:
+ description:
+ - Whether to register VM as a template.
+ default: false
+ type: bool
+ path:
+ description:
+ - Specify the path of vmx file.
+ - 'Examples:'
+ - ' [datastore1] vm/vm.vmx'
+ - ' [datastore1] vm/vm.vmtx'
+ type: str
+ resource_pool:
+ description:
+ - Specify a resource pool name to register VM.
+ - This parameter is case sensitive.
+ - Resource pool should be child of the selected host parent.
+ type: str
+ state:
+ description:
+ - Specify the state the virtual machine should be in.
+ - if set to C(present), register VM in inventory.
+ - if set to C(absent), unregister VM from inventory.
+ default: present
+ choices: [ present, absent ]
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Register VM to inventory
+ community.vmware.vmware_guest_register_operation:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter }}"
+ folder: "/vm"
+ esxi_hostname: "{{ esxi_hostname }}"
+ name: "{{ vm_name }}"
+ template: false
+ path: "[datastore1] vm/vm.vmx"
+ state: present
+
+- name: Register VM in resource pool
+ community.vmware.vmware_guest_register_operation:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter }}"
+ folder: "/vm"
+ resource_pool: "{{ resource_pool }}"
+ name: "{{ vm_name }}"
+ template: false
+ path: "[datastore1] vm/vm.vmx"
+ state: present
+
+- name: Register VM in Cluster
+ community.vmware.vmware_guest_register_operation:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter }}"
+ folder: "/vm"
+ cluster: "{{ cluster_name }}"
+ name: "{{ vm_name }}"
+ template: false
+ path: "[datastore1] vm/vm.vmx"
+ state: present
+
+- name: UnRegister VM from inventory
+ community.vmware.vmware_guest_register_operation:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter }}"
+ folder: "/vm"
+ name: "{{ vm_name }}"
+ state: absent
+'''
+
+RETURN = r'''
+'''
+
+
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, find_resource_pool_by_name, \
+ wait_for_task, compile_folder_path_for_object, find_cluster_by_name
+from ansible.module_utils.basic import AnsibleModule
+
+
class VMwareGuestRegisterOperation(PyVmomi):
    """Register a VM (.vmx/.vmtx file) into, or unregister it from, the inventory."""

    def __init__(self, module):
        super(VMwareGuestRegisterOperation, self).__init__(module)
        # Cache the module parameters this operation uses.
        self.datacenter = module.params["datacenter"]
        self.cluster = module.params["cluster"]
        self.folder = module.params["folder"]
        self.name = module.params["name"]
        self.esxi_hostname = module.params["esxi_hostname"]
        self.path = module.params["path"]
        self.template = module.params["template"]
        self.resource_pool = module.params["resource_pool"]
        self.state = module.params["state"]

    def execute(self):
        """Perform the register/unregister operation and exit the module.

        Always terminates via module.exit_json()/fail_json().
        """
        result = dict(changed=False)

        datacenter = self.find_datacenter_by_name(self.datacenter)
        if not datacenter:
            self.module.fail_json(msg="Cannot find the specified Datacenter: %s" % self.datacenter)

        dcpath = compile_folder_path_for_object(datacenter)
        if not dcpath.endswith("/"):
            dcpath += "/"

        # Build the full inventory path of the destination folder.
        if self.folder in [None, "", "/"]:
            self.module.fail_json(msg="Please specify folder path other than blank or '/'")
        elif self.folder.startswith("/vm"):
            # A path starting with "/vm" is relative to the datacenter.
            fullpath = "%s%s%s" % (dcpath, self.datacenter, self.folder)
        else:
            fullpath = "%s%s" % (dcpath, self.folder)

        folder_obj = self.content.searchIndex.FindByInventoryPath(inventoryPath="%s" % fullpath)
        if not folder_obj:
            details = {
                'datacenter': datacenter.name,
                'datacenter_path': dcpath,
                'folder': self.folder,
                'full_search_path': fullpath,
            }
            self.module.fail_json(msg="No folder %s matched in the search path : %s" % (self.folder, fullpath),
                                  details=details)

        if self.state == "present":
            vm_obj = self.get_vm()
            if vm_obj:
                # VM is already registered: nothing to do, in both check mode
                # and normal mode.  (The original code tested check_mode here
                # and then unconditionally exited with the same result anyway.)
                self.module.exit_json(**result)

            if self.module.check_mode:
                result['changed'] = True
                self.module.exit_json(**result)

            if self.esxi_hostname:
                host_obj = self.find_hostsystem_by_name(self.esxi_hostname)
                if not host_obj:
                    self.module.fail_json(msg="Cannot find the specified ESXi host: %s" % self.esxi_hostname)
            else:
                host_obj = None

            # Resolve the resource pool: cluster > explicit resource pool > host default.
            if self.cluster:
                cluster_obj = find_cluster_by_name(self.content, self.cluster, datacenter)
                if not cluster_obj:
                    self.module.fail_json(msg="Cannot find the specified cluster name: %s" % self.cluster)
                resource_pool_obj = cluster_obj.resourcePool
            elif self.resource_pool:
                resource_pool_obj = find_resource_pool_by_name(self.content, self.resource_pool)
                if not resource_pool_obj:
                    self.module.fail_json(msg="Cannot find the specified resource pool: %s" % self.resource_pool)
            else:
                # required_one_of in main() guarantees esxi_hostname is set here.
                resource_pool_obj = host_obj.parent.resourcePool

            task = folder_obj.RegisterVM_Task(path=self.path, name=self.name, asTemplate=self.template,
                                              pool=resource_pool_obj, host=host_obj)

            changed = False
            try:
                changed, info = wait_for_task(task)
            except Exception as task_e:
                self.module.fail_json(msg=to_native(task_e))

            result.update(changed=changed)
            self.module.exit_json(**result)

        if self.state == "absent":
            vm_obj = self.get_vm()
            if self.module.check_mode:
                # Report whether an unregister would happen without doing it.
                result['changed'] = bool(vm_obj)
                self.module.exit_json(**result)

            if vm_obj:
                try:
                    vm_obj.UnregisterVM()
                    result.update(changed=True)
                except Exception as exc:
                    self.module.fail_json(msg=to_native(exc))

            self.module.exit_json(**result)
+
+
def main():
    """Build the module argument spec and run the register/unregister operation."""
    spec = vmware_argument_spec()
    spec.update(
        datacenter=dict(type="str", default="ha-datacenter"),
        cluster=dict(type="str"),
        folder=dict(type="str"),
        name=dict(type="str", required=True),
        uuid=dict(type="str"),
        esxi_hostname=dict(type="str"),
        path=dict(type="str"),
        template=dict(type="bool", default=False),
        resource_pool=dict(type="str"),
        state=dict(type="str", default="present", choices=["present", "absent"]),
    )

    module = AnsibleModule(
        argument_spec=spec,
        mutually_exclusive=[
            ['cluster', 'esxi_hostname'],
        ],
        required_one_of=[
            ['name', 'uuid'],
            ['cluster', 'esxi_hostname'],
        ],
        supports_check_mode=True,
    )

    # All exits (success and failure) happen inside execute().
    VMwareGuestRegisterOperation(module).execute()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_screenshot.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_screenshot.py
new file mode 100644
index 000000000..1f3fb43ec
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_screenshot.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# Copyright: (c) 2019, Diane Wang <dianew@vmware.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_screenshot
+short_description: Create a screenshot of the Virtual Machine console.
+description:
+ - This module is used to take screenshot of the given virtual machine when virtual machine is powered on.
+ - All parameters and VMware object names are case sensitive.
+author:
+ - Diane Wang (@Tomorrow9) <dianew@vmware.com>
+options:
+ name:
+ description:
+ - Name of the virtual machine.
+ - This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
+ type: str
+ uuid:
+ description:
+ - UUID of the instance to gather facts if known, this is VMware's unique identifier.
+ - This is a required parameter, if parameter C(name) or C(moid) is not supplied.
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ type: str
+ folder:
+ description:
+ - Destination folder, absolute or relative path to find an existing guest.
+ - This is a required parameter, only if multiple VMs are found with same name.
+ - The folder should include the datacenter. ESXi server's datacenter is ha-datacenter.
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ type: str
+ cluster:
+ description:
+ - The name of cluster where the virtual machine is running.
+ - This is a required parameter, if C(esxi_hostname) is not set.
+ - C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
+ type: str
+ esxi_hostname:
+ description:
+ - The ESXi hostname where the virtual machine is running.
+ - This is a required parameter, if C(cluster) is not set.
+ - C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
+ type: str
+ datacenter:
+ description:
+ - The datacenter name to which virtual machine belongs to.
+ type: str
+ local_path:
+ description:
+ - 'If C(local_path) is not set, the created screenshot file will be kept in the directory of the virtual machine
+ on ESXi host. If C(local_path) is set to a valid path on local machine, then the screenshot file will be
+ downloaded from ESXi host to the local directory.'
+ - 'If not download screenshot file to local machine, you can open it through the returned file URL in screenshot
+ facts manually.'
+ type: path
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: take a screenshot of the virtual machine console
+ community.vmware.vmware_guest_screenshot:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ folder: "{{ folder_name }}"
+ name: "{{ vm_name }}"
+ local_path: "/tmp/"
+ delegate_to: localhost
+ register: take_screenshot
+
+- name: Take a screenshot of the virtual machine console using MoID
+ community.vmware.vmware_guest_screenshot:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ folder: "{{ folder_name }}"
+ moid: vm-42
+ local_path: "/tmp/"
+ delegate_to: localhost
+ register: take_screenshot
+'''
+
+RETURN = r'''
+screenshot_info:
+ description: display the facts of captured virtual machine screenshot file
+ returned: always
+ type: dict
+ sample: {
+ "virtual_machine": "test_vm",
+ "screenshot_file": "[datastore0] test_vm/test_vm-1.png",
+ "task_start_time": "2019-05-25T10:35:04.215016Z",
+ "task_complete_time": "2019-05-25T10:35:04.412622Z",
+ "result": "success",
+ "screenshot_file_url": "https://test_vcenter/folder/test_vm/test_vm-1.png?dcPath=test-dc&dsName=datastore0",
+ "download_local_path": "/tmp/",
+ "download_file_size": 2367,
+ }
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode, quote
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import open_url
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task, get_parent_datacenter
+import os
+
+
class PyVmomiHelper(PyVmomi):
    """Helper around PyVmomi to take, and optionally download, a VM console screenshot."""

    def __init__(self, module):
        super(PyVmomiHelper, self).__init__(module)
        # Set to True once a screenshot has actually been taken.
        self.change_detected = False

    def generate_http_access_url(self, file_path):
        """Build the datastore-browser HTTPS URL for a screenshot file.

        :param file_path: datastore path such as '[datastore0] test_vm/test_vm-1.png'
        :return: the URL string, or None if file_path is empty or malformed
        """
        if not file_path:
            return None

        # Split on the first '] ' so datastore names or file paths that
        # themselves contain spaces are handled correctly; a plain
        # file_path.split() would break such paths apart (or raise IndexError).
        datastore, sep, relative_path = file_path.partition('] ')
        if not sep:
            return None

        path = "/folder/%s" % quote(relative_path)
        params = dict(dsName=datastore.lstrip('['))
        if not self.is_vcenter():
            datacenter = 'ha-datacenter'
        else:
            # urlencode() below already percent-encodes special characters
            # such as '&'; pre-escaping them here would double-encode the value.
            datacenter = get_parent_datacenter(self.current_vm_obj).name
        params['dcPath'] = datacenter
        return "https://%s%s?%s" % (self.params['hostname'], path, urlencode(params))

    def download_screenshot_file(self, file_url, local_file_path, file_name):
        """Download the screenshot from file_url to the local machine.

        If local_file_path ends with '.png' it is treated as the full target
        file name; otherwise file_name is used inside that directory, which is
        created if missing.

        :return: number of bytes downloaded
        """
        response = None
        download_size = 0
        # NOTE(review): '/'-based splitting assumes POSIX-style local paths -- confirm.
        if local_file_path.endswith('.png'):
            local_file_name = local_file_path.split('/')[-1]
            local_file_path = local_file_path.rsplit('/', 1)[0]
        else:
            local_file_name = file_name
        if not os.path.exists(local_file_path):
            try:
                os.makedirs(local_file_path)
            except OSError as err:
                self.module.fail_json(msg="Exception caught when create folder %s on local machine, with error %s"
                                          % (local_file_path, to_native(err)))
        local_file = os.path.join(local_file_path, local_file_name)
        with open(local_file, 'wb') as handle:
            try:
                response = open_url(file_url, url_username=self.params.get('username'),
                                    url_password=self.params.get('password'), validate_certs=False)
            except Exception as err:
                self.module.fail_json(msg="Download screenshot file from URL %s, failed due to %s" % (file_url, to_native(err)))
            if not response or response.getcode() >= 400:
                self.module.fail_json(msg="Download screenshot file from URL %s, failed with response %s, response code %s"
                                          % (file_url, response, response.getcode()))
            # Stream in 1 MiB chunks so large screenshots do not have to fit
            # in memory all at once.
            bytes_read = response.read(2 ** 20)
            while bytes_read:
                handle.write(bytes_read)
                handle.flush()
                os.fsync(handle.fileno())
                download_size += len(bytes_read)
                bytes_read = response.read(2 ** 20)

        return download_size

    def get_screenshot_facts(self, task_info, file_url, file_size):
        """Assemble the screenshot_info dict returned to the user from the completed task."""
        screenshot_facts = dict()
        if task_info is not None:
            screenshot_facts = dict(
                virtual_machine=task_info.entityName,
                screenshot_file=task_info.result,
                task_start_time=task_info.startTime,
                task_complete_time=task_info.completeTime,
                result=task_info.state,
                screenshot_file_url=file_url,
                download_local_path=self.params.get('local_path'),
                download_file_size=file_size,
            )

        return screenshot_facts

    def take_vm_screenshot(self):
        """Take a screenshot of the powered-on VM and optionally download it.

        :return: result dict with 'changed', 'failed' and either 'msg' (on
            task error) or 'screenshot_info'
        """
        # Screenshots can only be taken from a running VM.
        if self.current_vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn:
            self.module.fail_json(msg="VM is %s, valid power state is poweredOn." % self.current_vm_obj.runtime.powerState)
        try:
            task = self.current_vm_obj.CreateScreenshot_Task()
            wait_for_task(task)
        except vim.fault.FileFault as e:
            self.module.fail_json(msg="Failed to create screenshot due to errors when creating or accessing one or more"
                                      " files needed for this operation, %s" % to_native(e.msg))
        except vim.fault.InvalidState as e:
            self.module.fail_json(msg="Failed to create screenshot due to VM is not ready to respond to such requests,"
                                      " %s" % to_native(e.msg))
        except vmodl.RuntimeFault as e:
            self.module.fail_json(msg="Failed to create screenshot due to runtime fault, %s," % to_native(e.msg))
        except vim.fault.TaskInProgress as e:
            self.module.fail_json(msg="Failed to create screenshot due to VM is busy, %s" % to_native(e.msg))

        if task.info.state == 'error':
            return {'changed': self.change_detected, 'failed': True, 'msg': task.info.error.msg}

        download_file_size = None
        self.change_detected = True
        file_url = self.generate_http_access_url(task.info.result)
        if self.params.get('local_path') and file_url:
            download_file_size = self.download_screenshot_file(file_url=file_url,
                                                               local_file_path=self.params['local_path'],
                                                               file_name=task.info.result.split('/')[-1])
        screenshot_facts = self.get_screenshot_facts(task.info, file_url, download_file_size)
        return {'changed': self.change_detected, 'failed': False, 'screenshot_info': screenshot_facts}
+
+
def main():
    """Entry point: locate the target VM and capture a console screenshot."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        folder=dict(type='str'),
        datacenter=dict(type='str'),
        esxi_hostname=dict(type='str'),
        cluster=dict(type='str'),
        local_path=dict(type='path'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[
            ['name', 'uuid', 'moid']
        ],
        # The DOCUMENTATION declares these options mutually exclusive;
        # enforce that instead of silently accepting both.
        mutually_exclusive=[
            ['cluster', 'esxi_hostname']
        ],
    )
    pyv = PyVmomiHelper(module)
    vm = pyv.get_vm()
    if not vm:
        vm_id = module.params.get('uuid') or module.params.get('name') or module.params.get('moid')
        module.fail_json(msg='Unable to find the specified virtual machine : %s' % vm_id)

    result = pyv.take_vm_screenshot()
    if result['failed']:
        module.fail_json(**result)
    else:
        module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_sendkey.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_sendkey.py
new file mode 100644
index 000000000..3b858edd2
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_sendkey.py
@@ -0,0 +1,405 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Diane Wang <dianew@vmware.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_sendkey
+short_description: Send USB HID codes to the Virtual Machine's keyboard.
+description:
+ - This module is used to send keystrokes to given virtual machine.
+ - All parameters and VMware object names are case sensitive.
+author:
+ - Diane Wang (@Tomorrow9) <dianew@vmware.com>
+options:
+ name:
+ description:
+ - Name of the virtual machine.
+ - This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
+ type: str
+ uuid:
+ description:
+ - UUID of the instance to gather facts if known, this is VMware's unique identifier.
+ - This is a required parameter, if parameter C(name) or C(moid) is not supplied.
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ type: str
+ folder:
+ description:
+ - Destination folder, absolute or relative path to find an existing guest.
+ - This is a required parameter, only if multiple VMs are found with same name.
+ - The folder should include the datacenter. ESXi server's datacenter is ha-datacenter.
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ type: str
+ cluster:
+ description:
+ - The name of cluster where the virtual machine is running.
+ - This is a required parameter, if C(esxi_hostname) is not set.
+ - C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
+ type: str
+ esxi_hostname:
+ description:
+ - The ESXi hostname where the virtual machine is running.
+ - This is a required parameter, if C(cluster) is not set.
+ - C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
+ type: str
+ datacenter:
+ description:
+ - The datacenter name to which virtual machine belongs to.
+ type: str
+ string_send:
+ description:
+ - The string will be sent to the virtual machine.
+ - This string can contain valid special character, alphabet and digit on the keyboard.
+ type: str
+ keys_send:
+ description:
+ - The list of the keys will be sent to the virtual machine.
+ - 'Valid values are C(ENTER), C(ESC), C(BACKSPACE), C(TAB), C(SPACE), C(CAPSLOCK), C(HOME), C(DELETE), C(END), C(CTRL_ALT_DEL),
+ C(CTRL_C), C(CTRL_X) and C(F1) to C(F12), C(RIGHTARROW), C(LEFTARROW), C(DOWNARROW), C(UPARROW).'
+ - If both C(keys_send) and C(string_send) are specified, keys in C(keys_send) list will be sent in front of the C(string_send).
+ - Values C(HOME) and C(END) are added in version 1.17.0.
+ type: list
+ default: []
+ elements: str
+ sleep_time:
+ description:
+ - Sleep time in seconds between two keys or string sent to the virtual machine.
      - The API is faster than the actual key or string entry into the virtual machine; this parameter allows
        control of the delay between keys and/or strings.
+ type: int
+ default: 0
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Send list of keys to virtual machine
+ community.vmware.vmware_guest_sendkey:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ folder: "{{ folder_name }}"
+ name: "{{ vm_name }}"
+ keys_send:
+ - TAB
+ - TAB
+ - ENTER
+ delegate_to: localhost
+ register: keys_num_sent
+
+- name: Send list of keys to virtual machine using MoID
+ community.vmware.vmware_guest_sendkey:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ folder: "{{ folder_name }}"
+ moid: vm-42
+ keys_send:
+ - CTRL_ALT_DEL
+ delegate_to: localhost
+ register: ctrl_alt_del_sent
+
+- name: Send a string to virtual machine
+ community.vmware.vmware_guest_sendkey:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ folder: "{{ folder_name }}"
+ name: "{{ vm_name }}"
+ string_send: "user_logon"
+ delegate_to: localhost
+ register: keys_num_sent
+'''
+
+RETURN = r'''
+sendkey_info:
+ description: display the keys and the number of keys sent to the virtual machine
+ returned: always
+ type: dict
+ sample: {
+ "virtual_machine": "test_vm",
+ "keys_send": [
+ "SPACE",
+ "DOWNARROW",
+ "DOWNARROW",
+ "ENTER"
+ ],
+ "string_send": null,
+ "keys_send_number": 4,
+ "returned_keys_send_number": 4,
+ }
+'''
+
+import time
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+
+
class PyVmomiHelper(PyVmomi):
    """Translate characters and named keys into USB HID scan-code events and
    deliver them to a virtual machine's virtual keyboard via PutUsbScanCodes().
    """

    def __init__(self, module):
        super(PyVmomiHelper, self).__init__(module)
        # Set to True once at least one scan code was successfully sent.
        self.change_detected = False
        # Number of key events queued for sending (not the count reported back by the host).
        self.num_keys_send = 0
        # HID usage tables https://www.usb.org/sites/default/files/documents/hut1_12v2.pdf
        # define valid characters and keys value, hex_code, key value and key modifier
        # Each entry is (name(s), HID usage code, [(character, [modifiers]), ...]):
        #   - a tuple of names describes the unshifted/shifted pair of one physical key,
        #   - a plain string names a special key (ENTER, F1, CTRL_C, ...).
        self.keys_hid_code = [
            (('a', 'A'), '0x04', [('a', []), ('A', ['LEFTSHIFT'])]),
            (('b', 'B'), '0x05', [('b', []), ('B', ['LEFTSHIFT'])]),
            (('c', 'C'), '0x06', [('c', []), ('C', ['LEFTSHIFT'])]),
            (('d', 'D'), '0x07', [('d', []), ('D', ['LEFTSHIFT'])]),
            (('e', 'E'), '0x08', [('e', []), ('E', ['LEFTSHIFT'])]),
            (('f', 'F'), '0x09', [('f', []), ('F', ['LEFTSHIFT'])]),
            (('g', 'G'), '0x0a', [('g', []), ('G', ['LEFTSHIFT'])]),
            (('h', 'H'), '0x0b', [('h', []), ('H', ['LEFTSHIFT'])]),
            (('i', 'I'), '0x0c', [('i', []), ('I', ['LEFTSHIFT'])]),
            (('j', 'J'), '0x0d', [('j', []), ('J', ['LEFTSHIFT'])]),
            (('k', 'K'), '0x0e', [('k', []), ('K', ['LEFTSHIFT'])]),
            (('l', 'L'), '0x0f', [('l', []), ('L', ['LEFTSHIFT'])]),
            (('m', 'M'), '0x10', [('m', []), ('M', ['LEFTSHIFT'])]),
            (('n', 'N'), '0x11', [('n', []), ('N', ['LEFTSHIFT'])]),
            (('o', 'O'), '0x12', [('o', []), ('O', ['LEFTSHIFT'])]),
            (('p', 'P'), '0x13', [('p', []), ('P', ['LEFTSHIFT'])]),
            (('q', 'Q'), '0x14', [('q', []), ('Q', ['LEFTSHIFT'])]),
            (('r', 'R'), '0x15', [('r', []), ('R', ['LEFTSHIFT'])]),
            (('s', 'S'), '0x16', [('s', []), ('S', ['LEFTSHIFT'])]),
            (('t', 'T'), '0x17', [('t', []), ('T', ['LEFTSHIFT'])]),
            (('u', 'U'), '0x18', [('u', []), ('U', ['LEFTSHIFT'])]),
            (('v', 'V'), '0x19', [('v', []), ('V', ['LEFTSHIFT'])]),
            (('w', 'W'), '0x1a', [('w', []), ('W', ['LEFTSHIFT'])]),
            (('x', 'X'), '0x1b', [('x', []), ('X', ['LEFTSHIFT'])]),
            (('y', 'Y'), '0x1c', [('y', []), ('Y', ['LEFTSHIFT'])]),
            (('z', 'Z'), '0x1d', [('z', []), ('Z', ['LEFTSHIFT'])]),
            (('1', '!'), '0x1e', [('1', []), ('!', ['LEFTSHIFT'])]),
            (('2', '@'), '0x1f', [('2', []), ('@', ['LEFTSHIFT'])]),
            (('3', '#'), '0x20', [('3', []), ('#', ['LEFTSHIFT'])]),
            (('4', '$'), '0x21', [('4', []), ('$', ['LEFTSHIFT'])]),
            (('5', '%'), '0x22', [('5', []), ('%', ['LEFTSHIFT'])]),
            (('6', '^'), '0x23', [('6', []), ('^', ['LEFTSHIFT'])]),
            (('7', '&'), '0x24', [('7', []), ('&', ['LEFTSHIFT'])]),
            (('8', '*'), '0x25', [('8', []), ('*', ['LEFTSHIFT'])]),
            (('9', '('), '0x26', [('9', []), ('(', ['LEFTSHIFT'])]),
            (('0', ')'), '0x27', [('0', []), (')', ['LEFTSHIFT'])]),
            (('-', '_'), '0x2d', [('-', []), ('_', ['LEFTSHIFT'])]),
            (('=', '+'), '0x2e', [('=', []), ('+', ['LEFTSHIFT'])]),
            (('[', '{'), '0x2f', [('[', []), ('{', ['LEFTSHIFT'])]),
            ((']', '}'), '0x30', [(']', []), ('}', ['LEFTSHIFT'])]),
            (('\\', '|'), '0x31', [('\\', []), ('|', ['LEFTSHIFT'])]),
            ((';', ':'), '0x33', [(';', []), (':', ['LEFTSHIFT'])]),
            (('\'', '"'), '0x34', [('\'', []), ('"', ['LEFTSHIFT'])]),
            (('`', '~'), '0x35', [('`', []), ('~', ['LEFTSHIFT'])]),
            ((',', '<'), '0x36', [(',', []), ('<', ['LEFTSHIFT'])]),
            (('.', '>'), '0x37', [('.', []), ('>', ['LEFTSHIFT'])]),
            (('/', '?'), '0x38', [('/', []), ('?', ['LEFTSHIFT'])]),
            ('ENTER', '0x28', [('', [])]),
            ('ESC', '0x29', [('', [])]),
            ('BACKSPACE', '0x2a', [('', [])]),
            ('TAB', '0x2b', [('', [])]),
            ('SPACE', '0x2c', [(' ', [])]),
            ('CAPSLOCK', '0x39', [('', [])]),
            ('F1', '0x3a', [('', [])]),
            ('F2', '0x3b', [('', [])]),
            ('F3', '0x3c', [('', [])]),
            ('F4', '0x3d', [('', [])]),
            ('F5', '0x3e', [('', [])]),
            ('F6', '0x3f', [('', [])]),
            ('F7', '0x40', [('', [])]),
            ('F8', '0x41', [('', [])]),
            ('F9', '0x42', [('', [])]),
            ('F10', '0x43', [('', [])]),
            ('F11', '0x44', [('', [])]),
            ('F12', '0x45', [('', [])]),
            ('HOME', '0x4a', [('', [])]),
            ('DELETE', '0x4c', [('', [])]),
            ('END', '0x4d', [('', [])]),
            ('CTRL_ALT_DEL', '0x4c', [('', ['CTRL', 'ALT'])]),
            ('CTRL_C', '0x06', [('', ['CTRL'])]),
            ('CTRL_X', '0x1b', [('', ['CTRL'])]),
            ('RIGHTARROW', '0x4f', [('', [])]),
            ('LEFTARROW', '0x50', [('', [])]),
            ('DOWNARROW', '0x51', [('', [])]),
            ('UPARROW', '0x52', [('', [])]),
        ]

    @staticmethod
    def hid_to_hex(hid_code):
        """Convert a HID usage code string (e.g. '0x04') to the integer form
        passed to PutUsbScanCodes: the usage code in the upper 16 bits.

        NOTE(review): the low bits are ORed with 0o0007 (decimal 7);
        presumably required flag bits of the usbHidCode value - confirm
        against the vSphere API documentation.
        """
        return int(hid_code, 16) << 16 | 0o0007

    def get_hid_from_key(self, key):
        """Return (hid_code, modifier_list) for one character or named key.

        Implicitly returns None when the key is not in the table; callers
        in send_key_to_vm() validate keys before calling this.
        """
        if key == ' ':
            # Space is special-cased: its table entry is the named key 'SPACE'.
            return '0x2c', []
        for keys_name, key_code, keys_value in self.keys_hid_code:
            if isinstance(keys_name, tuple):
                # Unshifted/shifted character pair: match the exact character.
                for keys in keys_value:
                    if key == keys[0]:
                        return key_code, keys[1]
            else:
                # Named special key: match by name.
                if key == keys_name:
                    return key_code, keys_value[0][1]

    def get_key_event(self, hid_code, modifiers):
        """Build a vim.UsbScanCodeSpecKeyEvent for the given HID usage code
        and modifier names ('LEFTSHIFT', 'CTRL', 'ALT')."""
        key_event = vim.UsbScanCodeSpecKeyEvent()
        key_modifier = vim.UsbScanCodeSpecModifierType()
        # Start from an all-false modifier set, then enable requested ones.
        key_modifier.leftAlt = False
        key_modifier.leftControl = False
        key_modifier.leftGui = False
        key_modifier.leftShift = False
        key_modifier.rightAlt = False
        key_modifier.rightControl = False
        key_modifier.rightGui = False
        key_modifier.rightShift = False
        # rightShift, rightControl, rightAlt, leftGui, rightGui are not used
        if "LEFTSHIFT" in modifiers:
            key_modifier.leftShift = True
        if "CTRL" in modifiers:
            key_modifier.leftControl = True
        if "ALT" in modifiers:
            key_modifier.leftAlt = True
        key_event.modifiers = key_modifier
        key_event.usbHidCode = self.hid_to_hex(hid_code)

        return key_event

    def get_sendkey_facts(self, vm_obj, returned_value=0):
        """Assemble the 'sendkey_info' result dict; empty when vm_obj is None.

        returned_value is the number of keys the host reported as processed.
        """
        sendkey_facts = dict()
        if vm_obj is not None:
            sendkey_facts = dict(
                virtual_machine=vm_obj.name,
                keys_send=self.params['keys_send'],
                string_send=self.params['string_send'],
                keys_send_number=self.num_keys_send,
                returned_keys_send_number=returned_value,
            )

        return sendkey_facts

    def send_key_events(self, vm_obj, key_queue, sleep_time=0):
        """
        Send USB HID scan codes individually to prevent dropping or clobbering.

        Returns the total number of keys the host reports as processed.
        """
        send_keys = 0
        for item in key_queue:
            usb_spec = vim.UsbScanCodeSpec()
            usb_spec.keyEvents.append(item)
            send_keys += vm_obj.PutUsbScanCodes(usb_spec)
            # Sleep in between key / string send event
            time.sleep(sleep_time)
        return send_keys

    def send_key_to_vm(self, vm_obj):
        """Validate and queue all requested keys and characters, send them to
        the VM, and return the module result dict.

        The result is marked failed=True when the host processed fewer key
        events than were queued.
        """
        key_event = None
        num_keys_returned = 0
        key_queue = []
        if self.params['keys_send']:
            # Named keys are sent before any string (documented behavior).
            for specified_key in self.params['keys_send']:
                key_found = False
                for keys in self.keys_hid_code:
                    if (isinstance(keys[0], tuple) and specified_key in keys[0]) or \
                            (not isinstance(keys[0], tuple) and specified_key == keys[0]):
                        hid_code, modifiers = self.get_hid_from_key(specified_key)
                        key_event = self.get_key_event(hid_code, modifiers)
                        key_queue.append(key_event)
                        self.num_keys_send += 1
                        key_found = True
                        break
                if not key_found:
                    self.module.fail_json(msg="keys_send parameter: '%s' in %s not supported."
                                              % (specified_key, self.params['keys_send']))

        if self.params['string_send']:
            for char in self.params['string_send']:
                key_found = False
                for keys in self.keys_hid_code:
                    # Only character entries (tuple names) are valid in a string;
                    # the space character is handled via the 'SPACE' entry.
                    if (isinstance(keys[0], tuple) and char in keys[0]) or char == ' ':
                        hid_code, modifiers = self.get_hid_from_key(char)
                        key_event = self.get_key_event(hid_code, modifiers)
                        key_queue.append(key_event)
                        self.num_keys_send += 1
                        key_found = True
                        break
                if not key_found:
                    self.module.fail_json(msg="string_send parameter: '%s' contains char: '%s' not supported."
                                              % (self.params['string_send'], char))

        if key_queue:
            try:
                num_keys_returned = self.send_key_events(vm_obj=vm_obj, key_queue=key_queue, sleep_time=self.module.params.get('sleep_time'))
                self.change_detected = True
            except vmodl.RuntimeFault as e:
                self.module.fail_json(msg="Failed to send key %s to virtual machine due to %s" % (key_event, to_native(e.msg)))

        sendkey_facts = self.get_sendkey_facts(vm_obj, num_keys_returned)
        if num_keys_returned != self.num_keys_send:
            # The host accepted fewer events than we queued: report failure.
            results = {'changed': self.change_detected, 'failed': True, 'sendkey_info': sendkey_facts}
        else:
            results = {'changed': self.change_detected, 'failed': False, 'sendkey_info': sendkey_facts}

        return results
+
+
def main():
    """Module entry point: resolve the target VM and send the requested keystrokes."""
    spec = vmware_argument_spec()
    spec.update(
        dict(
            name=dict(type='str'),
            uuid=dict(type='str'),
            moid=dict(type='str'),
            folder=dict(type='str'),
            datacenter=dict(type='str'),
            esxi_hostname=dict(type='str'),
            cluster=dict(type='str'),
            keys_send=dict(type='list', default=[], elements='str', no_log=False),
            string_send=dict(type='str'),
            sleep_time=dict(type='int', default=0),
        )
    )

    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[
            ['name', 'uuid', 'moid']
        ],
    )

    helper = PyVmomiHelper(module)
    target_vm = helper.get_vm()
    if not target_vm:
        # Echo back whichever identifier the user supplied.
        shown_id = module.params.get('uuid') or module.params.get('name') or module.params.get('moid')
        module.fail_json(msg='Unable to find the specified virtual machine : %s ' % shown_id)

    outcome = helper.send_key_to_vm(target_vm)
    if outcome['failed']:
        module.fail_json(**outcome)
    # fail_json() does not return, so reaching this line means success.
    module.exit_json(**outcome)
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_serial_port.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_serial_port.py
new file mode 100644
index 000000000..bc1b4c7db
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_serial_port.py
@@ -0,0 +1,580 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Anusha Hegde <anushah@vmware.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_serial_port
+short_description: Manage serial ports on an existing VM
+description:
+ - "This module can be used to manage serial ports on an existing VM"
+options:
+ name:
+ description:
+ - Name of the virtual machine.
+ - This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
+ type: str
+ uuid:
+ description:
+ - UUID of the instance to manage the serial ports, this is VMware's unique identifier.
+ - This is a required parameter, if parameter C(name) or C(moid) is not supplied.
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ type: str
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: false
+ type: bool
+ backings:
+ type: list
+ elements: dict
+ description:
+ - A list of backings for serial ports.
+ - 'C(backing_type) (str): is required to add or reconfigure or remove an existing serial port.'
+ required: true
+ suboptions:
+ backing_type:
+ description:
+ - Backing type is required for the serial ports to be added or reconfigured or removed.
+ type: str
+ required: true
+ aliases:
+ - type
+ state:
+ description:
+ - C(state) is required to identify whether we are adding, modifying or removing the serial port.
+ - If C(state) is set to C(present), a serial port will be added or modified.
+ - If C(state) is set to C(absent), an existing serial port will be removed.
      - To modify or remove an existing serial port, C(backing_type) and one of C(service_uri), C(pipe_name),
        C(device_name) or C(file_path) are required.
+ choices:
+ - present
+ - absent
+ type: str
+ default: present
+ yield_on_poll:
+ description:
+ - Enables CPU yield behavior.
+ type: bool
+ default: true
+ pipe_name:
+ description:
+ - Pipe name for the host pipe.
+ - Required when I(backing_type=pipe).
+ type: str
+ endpoint:
+ description:
+ - When you use serial port pipe backing to connect a virtual machine to another process, you must define the endpoints.
+ - Required when I(backing_type=pipe).
+ type: str
+ choices:
+ - client
+ - server
+ default: client
+ no_rx_loss:
+ description:
+ - Enables optimized data transfer over the pipe.
+ - Required when I(backing_type=pipe).
+ type: bool
+ default: false
+ service_uri:
+ description:
+ - Identifies the local host or a system on the network, depending on the value of I(direction).
+ - If you use the virtual machine as a server, the URI identifies the host on which the virtual machine runs.
+ - In this case, the host name part of the URI should be empty, or it should specify the address of the local host.
+ - If you use the virtual machine as a client, the URI identifies the remote system on the network.
+ - Required when I(backing_type=network).
+ type: str
+ proxy_uri:
+ description:
+ - Identifies a vSPC proxy service that provides network access to the I(service_uri).
+ - If you specify a proxy URI, the virtual machine initiates a connection with the proxy service
+ and forwards the serviceURI and direction to the proxy.
+ - The C(Use Virtual Serial Port Concentrator) option is automatically enabled when I(proxy_uri) is set.
+ type: str
+ version_added: '3.7.0'
+ direction:
+ description:
+ - The direction of the connection.
+ - Required when I(backing_type=network).
+ type: str
+ choices:
+ - client
+ - server
+ default: client
+ device_name:
+ description:
      - Absolute path of the serial device on the host.
+ - Required when I(backing_type=device).
+ type: str
+ file_path:
+ description:
+ - File path for the host file used in this backing. Fully qualified path is required, like <datastore_name>/<file_name>.
+ - Required when I(backing_type=file).
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+author:
+ - Anusha Hegde (@anusha94)
+'''
+
+EXAMPLES = r'''
+# Create serial ports
+- name: Create multiple serial ports with Backing type - network, pipe, device and file
+ community.vmware.vmware_guest_serial_port:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: "test_vm1"
+ backings:
+ - type: 'network'
+ direction: 'client'
+ service_uri: 'tcp://6000'
+ yield_on_poll: true
+ - type: 'pipe'
+ pipe_name: 'serial_pipe'
+ endpoint: 'client'
+ - type: 'device'
+ device_name: '/dev/char/serial/uart0'
+ - type: 'file'
+ file_path: '[datastore1]/file1'
+ yield_on_poll: true
+ register: create_multiple_ports
+
+# Create vSPC port
+- name: Create network serial port with vSPC
+ community.vmware.vmware_guest_serial_port:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: "test_vm1"
+ backings:
+ - type: 'network'
+ direction: 'server'
+ service_uri: 'vSPC.py'
+ proxy_uri: 'telnets://<host>:<port>'
+ yield_on_poll: true
+
+# Modify existing serial port
+- name: Modify Network backing type
+ community.vmware.vmware_guest_serial_port:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ name: '{{ name }}'
+ backings:
+ - type: 'network'
+ state: 'present'
+ direction: 'server'
+ service_uri: 'tcp://6000'
+ delegate_to: localhost
+
+# Remove serial port
+- name: Remove pipe backing type
+ community.vmware.vmware_guest_serial_port:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ name: '{{ name }}'
+ backings:
+ - type: 'pipe'
+ state: 'absent'
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+serial_port_data:
+ description: metadata about the virtual machine's serial ports after managing them
+ returned: always
+ type: dict
+ sample: [
+ {
+ "backing_type": "network",
+ "direction": "client",
+ "service_uri": "tcp://6000"
+ },
+ {
+ "backing_type": "pipe",
+ "direction": "server",
+ "pipe_name": "serial pipe"
+ },
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task
+from ansible.module_utils._text import to_native
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+
class PyVmomiHelper(PyVmomi):
    """Helper that builds VMware device specs to add, edit or remove the
    serial ports of an existing virtual machine."""

    def __init__(self, module):
        super(PyVmomiHelper, self).__init__(module)
        self.change_applied = False   # a change was applied meaning at least one task succeeded
        # Collects edit/remove device changes for existing serial ports.
        self.config_spec = vim.vm.ConfigSpec()
        self.config_spec.deviceChange = []
        # Device specs for serial ports that must be newly created.
        self.serial_ports = []

    def check_vm_state(self, vm_obj):
        """
        To add serial port, the VM must be in powered off state

        Input:
          - vm: Virtual Machine

        Output:
          - True if vm is in poweredOff state
          - module fails otherwise
        """
        if vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOff:
            return True
        # NOTE(review): the message renders as "state(poweredOn)" - no space
        # before the parenthesis.
        self.module.fail_json(msg="A serial device cannot be added to a VM in the current state(" + vm_obj.runtime.powerState + "). "
                                  "Please use the vmware_guest_powerstate module to power off the VM")

    def get_serial_port_config_spec(self, vm_obj):
        """
        Build device-change specs for every requested backing.

        Edits and removals of existing ports are appended to self.config_spec;
        specs for brand-new ports are collected in self.serial_ports (each new
        port gets its own ConfigSpec in reconfigure_vm_serial_port).

        Variables changed:
        - self.config_spec
        - self.change_applied
        """
        # create serial config spec for adding, editing, removing
        for backing in self.params.get('backings'):
            serial_port = get_serial_port(vm_obj, backing)
            if serial_port:
                serial_spec = vim.vm.device.VirtualDeviceSpec()
                serial_spec.device = serial_port
                if diff_serial_port_config(serial_port, backing):
                    if backing['state'] == 'present':
                        # modify existing serial port
                        serial_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
                        # 'type' is the user-facing alias of 'backing_type'.
                        backing_type = backing.get('type', backing.get('backing_type'))
                        serial_spec.device.backing = self.get_backing_info(serial_port, backing, backing_type)
                        serial_spec.device.yieldOnPoll = backing['yield_on_poll']
                        self.change_applied = True
                        self.config_spec.deviceChange.append(serial_spec)
                    elif backing['state'] == 'absent':
                        # remove serial port
                        serial_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
                        self.change_applied = True
                        self.config_spec.deviceChange.append(serial_spec)
            else:
                if backing['state'] == 'present':
                    # if serial port is None
                    # create a new serial port
                    serial_port_spec = self.create_serial_port(backing)
                    serial_port_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
                    self.serial_ports.append(serial_port_spec)
                    self.change_applied = True

    def reconfigure_vm_serial_port(self, vm_obj):
        """
        Reconfigure vm with new or modified serial port config spec

        Runs one ReconfigVM_Task per newly-created port, then a final task
        carrying all edits/removals.  Returns the module result dict.
        """
        self.get_serial_port_config_spec(vm_obj)
        try:
            # configure create tasks first
            if self.serial_ports:
                for serial_port in self.serial_ports:
                    # each type of serial port is of config_spec.device = vim.vm.device.VirtualSerialPort() object type
                    # because serial ports differ in the backing types and config_spec.device has to be unique,
                    # we are creating a new spec for every create port configuration
                    spec = vim.vm.ConfigSpec()
                    spec.deviceChange.append(serial_port)
                    task = vm_obj.ReconfigVM_Task(spec=spec)
                    wait_for_task(task)
            task = vm_obj.ReconfigVM_Task(spec=self.config_spec)
            wait_for_task(task)
        except vim.fault.InvalidDatastorePath as e:
            self.module.fail_json(msg="Failed to configure serial port on given virtual machine due to invalid path: %s" % to_native(e.msg))
        except vim.fault.RestrictedVersion as e:
            self.module.fail_json(msg="Failed to reconfigure virtual machine due to product versioning restrictions: %s" % to_native(e.msg))
        # 'task' is always bound here: the final ReconfigVM_Task above runs
        # unless an exception was raised, and the handled faults call
        # fail_json (which does not return).
        if task.info.state == 'error':
            results = {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg}
        else:
            serial_port_info = get_serial_port_info(vm_obj)
            results = {'changed': self.change_applied, 'failed': False, 'serial_port_info': serial_port_info}

        return results

    def set_network_backing(self, serial_port, backing_info):
        """
        Set the networking backing params

        Requires 'service_uri' and 'direction' keys; fails the module otherwise.
        """
        required_params = ['service_uri', 'direction']
        if set(required_params).issubset(backing_info.keys()):
            backing = serial_port.URIBackingInfo()
            backing.serviceURI = backing_info['service_uri']
            # proxy_uri may be None; setting it enables the vSPC concentrator.
            backing.proxyURI = backing_info['proxy_uri']
            backing.direction = backing_info['direction']
        else:
            # NOTE(review): the concatenation yields "...parameters.The required..."
            # (missing space between sentences).
            self.module.fail_json(msg="Failed to create a new serial port of network backing type due to insufficient parameters."
                                      + "The required parameters are service_uri and direction")
        return backing

    def set_pipe_backing(self, serial_port, backing_info):
        """
        Set the pipe backing params

        Requires 'pipe_name' and 'endpoint' keys; 'no_rx_loss' is optional.
        """
        required_params = ['pipe_name', 'endpoint']
        if set(required_params).issubset(backing_info.keys()):
            backing = serial_port.PipeBackingInfo()
            backing.pipeName = backing_info['pipe_name']
            backing.endpoint = backing_info['endpoint']
        else:
            self.module.fail_json(msg="Failed to create a new serial port of pipe backing type due to insufficient parameters."
                                      + "The required parameters are pipe_name and endpoint")

        # since no_rx_loss is an optional argument, so check if the key is present
        if 'no_rx_loss' in backing_info.keys() and backing_info['no_rx_loss']:
            backing.noRxLoss = backing_info['no_rx_loss']
        return backing

    def set_device_backing(self, serial_port, backing_info):
        """
        Set the device backing params

        Requires the 'device_name' key; fails the module otherwise.
        """
        required_params = ['device_name']
        if set(required_params).issubset(backing_info.keys()):
            backing = serial_port.DeviceBackingInfo()
            backing.deviceName = backing_info['device_name']
        else:
            self.module.fail_json(msg="Failed to create a new serial port of device backing type due to insufficient parameters."
                                      + "The required parameters are device_name")
        return backing

    def set_file_backing(self, serial_port, backing_info):
        """
        Set the file backing params

        Requires the 'file_path' key; fails the module otherwise.
        """
        required_params = ['file_path']
        if set(required_params).issubset(backing_info.keys()):
            backing = serial_port.FileBackingInfo()
            backing.fileName = backing_info['file_path']
        else:
            self.module.fail_json(msg="Failed to create a new serial port of file backing type due to insufficient parameters."
                                      + "The required parameters are file_path")
        return backing

    def get_backing_info(self, serial_port, backing, backing_type):
        """
        Returns the call to the appropriate backing function based on the backing type

        Fails the module when backing_type is not one of network/pipe/device/file.
        """
        switcher = {
            "network": self.set_network_backing,
            "pipe": self.set_pipe_backing,
            "device": self.set_device_backing,
            "file": self.set_file_backing
        }
        backing_func = switcher.get(backing_type, None)
        if backing_func is None:
            self.module.fail_json(msg="Failed to find a valid backing type. "
                                      "Provided '%s', should be one of [%s]" % (backing_type, ', '.join(switcher.keys())))
        return backing_func(serial_port, backing)

    def create_serial_port(self, backing):
        """
        Create a new serial port

        Returns a VirtualDeviceSpec wrapping a fresh VirtualSerialPort device
        whose backing is built from the user-supplied ``backing`` dict.
        """
        serial_spec = vim.vm.device.VirtualDeviceSpec()
        serial_port = vim.vm.device.VirtualSerialPort()
        serial_port.yieldOnPoll = backing['yield_on_poll']
        # 'type' is the user-facing alias of 'backing_type'.
        backing_type = backing.get('type', backing.get('backing_type', None))
        serial_port.backing = self.get_backing_info(serial_port, backing, backing_type)
        serial_spec.device = serial_port
        return serial_spec
+
+
def get_serial_port(vm_obj, backing):
    """
    Find an existing serial port on the VM whose backing class matches the
    requested backing type.

    When any secondary identifier key (service_uri, pipe_name, device_name,
    file_path) is present in ``backing``, the first port with a matching
    backing type is returned; otherwise the last matching port wins.
    NOTE(review): main() declares all of these keys with a default of None,
    so in practice they are always present and the first match is returned.
    """
    type_to_backing_class = {
        'network': vim.vm.device.VirtualSerialPort.URIBackingInfo,
        'pipe': vim.vm.device.VirtualSerialPort.PipeBackingInfo,
        'device': vim.vm.device.VirtualSerialPort.DeviceBackingInfo,
        'file': vim.vm.device.VirtualSerialPort.FileBackingInfo
    }
    secondary_keys = ('service_uri', 'pipe_name', 'device_name', 'file_path')
    # 'type' is the user-facing alias of 'backing_type'.
    wanted_type = backing.get('type', backing.get('backing_type', None))
    found = None
    for device in vm_obj.config.hardware.device:
        if not isinstance(device, vim.vm.device.VirtualSerialPort):
            continue
        if not isinstance(device.backing, type_to_backing_class[wanted_type]):
            continue
        found = device
        if any(key in backing for key in secondary_keys):
            break
    return found
+
+
def get_serial_port_info(vm_obj):
    """
    Collect a list of dicts describing every serial port of the VM.

    Returns an empty list when vm_obj is None; ports with an unrecognized
    backing class are skipped.
    """
    ports = []
    if vm_obj is None:
        return ports
    for device in vm_obj.config.hardware.device:
        if not isinstance(device, vim.vm.device.VirtualSerialPort):
            continue
        dev_backing = device.backing
        info = {}
        if isinstance(dev_backing, vim.vm.device.VirtualSerialPort.URIBackingInfo):
            info['backing_type'] = 'network'
            info['direction'] = dev_backing.direction
            info['service_uri'] = dev_backing.serviceURI
            info['proxy_uri'] = dev_backing.proxyURI
        elif isinstance(dev_backing, vim.vm.device.VirtualSerialPort.PipeBackingInfo):
            info['backing_type'] = 'pipe'
            info['pipe_name'] = dev_backing.pipeName
            info['endpoint'] = dev_backing.endpoint
            info['no_rx_loss'] = dev_backing.noRxLoss
        elif isinstance(dev_backing, vim.vm.device.VirtualSerialPort.DeviceBackingInfo):
            info['backing_type'] = 'device'
            info['device_name'] = dev_backing.deviceName
        elif isinstance(dev_backing, vim.vm.device.VirtualSerialPort.FileBackingInfo):
            info['backing_type'] = 'file'
            info['file_path'] = dev_backing.fileName
        else:
            # Unknown backing class - nothing useful to report.
            continue
        ports.append(info)
    return ports
+
+
def diff_serial_port_config(serial_port, backing):
    """
    Compare an existing serial port against a requested backing.

    Returns True when state is 'present' and the port differs from the
    request, or when state is 'absent' and the port matches the request
    (i.e. it should be removed); False otherwise.
    """
    state = backing['state']

    if state == 'present':
        if 'yield_on_poll' in backing and serial_port.yieldOnPoll != backing['yield_on_poll']:
            return True
        if backing['service_uri'] is not None:
            uri_changed = (serial_port.backing.serviceURI != backing['service_uri']
                           or serial_port.backing.direction != backing['direction']
                           or serial_port.backing.proxyURI != backing['proxy_uri'])
            if uri_changed:
                return True
        if backing['pipe_name'] is not None:
            pipe_changed = (serial_port.backing.pipeName != backing['pipe_name']
                            or serial_port.backing.endpoint != backing['endpoint']
                            or serial_port.backing.noRxLoss != backing['no_rx_loss'])
            if pipe_changed:
                return True
        if backing['device_name'] is not None and serial_port.backing.deviceName != backing['device_name']:
            return True
        if backing['file_path'] is not None and serial_port.backing.fileName != backing['file_path']:
            return True

    if state == 'absent':
        if backing['service_uri'] is not None:
            if (serial_port.backing.serviceURI == backing['service_uri']
                    and serial_port.backing.proxyURI == backing['proxy_uri']):
                return True
        if backing['pipe_name'] is not None and serial_port.backing.pipeName == backing['pipe_name']:
            return True
        if backing['device_name'] is not None and serial_port.backing.deviceName == backing['device_name']:
            return True
        if backing['file_path'] is not None and serial_port.backing.fileName == backing['file_path']:
            return True

    return False
+
+
def main():
    """
    Module entry point.

    Builds the argument spec, locates the target virtual machine, verifies it
    is powered off, and reconfigures its serial ports to match ``backings``.
    """
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        backings=dict(type='list', elements='dict', required=True,
                      options=dict(
                          backing_type=dict(type='str', required=True, aliases=['type']),
                          pipe_name=dict(type='str', default=None),
                          endpoint=dict(type='str', choices=['client', 'server'], default='client'),
                          no_rx_loss=dict(type='bool', default=False),
                          service_uri=dict(type='str', default=None),
                          proxy_uri=dict(type='str', default=None),
                          direction=dict(type='str', choices=['client', 'server'], default='client'),
                          device_name=dict(type='str', default=None),
                          file_path=dict(type='str', default=None),
                          yield_on_poll=dict(type='bool', default=True),
                          state=dict(type='str', choices=['present', 'absent'], default='present')
                      ),
                      required_if=[
                          ['backing_type', 'pipe', ['pipe_name', 'endpoint', 'no_rx_loss']],
                          ['backing_type', 'network', ['service_uri', 'direction']],
                          ['backing_type', 'device', ['device_name']],
                          ['backing_type', 'file', ['file_path']]
                      ]),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[
            ['name', 'uuid', 'moid']
        ],
        mutually_exclusive=[
            ['name', 'uuid', 'moid']
        ],
    )
    result = {'failed': False, 'changed': False}

    pyv = PyVmomiHelper(module)
    # Check if the VM exists before continuing
    vm_obj = pyv.get_vm()

    if vm_obj:
        # Serial ports can only be changed while the VM is powered off;
        # check_vm_state() fails the module otherwise.
        proceed = pyv.check_vm_state(vm_obj)
        if proceed:
            result = pyv.reconfigure_vm_serial_port(vm_obj)

    else:
        # We are unable to find the virtual machine user specified
        # Bail out
        # BUGFIX: this previously read the non-existent 'vm_id' parameter
        # (the argument spec defines name/uuid/moid), so a lookup by moid
        # reported "virtual machine 'None'". Use 'moid' instead.
        vm_id = (module.params.get('name') or module.params.get('uuid') or module.params.get('moid'))
        module.fail_json(msg="Unable to manage serial ports for non-existing"
                             " virtual machine '%s'." % vm_id)

    if result['failed']:
        module.fail_json(**result)
    else:
        module.exit_json(**result)
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_snapshot.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_snapshot.py
new file mode 100644
index 000000000..6e33d2b95
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_snapshot.py
@@ -0,0 +1,462 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# This module is also sponsored by E.T.A.I. (www.etai.fr)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_snapshot
+short_description: Manages virtual machines snapshots in vCenter
+description:
+ - This module can be used to create, delete and update snapshot(s) of the given virtual machine.
+ - All parameters and VMware object names are case sensitive.
+author:
+ - Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
+options:
+ state:
+ description:
+ - Manage snapshot(s) attached to a specific virtual machine.
+ - If set to C(present) and snapshot absent, then will create a new snapshot with the given name.
+ - If set to C(present) and snapshot present, then no changes are made.
+ - If set to C(absent) and snapshot present, then snapshot with the given name is removed.
+ - If set to C(absent) and snapshot absent, then no changes are made.
+ - If set to C(revert) and snapshot present, then virtual machine state is reverted to the given snapshot.
+ - If set to C(revert) and snapshot absent, then no changes are made.
+ - If set to C(remove_all) and snapshot(s) present, then all snapshot(s) will be removed.
+ - If set to C(remove_all) and snapshot(s) absent, then no changes are made.
+ choices: ['present', 'absent', 'revert', 'remove_all']
+ default: 'present'
+ type: str
+ name:
+ description:
+ - Name of the virtual machine to work with.
+ - This is required parameter, if C(uuid) or C(moid) is not supplied.
+ type: str
+ name_match:
+ description:
+ - If multiple VMs matching the name, use the first or last found.
+ default: 'first'
+ choices: ['first', 'last']
+ type: str
+ uuid:
+ description:
+ - UUID of the instance to manage if known, this is VMware's BIOS UUID by default.
+ - This is required if C(name) or C(moid) parameter is not supplied.
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ type: str
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: false
+ type: bool
+ folder:
+ description:
+ - Destination folder, absolute or relative path to find an existing guest.
+ - This is required parameter, if C(name) is supplied.
+ - The folder should include the datacenter. ESX's datacenter is ha-datacenter.
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ type: str
+ datacenter:
+ description:
+ - Destination datacenter for the deploy operation.
+ required: true
+ type: str
+ snapshot_name:
+ description:
+ - Sets the snapshot name to manage.
+ - This param is required only if state is not C(remove_all)
+ type: str
+ description:
+ description:
+ - Define an arbitrary description to attach to snapshot.
+ default: ''
+ type: str
+ quiesce:
+ description:
+ - If set to C(true) and virtual machine is powered on, it will quiesce the file system in virtual machine.
+ - Note that VMware Tools are required for this flag.
+ - If virtual machine is powered off or VMware Tools are not available, then this flag is set to C(false).
+ - If virtual machine does not provide capability to take quiesce snapshot, then this flag is set to C(false).
+ required: false
+ type: bool
+ default: false
+ memory_dump:
+ description:
+ - If set to C(true), memory dump of virtual machine is also included in snapshot.
+ - Note that memory snapshots take time and resources, this will take longer time to create.
+ - If virtual machine does not provide capability to take memory snapshot, then this flag is set to C(false).
+ required: false
+ type: bool
+ default: false
+ remove_children:
+ description:
+ - If set to C(true) and state is set to C(absent), then entire snapshot subtree is set for removal.
+ required: false
+ type: bool
+ default: false
+ new_snapshot_name:
+ description:
+ - Value to rename the existing snapshot to.
+ type: str
+ new_description:
+ description:
+ - Value to change the description of an existing snapshot to.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+ - name: Create a snapshot
+ community.vmware.vmware_guest_snapshot:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ folder: "/{{ datacenter_name }}/vm/"
+ name: "{{ guest_name }}"
+ state: present
+ snapshot_name: snap1
+ description: snap1_description
+ delegate_to: localhost
+
+ - name: Remove a snapshot
+ community.vmware.vmware_guest_snapshot:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ folder: "/{{ datacenter_name }}/vm/"
+ name: "{{ guest_name }}"
+ state: absent
+ snapshot_name: snap1
+ delegate_to: localhost
+
+ - name: Revert to a snapshot
+ community.vmware.vmware_guest_snapshot:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ folder: "/{{ datacenter_name }}/vm/"
+ name: "{{ guest_name }}"
+ state: revert
+ snapshot_name: snap1
+ delegate_to: localhost
+
+ - name: Remove all snapshots of a VM
+ community.vmware.vmware_guest_snapshot:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ folder: "/{{ datacenter_name }}/vm/"
+ name: "{{ guest_name }}"
+ state: remove_all
+ delegate_to: localhost
+
+ - name: Remove all snapshots of a VM using MoID
+ community.vmware.vmware_guest_snapshot:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ folder: "/{{ datacenter_name }}/vm/"
+ moid: vm-42
+ state: remove_all
+ delegate_to: localhost
+
+ - name: Take snapshot of a VM using quiesce and memory flag on
+ community.vmware.vmware_guest_snapshot:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ folder: "/{{ datacenter_name }}/vm/"
+ name: "{{ guest_name }}"
+ state: present
+ snapshot_name: dummy_vm_snap_0001
+ quiesce: true
+ memory_dump: true
+ delegate_to: localhost
+
+ - name: Remove a snapshot and snapshot subtree
+ community.vmware.vmware_guest_snapshot:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ folder: "/{{ datacenter_name }}/vm/"
+ name: "{{ guest_name }}"
+ state: absent
+ remove_children: true
+ snapshot_name: snap1
+ delegate_to: localhost
+
+ - name: Rename a snapshot
+ community.vmware.vmware_guest_snapshot:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ folder: "/{{ datacenter_name }}/vm/"
+ name: "{{ guest_name }}"
+ state: present
+ snapshot_name: current_snap_name
+ new_snapshot_name: im_renamed
+ new_description: "{{ new_snapshot_description }}"
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+snapshot_results:
+ description: metadata about the virtual machine snapshots
+ returned: always
+ type: dict
+ sample: {
+ "current_snapshot": {
+ "creation_time": "2019-04-09T14:40:26.617427+00:00",
+ "description": "Snapshot 4 example",
+ "id": 4,
+ "name": "snapshot4",
+ "state": "poweredOff"
+ },
+ "snapshots": [
+ {
+ "creation_time": "2019-04-09T14:38:24.667543+00:00",
+ "description": "Snapshot 3 example",
+ "id": 3,
+ "name": "snapshot3",
+ "state": "poweredOff"
+ },
+ {
+ "creation_time": "2019-04-09T14:40:26.617427+00:00",
+ "description": "Snapshot 4 example",
+ "id": 4,
+ "name": "snapshot4",
+ "state": "poweredOff"
+ }
+ ]
+ }
+'''
+
+import time
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, list_snapshots, vmware_argument_spec
+
+
class PyVmomiHelper(PyVmomi):
    """Implements the snapshot operations: create, rename, remove, revert and remove_all."""

    def __init__(self, module):
        super(PyVmomiHelper, self).__init__(module)

    def _vm_identifier(self):
        """Return the user-supplied VM identifier (uuid, name or moid) for messages.

        The module's required_one_of contract guarantees at least one is set.
        Consistently uses self.module.params (the original mixed it with the
        PyVmomi-provided self.params alias).
        """
        return (self.module.params.get('uuid')
                or self.module.params.get('name')
                or self.module.params.get('moid'))

    @staticmethod
    def wait_for_task(task):
        """Poll the given vSphere task once a second until it succeeds or fails.

        References:
        https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.Task.html
        https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.TaskInfo.html
        https://github.com/virtdevninja/pyvmomi-community-samples/blob/master/samples/tools/tasks.py
        """
        while task.info.state not in ['success', 'error']:
            time.sleep(1)

    def get_snapshots_by_name_recursively(self, snapshots, snapname):
        """Recursively collect all snapshots named ``snapname`` from a snapshot tree.

        :param snapshots: list of vim.vm.SnapshotTree nodes to search.
        :param snapname: exact (case sensitive) snapshot name to match.
        :returns: list of matching snapshot tree nodes, possibly empty.

        NOTE(review): children of a node that itself matches are not searched;
        presumably duplicate names never nest — confirm before relying on it.
        """
        snap_obj = []
        for snapshot in snapshots:
            if snapshot.name == snapname:
                snap_obj.append(snapshot)
            else:
                snap_obj = snap_obj + self.get_snapshots_by_name_recursively(snapshot.childSnapshotList, snapname)
        return snap_obj

    def snapshot_vm(self, vm):
        """Create a new snapshot of ``vm``, honoring quiesce/memory capabilities.

        Exits the module unchanged when a snapshot with the requested name
        already exists; fails the module on licensing or other API errors.

        :param vm: VirtualMachine managed object.
        :returns: the vSphere task returned by CreateSnapshot.
        """
        memory_dump = False
        quiesce = False
        # Check if there is a latest snapshot already present as specified by user
        if vm.snapshot is not None:
            snap_obj = self.get_snapshots_by_name_recursively(vm.snapshot.rootSnapshotList,
                                                              self.module.params["snapshot_name"])
            if snap_obj:
                # Snapshot already exists, nothing to do.
                self.module.exit_json(changed=False,
                                      msg="Snapshot named [%(snapshot_name)s] already exists and is current." % self.module.params)
        # Only request quiesce/memory snapshots when the VM reports the capability;
        # otherwise silently fall back to False as documented in the module options.
        if vm.capability.quiescedSnapshotsSupported:
            quiesce = self.module.params['quiesce']
        if vm.capability.memorySnapshotsSupported:
            memory_dump = self.module.params['memory_dump']

        task = None
        try:
            task = vm.CreateSnapshot(self.module.params["snapshot_name"],
                                     self.module.params["description"],
                                     memory_dump,
                                     quiesce)
        except vim.fault.RestrictedVersion as exc:
            self.module.fail_json(msg="Failed to take snapshot due to VMware Licence"
                                      " restriction : %s" % to_native(exc.msg))
        except Exception as exc:
            self.module.fail_json(msg="Failed to create snapshot of virtual machine"
                                      " %s due to %s" % (self.module.params['name'], to_native(exc)))
        return task

    def rename_snapshot(self, vm):
        """Rename an existing snapshot and/or change its description.

        Fails when the VM has no snapshots; exits without changes when no
        snapshot matches the requested name.

        :param vm: VirtualMachine managed object.
        :returns: the vSphere RenameSnapshot task, or None.
        """
        if vm.snapshot is None:
            vm_id = self._vm_identifier()
            self.module.fail_json(msg="virtual machine - %s doesn't have any snapshots" % vm_id)

        snap_obj = self.get_snapshots_by_name_recursively(vm.snapshot.rootSnapshotList,
                                                          self.module.params["snapshot_name"])
        task = None
        if len(snap_obj) == 1:
            snap_obj = snap_obj[0].snapshot
            if self.module.params["new_snapshot_name"] and self.module.params["new_description"]:
                task = snap_obj.RenameSnapshot(name=self.module.params["new_snapshot_name"],
                                               description=self.module.params["new_description"])
            elif self.module.params["new_snapshot_name"]:
                task = snap_obj.RenameSnapshot(name=self.module.params["new_snapshot_name"])
            else:
                task = snap_obj.RenameSnapshot(description=self.module.params["new_description"])
        else:
            # Zero (or ambiguous multiple) matches: exit without changes rather than fail.
            vm_id = self._vm_identifier()
            self.module.exit_json(
                msg="Couldn't find any snapshots with specified name: %s on VM: %s" %
                    (self.module.params["snapshot_name"], vm_id))
        return task

    def remove_or_revert_snapshot(self, vm):
        """Remove (state=absent) or revert to (state=revert) the named snapshot.

        :param vm: VirtualMachine managed object.
        :returns: the vSphere task, or None.
        """
        if vm.snapshot is None:
            # Include the moid fallback too so the message never prints None
            # when the VM was addressed by moid only.
            vm_name = self._vm_identifier()
            if self.module.params.get('state') == 'revert':
                self.module.fail_json(msg="virtual machine - %s does not"
                                          " have any snapshots to revert to." % vm_name)
            self.module.exit_json(msg="virtual machine - %s doesn't have any"
                                      " snapshots to remove." % vm_name)

        snap_obj = self.get_snapshots_by_name_recursively(vm.snapshot.rootSnapshotList,
                                                          self.module.params["snapshot_name"])
        task = None
        if len(snap_obj) == 1:
            snap_obj = snap_obj[0].snapshot
            if self.module.params["state"] == "absent":
                # Remove subtree depending upon the user input
                remove_children = self.module.params.get('remove_children', False)
                task = snap_obj.RemoveSnapshot_Task(remove_children)
            elif self.module.params["state"] == "revert":
                task = snap_obj.RevertToSnapshot_Task()
        else:
            vm_id = self._vm_identifier()
            self.module.exit_json(msg="Couldn't find any snapshots with"
                                      " specified name: %s on VM: %s" % (self.module.params["snapshot_name"], vm_id))

        return task

    def apply_snapshot_op(self, vm):
        """Dispatch the requested snapshot operation and wait for its task.

        :param vm: VirtualMachine managed object.
        :returns: result dict with changed/failed and, on success, snapshot_results.
        """
        result = {}
        if self.module.params["state"] == "present":
            if self.module.params["new_snapshot_name"] or self.module.params["new_description"]:
                self.rename_snapshot(vm)
                result = {'changed': True, 'failed': False, 'renamed': True}
                task = None
            else:
                task = self.snapshot_vm(vm)
        elif self.module.params["state"] in ["absent", "revert"]:
            task = self.remove_or_revert_snapshot(vm)
        elif self.module.params["state"] == "remove_all":
            task = vm.RemoveAllSnapshots()
        else:
            # This should not happen: argument_spec restricts the choices.
            raise AssertionError()

        if task:
            self.wait_for_task(task)
            if task.info.state == 'error':
                result = {'changed': False, 'failed': True, 'msg': task.info.error.msg}
            else:
                result = {'changed': True, 'failed': False, 'snapshot_results': list_snapshots(vm)}

        return result
+
+
def main():
    """Entry point: build the argument spec, locate the VM and apply the snapshot op."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        dict(
            state=dict(default='present', choices=['present', 'absent', 'revert', 'remove_all']),
            name=dict(type='str'),
            name_match=dict(type='str', choices=['first', 'last'], default='first'),
            uuid=dict(type='str'),
            moid=dict(type='str'),
            use_instance_uuid=dict(type='bool', default=False),
            folder=dict(type='str'),
            datacenter=dict(required=True, type='str'),
            snapshot_name=dict(type='str'),
            description=dict(type='str', default=''),
            quiesce=dict(type='bool', default=False),
            memory_dump=dict(type='bool', default=False),
            remove_children=dict(type='bool', default=False),
            new_snapshot_name=dict(type='str'),
            new_description=dict(type='str'),
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=[
            ['name', 'folder']
        ],
        required_one_of=[
            ['name', 'uuid', 'moid']
        ],
    )

    folder = module.params['folder']
    if folder:
        # FindByInventoryPath() does not require an absolute path
        # so we should leave the input folder path unmodified
        module.params['folder'] = folder.rstrip('/')

    pyv = PyVmomiHelper(module)
    # Bail out early when the requested VM cannot be located.
    vm = pyv.get_vm()
    if not vm:
        vm_id = module.params.get('uuid') or module.params.get('name') or module.params.get('moid')
        module.fail_json(msg="Unable to manage snapshots for non-existing VM %s" % vm_id)

    # Every state except remove_all needs a target snapshot name.
    if module.params['state'] != 'remove_all' and not module.params['snapshot_name']:
        module.fail_json(msg="snapshot_name param is required when state is '%(state)s'" % module.params)

    result = pyv.apply_snapshot_op(vm)
    result.setdefault('failed', False)

    if result['failed']:
        module.fail_json(**result)
    module.exit_json(**result)
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_snapshot_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_snapshot_info.py
new file mode 100644
index 000000000..b408bb222
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_snapshot_info.py
@@ -0,0 +1,183 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_snapshot_info
+short_description: Gather info about virtual machine's snapshots in vCenter
+description:
+ - This module can be used to gather information about virtual machine's snapshots.
+author:
+ - Abhijeet Kasurde (@Akasurde)
+options:
+ name:
+ description:
+ - Name of the VM to work with.
+ - This is required if C(uuid) or C(moid) is not supplied.
+ type: str
+ uuid:
+ description:
+ - UUID of the instance to manage if known, this is VMware's BIOS UUID by default.
+ - This is required if C(name) or C(moid) parameter is not supplied.
+ - The C(folder) is ignored, if C(uuid) is provided.
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ type: str
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: false
+ type: bool
+ folder:
+ description:
+ - Destination folder, absolute or relative path to find an existing guest.
+ - This is required parameter, if C(name) is supplied.
+ - The folder should include the datacenter. ESX's datacenter is ha-datacenter
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ type: str
+ datacenter:
+ description:
+ - Name of the datacenter.
+ required: true
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather snapshot information about the virtual machine in the given vCenter
+ community.vmware.vmware_guest_snapshot_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ folder: "/{{ datacenter_name }}/vm/"
+ name: "{{ guest_name }}"
+ delegate_to: localhost
+ register: snapshot_info
+
+- name: Gather snapshot information about the virtual machine using MoID
+ community.vmware.vmware_guest_snapshot_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ moid: vm-42
+ delegate_to: localhost
+ register: snapshot_info
+'''
+
+RETURN = r'''
+guest_snapshots:
+ description: metadata about the snapshot information
+ returned: always
+ type: dict
+ sample: {
+ "current_snapshot": {
+ "creation_time": "2018-02-10T14:48:31.999459+00:00",
+ "description": "",
+ "id": 28,
+ "name": "snap_0003",
+ "state": "poweredOff",
+ "quiesced": false
+ },
+ "snapshots": [
+ {
+ "creation_time": "2018-02-10T14:48:31.999459+00:00",
+ "description": "",
+ "id": 28,
+ "name": "snap_0003",
+ "state": "poweredOff",
+ "quiesced": false
+ }
+ ]
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, list_snapshots, vmware_argument_spec
+
+
class PyVmomiHelper(PyVmomi):
    """Thin PyVmomi wrapper exposing snapshot information gathering."""

    def __init__(self, module):
        super(PyVmomiHelper, self).__init__(module)

    @staticmethod
    def gather_guest_snapshot_info(vm_obj=None):
        """Return snapshot related information about the given virtual machine.

        Args:
            vm_obj: Virtual Machine Managed object, or None.

        Returns: Dictionary containing snapshot information (empty when
            vm_obj is None).
        """
        return {} if vm_obj is None else list_snapshots(vm=vm_obj)
+
+
def main():
    """Entry point: gather and report snapshot information for a single VM."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type='str'),
            uuid=dict(type='str'),
            moid=dict(type='str'),
            use_instance_uuid=dict(type='bool', default=False),
            folder=dict(type='str'),
            datacenter=dict(required=True, type='str'),
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=[
            ['name', 'folder']
        ],
        required_one_of=[
            ['name', 'uuid', 'moid']
        ],
        supports_check_mode=True,
    )

    folder = module.params['folder']
    if folder:
        # FindByInventoryPath() does not require an absolute path
        # so we should leave the input folder path unmodified
        module.params['folder'] = folder.rstrip('/')

    pyv = PyVmomiHelper(module)
    # Bail out early when the requested VM cannot be located.
    vm = pyv.get_vm()
    if not vm:
        # If UUID is set, get_vm select UUID, show error message accordingly.
        vm_id = module.params.get('uuid') or module.params.get('name') or module.params.get('moid')
        module.fail_json(msg="Unable to gather information about snapshots for"
                             " non-existing VM ['%s']" % vm_id)

    module.exit_json(changed=False, guest_snapshots=pyv.gather_guest_snapshot_info(vm_obj=vm))
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_storage_policy.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_storage_policy.py
new file mode 100644
index 000000000..362020c7e
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_storage_policy.py
@@ -0,0 +1,458 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2021, Tyler Gates <tgates81@gmail.com>
+#
+# Special thanks to:
+# * Vadim Aleksandrov <valeksandrov@me.com>: Original author of python script
+# `set_vm_storage_policy.py` from
+# which most methods were derived.
+# * William Lam (https://github.com/lamw): Author of script
+# `list_vm_storage_policy.py` whose
+# ideas were inspiration for
+# Vadim's script.
+# * Abhijeet Kasurde <akasurde@redhat.com>: Ansible modulization loosely
+# modeled after
+# `vmware_guest_disk.py'.
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_storage_policy
+short_description: Set VM Home and disk(s) storage policy profiles.
+description:
+ - This module can be used to enforce storage policy profiles per disk and/or VM Home on a virtual machine.
+author:
+ - Tyler Gates (@tgates81)
+options:
+ name:
+ description:
+ - Name of the virtual machine.
+ - One of C(name), C(uuid), or C(moid) are required to define the virtual machine.
+ type: str
+ required: false
+ uuid:
+ description:
+ - UUID of the virtual machine.
+ - One of C(name), C(uuid), or C(moid) are required to define the virtual machine.
+ type: str
+ required: false
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - One of C(name), C(uuid), or C(moid) are required to define the virtual machine.
+ type: str
+ required: false
+ folder:
+ description:
+ - Destination folder, absolute or relative path to find an existing guest.
+ - This is a required parameter if multiple VMs are found with same name.
+ - The folder should include the datacenter. ESX's datacenter is ha-datacenter.
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ type: str
+ required: false
+ vm_home:
+ description:
+ - A storage profile policy to set on VM Home.
+ - All values and parameters are case sensitive.
+ - At least one of C(disk) or C(vm_home) are required parameters.
+ required: false
+ type: str
+ disk:
+ description:
+ - A list of disks with storage profile policies to enforce.
+ - All values and parameters are case sensitive.
+ - At least one of C(disk) and C(vm_home) are required parameters.
+ required: false
+ type: list
+ elements: dict
+ suboptions:
+ unit_number:
+ description:
+ - Disk Unit Number.
+ - Valid values range from 0 to 15.
+ type: int
+ required: true
+ controller_number:
+ description:
+ - SCSI controller number.
+ - Valid values range from 0 to 3.
+ type: int
+ default: 0
+ policy:
+ description:
+ - Name of the storage profile policy to enforce for the disk.
+ type: str
+ required: true
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: Enforce storepol1 policy for disk 0 and 1 on SCSI controller 0 using UUID
+ community.vmware.vmware_guest_storage_policy:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ uuid: cefd316c-fc19-45f3-a539-2cd03427a78d
+ disk:
+ - unit_number: 0
+ controller_number: 0
+ policy: storepol1
+ - unit_number: 1
+ controller_number: 0
+ policy: storepol1
+ delegate_to: localhost
+ register: policy_status
+
+- name: Enforce storepol1 policy for VM Home using name
+ community.vmware.vmware_guest_storage_policy:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ name: hostname1
+ vm_home: storepol1
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+msg:
+ description: Informational message on the job result.
+ type: str
+ returned: always
+ sample: "Policies successfully set."
+changed_policies:
+ description: Dictionary containing the changed policies of disk (list of dictionaries) and vm_home.
+ type: dict
+ returned: always
+ sample: {
+ "disk": [
+ {
+ "policy": "storepol1",
+ "unit_number": 0
+ }
+ ],
+ "vm_home": "storepol1"
+ }
+'''
+
+import traceback
+from ansible.module_utils.basic import missing_required_lib
+PYVMOMI_IMP_ERR = None
+try:
+ from pyVmomi import pbm, vim
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+ PYVMOMI_IMP_ERR = traceback.format_exc()
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, wait_for_task
+from ansible_collections.community.vmware.plugins.module_utils.vmware_spbm import SPBM
+
+
+class SPBM_helper(SPBM):
    def __init__(self, module):
        """Initialize the SPBM storage-policy helper with the Ansible module."""
        super().__init__(module)
+
+ def SearchStorageProfileByName(self, profileManager, name):
+ """
+ Search VMware storage policy profile by name.
+
+ :param profileManager: A VMware Storage Policy Service manager object.
+ :type profileManager: pbm.profile.ProfileManager
+ :param name: A VMware Storage Policy profile name.
+ :type name: str
+ :returns: A VMware Storage Policy profile object.
+ :rtype: pbm.profile.Profile
+ """
+
+ profileIds = profileManager.PbmQueryProfile(
+ resourceType=pbm.profile.ResourceType(resourceType="STORAGE"),
+ profileCategory="REQUIREMENT"
+ )
+ if len(profileIds) > 0:
+ storageProfiles = profileManager.PbmRetrieveContent(
+ profileIds=profileIds)
+
+ for storageProfile in storageProfiles:
+ if storageProfile.name == name:
+ return storageProfile
+
+ def CheckAssociatedStorageProfile(self, profileManager, ref, name):
+ """
+ Check the associated storage policy profile.
+
+ :param profileManager: A VMware Storage Policy Service manager object.
+ :type profileManager: pbm.profile.ProfileManager
+ :param ref: A server object ref to a virtual machine, virtual disk,
+ or datastore.
+ :type ref: pbm.ServerObjectRef
+ :param name: A VMware storage policy profile name.
+ :type name: str
+ :returns: True if storage policy profile by name is associated to ref.
+ :rtype: bool
+ """
+
+ profileIds = profileManager.PbmQueryAssociatedProfile(ref)
+ if len(profileIds) > 0:
+ profiles = profileManager.PbmRetrieveContent(profileIds=profileIds)
+ for profile in profiles:
+ if profile.name == name:
+ return True
+ return False
+
+ def SetVMHomeStorageProfile(self, vm, profile):
+ """
+ Set VM Home storage policy profile.
+
+ :param vm: A virtual machine object.
+ :type vm: VirtualMachine
+ :param profile: A VMware Storage Policy profile.
+ :type profile: pbm.profile.Profile
+ :returns: VMware task object.
+ :rtype: Task
+ """
+
+ spec = vim.vm.ConfigSpec()
+ profileSpec = vim.vm.DefinedProfileSpec()
+ profileSpec.profileId = profile.profileId.uniqueId
+ spec.vmProfile = [profileSpec]
+ return vm.ReconfigVM_Task(spec)
+
+ def GetVirtualDiskObj(self, vm, unit_number, controller_number):
+ """
+ Get a virtual disk object.
+
+ :param vm: A virtual machine object.
+ :type vm: VirtualMachine
+ :param unit_number: virtual machine's disk unit number.
+ :type unit_number: int
+ :param controller_number: virtual machine's controller number.
+ :type controller_number: int
+ :returns: VirtualDisk object if exists, else None.
+ :rtype: VirtualDisk, None
+ """
+ controllerKey = None
+ for device in vm.config.hardware.device:
+ if isinstance(device, vim.vm.device.VirtualSCSIController):
+ if device.busNumber == controller_number:
+ controllerKey = device.key
+ break
+
+ if controllerKey is not None: # if controller was found check disk
+ for device in vm.config.hardware.device:
+ if not isinstance(device, vim.vm.device.VirtualDisk):
+ continue
+ if int(device.unitNumber) == int(unit_number) and \
+ int(device.controllerKey) == controllerKey:
+ return device
+
+ return None
+
+ def SetVMDiskStorageProfile(self, vm, unit_number, controller_number, profile):
+ """
+ Set VM's disk storage policy profile.
+
+ :param vm: A virtual machine object
+ :type vm: VirtualMachine
+ :param unit_number: virtual machine's disk unit number.
+ :type unit_number: int
+ :param controller_number: virtual machine's controller number.
+ :type controller_number: int
+ :param profile: A VMware Storage Policy profile
+ :type profile: pbm.profile.Profile
+ :returns: VMware task object.
+ :rtype: Task
+ """
+
+ spec = vim.vm.ConfigSpec()
+ profileSpec = vim.vm.DefinedProfileSpec()
+ profileSpec.profileId = profile.profileId.uniqueId
+
+ deviceSpec = vim.vm.device.VirtualDeviceSpec()
+ deviceSpec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
+ disk_obj = self.GetVirtualDiskObj(vm, unit_number, controller_number)
+ deviceSpec.device = disk_obj
+ deviceSpec.profile = [profileSpec]
+ spec.deviceChange = [deviceSpec]
+ return vm.ReconfigVM_Task(spec)
+
    def ensure_storage_policies(self, vm_obj):
        """
        Ensure VM storage profile policies.

        Compares the requested policies (module params 'disk' and 'vm_home')
        against the VM's current profile associations and reconfigures only
        what differs; check mode reports changes without applying them.

        :param vm_obj: VMware VM object.
        :type vm_obj: VirtualMachine
        :exits: self.module.exit_json on success, else self.module.fail_json.
        """

        disks = self.module.params.get('disk')
        vm_home = self.module.params.get('vm_home')
        success_msg = "Policies successfully set."
        result = dict(
            changed=False,
            msg="",
            changed_policies=dict(disk=[],
                                  vm_home="",
                                  ),
        )

        # Connect into vcenter and get the profile manager for the VM.
        self.get_spbm_connection()
        pm = self.spbm_content.profileManager

        #
        # VM HOME
        #
        if vm_home:
            policy = vm_home
            # The VM home directory is addressed by the VM's own moref.
            pmObjectType = pbm.ServerObjectRef.ObjectType("virtualMachine")
            pmRef = pbm.ServerObjectRef(key=vm_obj._moId,
                                        objectType=pmObjectType)
            pol_obj = self.SearchStorageProfileByName(pm, policy)

            if not pol_obj:
                result['msg'] = "Unable to find storage policy `%s' for vm_home" % policy
                self.module.fail_json(**result)

            if not self.CheckAssociatedStorageProfile(pm, pmRef, policy):
                # Existing policy is different than requested. Set, wait for
                # task success, and exit.
                if not self.module.check_mode:
                    task = self.SetVMHomeStorageProfile(vm_obj, pol_obj)
                    wait_for_task(task)  # will raise an Exception on failure
                result['changed'] = True
                result['changed_policies']['vm_home'] = policy

        #
        # DISKS
        #
        if disks is None:
            disks = list()
        # Check the requested disks[] information is sane or fail by looking up
        # and storing the object(s) in a new dict.
        disks_objs = dict()  # {unit_number: {disk: <obj>, policy: <obj>}}
        for disk in disks:
            policy = str(disk['policy'])
            unit_number = int(disk['unit_number'])
            controller_number = int(disk['controller_number'])
            disk_obj = self.GetVirtualDiskObj(vm_obj, unit_number, controller_number)
            pol_obj = self.SearchStorageProfileByName(pm, policy)
            if not pol_obj:
                result['msg'] = "Unable to find storage policy `%s' for disk %s." % (policy, disk)
                self.module.fail_json(**result)
            if not disk_obj:
                errmsg = "Unable to find disk for controller_number '%s' unit_number '%s'. 7 is reserved for SCSI adapters."
                result['msg'] = errmsg % (controller_number, unit_number)
                self.module.fail_json(**result)
            disks_objs[unit_number] = dict(disk=disk_obj, policy=pol_obj)

        # All requested profiles are valid. Iterate through each disk and set
        # accordingly.
        for disk in disks:
            policy = str(disk['policy'])
            unit_number = int(disk['unit_number'])
            controller_number = int(disk['controller_number'])
            disk_obj = disks_objs[unit_number]['disk']
            pol_obj = disks_objs[unit_number]['policy']
            # Individual disks are addressed as "<vm-moid>:<disk-key>".
            pmObjectType = pbm.ServerObjectRef.ObjectType("virtualDiskId")
            pmRef = pbm.ServerObjectRef(key="%s:%s"
                                        % (vm_obj._moId, disk_obj.key),
                                        objectType=pmObjectType)

            if not self.CheckAssociatedStorageProfile(pm, pmRef, policy):
                # Existing policy is different than requested. Set, wait for
                # task success, and exit.
                if not self.module.check_mode:
                    task = self.SetVMDiskStorageProfile(vm_obj, unit_number,
                                                        controller_number,
                                                        pol_obj)
                    wait_for_task(task)
                result['changed'] = True
                result['changed_policies']['disk'].append(disk)

        #
        # END
        #
        # Check our results and exit.
        if result['changed']:
            result['msg'] = success_msg
        self.module.exit_json(**result)
+
def run_module():
    """Build the module spec, locate the target VM and apply storage policies.

    Fails the module when pyVmomi is missing, when the VM cannot be found,
    or when applying the policies raises; otherwise exits via
    ensure_storage_policies().
    """
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        folder=dict(type='str'),
        disk=dict(type='list',
                  required=False,
                  elements='dict',
                  options=dict(
                      unit_number=dict(type='int', required=True),
                      controller_number=dict(type='int', default=0),
                      policy=dict(type='str', required=True)
                  )),
        vm_home=dict(type='str'),
    )
    module = AnsibleModule(
        supports_check_mode=True,
        argument_spec=argument_spec,
        required_one_of=[
            ['name', 'uuid', 'moid'],
            ['disk', 'vm_home'],
        ],
    )

    if not HAS_PYVMOMI:
        module.fail_json(msg=missing_required_lib("pyVmomi"),
                         exception=PYVMOMI_IMP_ERR)

    if module.params['folder']:
        # FindByInventoryPath() does not require an absolute path
        # so we should leave the input folder path unmodified
        module.params['folder'] = module.params['folder'].rstrip('/')

    spbm_h = SPBM_helper(module)
    # Check if the VM exists before continuing
    vm = spbm_h.get_vm()
    if not vm:
        module.fail_json(msg="Unable to find virtual machine `%s'" %
                         (module.params.get('name')
                          or module.params.get('uuid')
                          or module.params.get('moid')))

    try:
        spbm_h.ensure_storage_policies(vm)
    except Exception as e:
        # BUGFIX: the implicit string concatenation previously produced
        # "virtualmachine"; a trailing space keeps the message readable.
        module.fail_json(msg="Failed to set storage policies for virtual "
                             "machine '%s' with exception: %s"
                         % (vm.name, to_native(e)))
+
def main():
    # Thin wrapper so the module logic can be imported without side effects.
    run_module()


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_tools_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_tools_info.py
new file mode 100644
index 000000000..77c152474
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_tools_info.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# Copyright: (c) 2019, VMware, Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_tools_info
+short_description: Gather info about VMware tools installed in VM
+description:
+ - Gather information about the VMware tools installed in virtual machine.
+author:
+ - Diane Wang (@Tomorrow9) <dianew@vmware.com>
+options:
+ name:
+ description:
+ - Name of the VM to get VMware tools info.
+ - This is required if C(uuid) or C(moid) is not supplied.
+ type: str
+ name_match:
+ description:
+ - If multiple VMs matching the name, use the first or last found.
+ default: 'first'
+ choices: ['first', 'last']
+ type: str
+ uuid:
+ description:
+ - UUID of the instance to manage if known, this is VMware's unique identifier.
+ - This is required if C(name) or C(moid) is not supplied.
+ type: str
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: false
+ type: bool
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ type: str
+ folder:
+ description:
+ - Destination folder, absolute or relative path to find an existing guest.
+ - This is required if name is supplied.
+ - The folder should include the datacenter. ESXi server's datacenter is ha-datacenter.
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ type: str
+ datacenter:
+ description:
+ - The datacenter name to which virtual machine belongs to.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather VMware tools info installed in VM specified by uuid
+ community.vmware.vmware_guest_tools_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ uuid: 421e4592-c069-924d-ce20-7e7533fab926
+ delegate_to: localhost
+ register: vmtools_info
+
+- name: Gather VMware tools info installed in VM specified by name
+ community.vmware.vmware_guest_tools_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ name: "{{ vm_name }}"
+ delegate_to: localhost
+ register: vmtools_info
+'''
+
+RETURN = r'''
+vmtools_info:
+ description: metadata about the VMware tools installed in virtual machine
+ returned: always
+ type: dict
+ sample: {
+ "vm_uuid": null,
+ "vm_moid": null,
+ "vm_use_instance_uuid": false,
+ "vm_guest_fullname": "Microsoft Windows 10 (64-bit)",
+ "vm_guest_hostname": "test",
+ "vm_guest_id": "windows9_64Guest",
+ "vm_hw_version": "vmx-14",
+ "vm_ipaddress": "10.10.10.10",
+ "vm_name": "test_vm",
+ "vm_tools_install_status": "toolsOk",
+ "vm_tools_install_type": "guestToolsTypeMSI",
+ "vm_tools_last_install_count": 0,
+ "vm_tools_running_status": "guestToolsRunning",
+ "vm_tools_upgrade_policy": "manual",
+ "vm_tools_version": 10341,
+ "vm_tools_version_status": "guestToolsCurrent"
+ }
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+
+
class PyVmomiHelper(PyVmomi):
    """Collects VMware Tools related facts for a single virtual machine."""

    def __init__(self, module):
        super(PyVmomiHelper, self).__init__(module)
        self.name = self.params['name']
        self.uuid = self.params['uuid']
        self.moid = self.params['moid']
        self.use_instance_uuid = self.params['use_instance_uuid']

    def gather_vmtools_info(self):
        """Build the vmtools_info result dict from the cached VM object."""
        # Hoist the repeated attribute chains into locals for readability.
        vm = self.current_vm_obj
        guest_summary = vm.summary.guest
        tools_cfg = vm.config.tools

        vmtools_info = {
            'vm_name': self.name,
            'vm_uuid': self.uuid,
            'vm_moid': self.moid,
            'vm_use_instance_uuid': self.use_instance_uuid,
            'vm_hw_version': vm.config.version,
            'vm_guest_id': guest_summary.guestId,
            'vm_guest_fullname': guest_summary.guestFullName,
            'vm_guest_hostname': guest_summary.hostName,
            'vm_ipaddress': guest_summary.ipAddress,
            'vm_tools_running_status': guest_summary.toolsRunningStatus,
            'vm_tools_install_status': guest_summary.toolsStatus,
            'vm_tools_version_status': guest_summary.toolsVersionStatus,
            'vm_tools_install_type': tools_cfg.toolsInstallType,
            'vm_tools_version': tools_cfg.toolsVersion,
            'vm_tools_upgrade_policy': tools_cfg.toolsUpgradePolicy,
            'vm_tools_last_install_count': tools_cfg.lastInstallInfo.counter,
        }

        return {'changed': False, 'failed': False, 'vmtools_info': vmtools_info}
+
+
def main():
    """Module entry point: resolve the VM and return its VMware Tools facts."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        name_match=dict(
            choices=['first', 'last'],
            default='first',
            type='str'
        ),
        folder=dict(type='str'),
        datacenter=dict(type='str'),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[
            ['name', 'uuid', 'moid']
        ],
        mutually_exclusive=[
            ['name', 'uuid', 'moid']
        ],
        supports_check_mode=True,
    )

    pyv = PyVmomiHelper(module)
    vm = pyv.get_vm()
    if not vm:
        vm_id = (module.params.get('uuid') or module.params.get('name') or module.params.get('moid'))
        module.fail_json(msg='Unable to find the specified virtual machine using: %s' % vm_id)
    # Gathering facts is read-only, so it is safe even in check mode.
    results = pyv.gather_vmtools_info()
    module.exit_json(**results)


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_tools_upgrade.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_tools_upgrade.py
new file mode 100644
index 000000000..73ef09613
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_tools_upgrade.py
@@ -0,0 +1,220 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Mike Klebolt <michael.klebolt@centurylink.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_tools_upgrade
+short_description: Module to upgrade VMTools
+description:
+ - This module upgrades the VMware Tools on Windows and Linux guests and reboots them.
+notes:
+ - "In order to upgrade VMTools, please power on virtual machine beforehand - either 'manually' or
+ using module M(community.vmware.vmware_guest_powerstate)."
+options:
+ name:
+ description:
+ - Name of the virtual machine to work with.
+ - 'This is required if C(uuid) or C(moid) is not supplied.'
+ type: str
+ name_match:
+ description:
+ - If multiple virtual machines matching the name, use the first or last found.
+ default: 'first'
+ choices: ['first', 'last']
+ type: str
+ uuid:
+ description:
+ - "UUID of the instance to manage if known, this is VMware's unique identifier."
+ - This is required if C(name) or C(moid) is not supplied.
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ type: str
+ folder:
+ description:
+ - Destination folder, absolute or relative path to find an existing guest.
+ - This is required, if C(name) is supplied.
+ - "The folder should include the datacenter. ESX's datacenter is ha-datacenter"
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ type: str
+ datacenter:
+ description:
+ - Destination datacenter where the virtual machine exists.
+ required: true
+ type: str
+ force_upgrade:
+ description:
+ - This flag overrides the guest operating system detection and forcibly upgrades VMware tools or open-vm-tools.
+ - "This is useful when VMware tools is too old and unable to detect the 'guestFamily' value."
+ - 'Using this flag may sometime give unexpected results since module will override the default'
+ - "behaviour of 'guestFamily' detection."
+ default: false
+ type: bool
+ required: false
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+author:
+ - Mike Klebolt (@MikeKlebolt) <michael.klebolt@centurylink.com>
+'''
+
+EXAMPLES = r'''
+- name: Get VM UUID
+ vmware_guest_facts:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter }}"
+ folder: "/{{datacenter}}/vm"
+ name: "{{ vm_name }}"
+ delegate_to: localhost
+ register: vm_facts
+
+- name: Upgrade VMware Tools using uuid
+ community.vmware.vmware_guest_tools_upgrade:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ uuid: "{{ vm_facts.instance.hw_product_uuid }}"
+ delegate_to: localhost
+
+- name: Upgrade VMware Tools using MoID
+ community.vmware.vmware_guest_tools_upgrade:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ moid: vm-42
+ delegate_to: localhost
+'''
+
+RETURN = r''' # '''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task
+from ansible.module_utils._text import to_native
+
+
class PyVmomiHelper(PyVmomi):
    """Helper around PyVmomi for upgrading VMware Tools on a guest."""

    def __init__(self, module):
        super(PyVmomiHelper, self).__init__(module)

    def upgrade_tools(self, vm):
        """
        Upgrade VMware Tools on the given VM when an upgrade is needed.

        :param vm: virtual machine object to operate on.
        :return: result dict with 'failed', 'changed' and 'msg' keys. The
            expected tool-state cases are reported via the dict rather than
            by raising.
        """
        result = {'failed': False, 'changed': False, 'msg': ''}
        # Exit if VMware tools is already up to date
        if vm.guest.toolsStatus == "toolsOk":
            result.update(
                changed=False,
                msg="VMware tools is already up to date",
            )
            return result

        # Fail if VM is not powered on
        elif vm.summary.runtime.powerState != "poweredOn":
            result.update(
                failed=True,
                msg="VM must be powered on to upgrade tools",
            )
            return result

        # Fail if VMware tools is either not running or not installed
        elif vm.guest.toolsStatus in ["toolsNotRunning", "toolsNotInstalled"]:
            result.update(
                failed=True,
                msg="VMware tools is either not running or not installed",
            )
            return result

        # If vmware tools is out of date, check major OS family
        # Upgrade tools on Linux and Windows guests
        elif vm.guest.toolsStatus == "toolsOld":
            try:
                # force_upgrade bypasses guestFamily detection, which can be
                # unavailable when the installed tools are too old to report it.
                force = self.module.params.get('force_upgrade')
                if force or vm.guest.guestFamily in ["linuxGuest", "windowsGuest"]:
                    task = vm.UpgradeTools()
                    changed, err_msg = wait_for_task(task)
                    result.update(changed=changed, msg=to_native(err_msg))
                else:
                    result.update(msg='Guest Operating System is other than Linux and Windows.')
                return result
            except Exception as exc:
                result.update(
                    failed=True,
                    msg='Error while upgrading VMware tools %s' % to_native(exc),
                )
                return result
        else:
            # Any other toolsStatus value is unexpected; report failure.
            result.update(
                failed=True,
                msg="VMware tools could not be upgraded",
            )
            return result
+
+
def main():
    """Module entry point: locate the VM and upgrade its VMware Tools."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        name_match=dict(type='str', choices=['first', 'last'], default='first'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        folder=dict(type='str'),
        datacenter=dict(type='str', required=True),
        force_upgrade=dict(type='bool', default=False),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[
            ['name', 'uuid', 'moid']
        ]
    )

    if module.params['folder']:
        # FindByInventoryPath() does not require an absolute path
        # so we should leave the input folder path unmodified
        module.params['folder'] = module.params['folder'].rstrip('/')

    pyv = PyVmomiHelper(module)
    # Check if the VM exists before continuing
    vm = pyv.get_vm()

    # VM already exists
    if vm:
        try:
            # Map the helper's result dict onto module exit/fail semantics.
            result = pyv.upgrade_tools(vm)
            if result['changed']:
                module.exit_json(changed=result['changed'])
            elif result['failed']:
                module.fail_json(msg=result['msg'])
            else:
                module.exit_json(msg=result['msg'], changed=result['changed'])
        except Exception as exc:
            module.fail_json(msg='Unknown error: %s' % to_native(exc))
    else:
        vm_id = module.params.get('uuid') or module.params.get('name') or module.params.get('moid')
        module.fail_json(msg='Unable to find VM %s' % vm_id)


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_tools_wait.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_tools_wait.py
new file mode 100644
index 000000000..58224b32c
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_tools_wait.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Philippe Dellaert <philippe@dellaert.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_tools_wait
+short_description: Wait for VMware tools to become available
+description:
+ - This module can be used to wait for VMware tools to become available on the given VM and return facts.
+author:
+ - Philippe Dellaert (@pdellaert) <philippe@dellaert.org>
+options:
+ name:
+ description:
+ - Name of the VM for which to wait until the tools become available.
+ - This is required if C(uuid) or C(moid) is not supplied.
+ type: str
+ name_match:
+ description:
+ - If multiple VMs match the name, use the first or last found.
+ default: 'first'
+ choices: ['first', 'last']
+ type: str
+ folder:
+ description:
+ - Destination folder, absolute or relative path to find an existing guest.
+ - This is required only if multiple VMs with the same C(name) are found.
+ - The folder should include the datacenter. ESX's datacenter is C(ha-datacenter).
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ type: str
+ uuid:
+ description:
+ - UUID of the VM for which to wait until the tools become available, if known. This is VMware's unique identifier.
+ - This is required, if C(name) or C(moid) is not supplied.
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ type: str
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: false
+ type: bool
+ timeout:
+ description:
+ - Max duration of the waiting period (seconds).
+ default: 500
+ type: int
+ datacenter:
+ description:
+ - Name of the datacenter.
+ - The datacenter to search for a virtual machine.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Wait for VMware tools to become available by UUID
+ vmware_guest_facts:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter }}"
+ folder: "/{{datacenter}}/vm"
+ name: "{{ vm_name }}"
+ delegate_to: localhost
+ register: vm_facts
+
+- name: Get UUID from previous task and pass it to this task
+ community.vmware.vmware_guest_tools_wait:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ uuid: "{{ vm_facts.instance.hw_product_uuid }}"
+ delegate_to: localhost
+ register: facts
+
+
+- name: Wait for VMware tools to become available by MoID
+ community.vmware.vmware_guest_tools_wait:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ moid: vm-42
+ delegate_to: localhost
+ register: facts
+
+- name: Wait for VMware tools to become available by name
+ community.vmware.vmware_guest_tools_wait:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test-vm
+ folder: "/{{datacenter}}/vm"
+ datacenter: "{{ datacenter }}"
+ delegate_to: localhost
+ register: facts
+'''
+
+RETURN = r'''
+instance:
+ description: metadata about the virtual machine
+ returned: always
+ type: dict
+ sample: None
+'''
+
+import datetime
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, gather_vm_facts, vmware_argument_spec
+
+
class PyVmomiHelper(PyVmomi):
    """Helper that polls a VM until VMware Tools reports it is running."""

    def __init__(self, module):
        super(PyVmomiHelper, self).__init__(module)

    def gather_facts(self, vm):
        """Return a facts dict for the given VM object."""
        return gather_vm_facts(self.content, vm)

    def wait_for_tools(self, vm, timeout):
        """
        Poll until VMware Tools reports 'guestToolsRunning' or the timeout
        expires, sleeping 5 seconds between polls.

        BUGFIX: the previous implementation kept a 'tools_running' flag that
        was never set (success returned from inside the loop), so the final
        guard was dead code and left a latent implicit-None return path.
        A deadline loop that always returns a dict replaces it.

        :param vm: VM object; kept for interface compatibility. The VM is
            re-fetched on every poll so the reported status is current.
        :param timeout: datetime.timedelta giving the maximum wait.
        :return: result dict; 'failed' is True when the deadline passes.
        """
        deadline = datetime.datetime.now() + timeout

        while datetime.datetime.now() < deadline:
            vm_facts = self.gather_facts(self.get_vm())
            if vm_facts['guest_tools_status'] == 'guestToolsRunning':
                return {'changed': True, 'failed': False, 'instance': vm_facts}
            time.sleep(5)

        return {'failed': True, 'msg': 'VMware tools either not present or not running after {0} seconds'.format(timeout.total_seconds())}
+
+
def main():
    """Module entry point: wait until VMware Tools is running on the VM."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        name_match=dict(type='str', default='first', choices=['first', 'last']),
        folder=dict(type='str'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        timeout=dict(type='int', default=500),
        datacenter=dict(type='str'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[
            ['name', 'uuid', 'moid']
        ]
    )

    if module.params['folder']:
        # FindByInventoryPath() does not require an absolute path
        # so we should leave the input folder path unmodified
        module.params['folder'] = module.params['folder'].rstrip('/')

    pyv = PyVmomiHelper(module)
    # Check if the VM exists before continuing
    vm = pyv.get_vm()

    if not vm:
        vm_id = module.params.get('name') or module.params.get('uuid') or module.params.get('moid')
        module.fail_json(msg="Unable to wait for VMware tools for non-existing VM '%s'." % vm_id)

    # Convert the integer timeout (seconds) into a timedelta for the
    # helper's deadline arithmetic.
    timeout = datetime.timedelta(seconds=module.params['timeout'])

    result = dict(changed=False)
    try:
        result = pyv.wait_for_tools(vm, timeout)
    except Exception as e:
        module.fail_json(msg="Waiting for VMware tools failed with"
                             " exception: {0:s}".format(to_native(e)))

    if result['failed']:
        module.fail_json(**result)
    else:
        module.exit_json(**result)


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_tpm.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_tpm.py
new file mode 100644
index 000000000..3ea74a86f
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_tpm.py
@@ -0,0 +1,227 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2021, Ansible Project
+# Copyright: (c) 2021, VMware, Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_tpm
+short_description: Add or remove vTPM device for specified VM.
+description: >
+ This module is used for adding or removing Virtual Trusted Platform Module(vTPM) device for an existing
+ Virtual Machine. You must create a key provider on vCenter before you can add a vTPM. The ESXi hosts
+ running in your environment must be ESXi 6.7 or later (Windows guest OS), or 7.0 Update 2 (Linux guest OS).
+author:
+- Diane Wang (@Tomorrow9) <dianew@vmware.com>
+options:
+ name:
+ description:
+ - Name of the virtual machine.
+ - This is required if parameter C(uuid) or C(moid) is not supplied.
+ type: str
+ uuid:
+ description:
+ - UUID of the instance to manage if known, this is VMware's unique identifier.
+ - This is required if parameter C(name) or C(moid) is not supplied.
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ type: str
+ folder:
+ description:
+ - VM folder, absolute or relative path to find an existing VM.
+ - This parameter is not required, only when multiple VMs are found with the same name.
+ - The folder should include the datacenter name.
+ - 'Examples:'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ type: str
+ datacenter:
+ description:
+ - The vCenter datacenter name used to get specified cluster or host.
+ - This parameter is case sensitive.
+ type: str
+ required: true
+ state:
+ description:
+ - State of vTPM device.
+ - If set to 'absent', vTPM device will be removed from VM.
+ - If set to 'present', vTPM device will be added if not present.
+ - Virtual machine should be turned off before add or remove vTPM device.
+ - Virtual machine should not contain snapshots before add vTPM device.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: Add vTPM to specified VM
+ community.vmware.vmware_guest_tpm:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter }}"
+ name: "Test_VM"
+ state: present
+ delegate_to: localhost
+
+- name: Remove vTPM from specified VM
+ community.vmware.vmware_guest_tpm:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter }}"
+ name: "Test_VM"
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+instance:
+ description: metadata about the VM vTPM device
+ returned: always
+ type: dict
+ sample: None
+'''
+
+HAS_PYVMOMI = False
+try:
+ from pyVmomi import vim
+ HAS_PYVMOMI = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi, wait_for_task
+from ansible_collections.community.vmware.plugins.module_utils.vm_device_helper import PyVmomiDeviceHelper
+
+
class PyVmomiHelper(PyVmomi):
    """Helper that adds or removes a vTPM device on a virtual machine."""

    def __init__(self, module):
        super(PyVmomiHelper, self).__init__(module)
        self.device_helper = PyVmomiDeviceHelper(self.module)
        self.config_spec = vim.vm.ConfigSpec()
        self.config_spec.deviceChange = []
        self.vm = None            # set by vtpm_operation()
        self.vtpm_device = None   # existing vTPM device found on the VM, if any

    def get_vtpm_info(self, vm_obj=None, vtpm_device=None):
        """
        Return key/label/summary info for a vTPM device.

        Either pass the device directly via vtpm_device, or pass vm_obj to
        have the device located on the VM's hardware list. Returns an empty
        dict when no device is found.
        """
        vtpm_info = dict()
        if vm_obj:
            for device in vm_obj.config.hardware.device:
                if self.device_helper.is_tpm_device(device):
                    vtpm_device = device
        if vtpm_device:
            vtpm_info = dict(
                key=vtpm_device.key,
                label=vtpm_device.deviceInfo.label,
                summary=vtpm_device.deviceInfo.summary,
            )

        return vtpm_info

    def vtpm_operation(self, vm_obj=None):
        """
        Add or remove the vTPM device according to the 'state' parameter.

        Exits the module via exit_json/fail_json in every path; it does not
        return to the caller.
        """
        results = {'failed': False, 'changed': False}
        # vTPM management requires a vCenter connection; a direct ESXi
        # connection cannot configure it.
        if not self.is_vcenter():
            self.module.fail_json(msg="Please connect to vCenter Server to configure vTPM device of virtual machine.")

        self.vm = vm_obj
        # Reconfiguring a vTPM requires the VM to be powered off.
        if self.vm.runtime.powerState != vim.VirtualMachinePowerState.poweredOff:
            self.module.fail_json(msg="Please make sure VM is powered off before configuring vTPM device,"
                                      " current state is '%s'" % self.vm.runtime.powerState)

        # Locate any existing vTPM device on the VM.
        for device in self.vm.config.hardware.device:
            if self.device_helper.is_tpm_device(device):
                self.vtpm_device = device

        if self.module.params['state'] == 'present':
            if self.module.check_mode:
                results['desired_operation'] = "add vTPM"
            else:
                results['vtpm_operation'] = "add vTPM"
            if self.vtpm_device:
                # Nothing to do: device already present.
                results['vtpm_info'] = self.get_vtpm_info(vtpm_device=self.vtpm_device)
                results['msg'] = "vTPM device already exist on VM"
                self.module.exit_json(**results)
            else:
                if self.module.check_mode:
                    results['changed'] = True
                    self.module.exit_json(**results)
                vtpm_device_spec = self.device_helper.create_tpm()
        if self.module.params['state'] == 'absent':
            if self.module.check_mode:
                results['desired_operation'] = "remove vTPM"
            else:
                results['vtpm_operation'] = "remove vTPM"
            if self.vtpm_device is None:
                # Nothing to do: no device present.
                results['msg'] = "No vTPM device found on VM"
                self.module.exit_json(**results)
            else:
                if self.module.check_mode:
                    results['changed'] = True
                    self.module.exit_json(**results)
                vtpm_device_spec = self.device_helper.remove_tpm(self.vtpm_device)
        self.config_spec.deviceChange.append(vtpm_device_spec)

        try:
            task = self.vm.ReconfigVM_Task(spec=self.config_spec)
            wait_for_task(task)
        except Exception as e:
            self.module.fail_json(msg="Failed to configure vTPM device on virtual machine due to '%s'" % to_native(e))
        # NOTE(review): wait_for_task appears to raise on task failure, which
        # would make this state check redundant — confirm against
        # module_utils.vmware before removing.
        if task.info.state == 'error':
            self.module.fail_json(msg='Failed to reconfigure VM with vTPM device', detail=task.info.error.msg)
        results['changed'] = True
        results['vtpm_info'] = self.get_vtpm_info(vm_obj=self.vm)
        self.module.exit_json(**results)
+
+
def main():
    """Module entry point: resolve the VM and run the vTPM add/remove operation."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        folder=dict(type='str'),
        datacenter=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['present', 'absent']),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=[['name', 'uuid', 'moid']],
    )
    if module.params['folder']:
        # FindByInventoryPath() does not require an absolute path
        # so we should leave the input folder path unmodified
        module.params['folder'] = module.params['folder'].rstrip('/')

    vm_config_vtpm = PyVmomiHelper(module)
    vm = vm_config_vtpm.get_vm()
    if not vm:
        vm_id = (module.params.get('name') or module.params.get('uuid') or module.params.get('moid'))
        module.fail_json(msg="Unable to configure vTPM device for non-existing virtual machine '%s'." % vm_id)
    try:
        # vtpm_operation() exits the module itself; this try only catches
        # unexpected errors raised before it can exit.
        vm_config_vtpm.vtpm_operation(vm_obj=vm)
    except Exception as e:
        module.fail_json(msg="Failed to configure vTPM device of virtual machine '%s' with exception : %s"
                         % (vm.name, to_native(e)))


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_vgpu.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_vgpu.py
new file mode 100644
index 000000000..835f9d186
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_vgpu.py
@@ -0,0 +1,390 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Diane Wang <dianew@vmware.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: vmware_guest_vgpu
+short_description: Modify vGPU video card profile of the specified virtual machine in the given vCenter infrastructure
+description:
+ - This module is used to reconfigure vGPU card profile of the given virtual machine.
+ - All parameters and VMware object names are case sensitive.
+  - The VM must be powered off. The M(community.vmware.vmware_guest_powerstate) module can perform that task.
+author:
+ - Mohamed Alibi (@Medalibi)
+ - Unknown (@matancarmeli7)
+options:
+ name:
+ description:
+ - Name of the virtual machine.
+ - This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
+ type: str
+ uuid:
+ description:
+ - UUID of the instance to gather facts if known, this is VMware's unique identifier.
+ - This is a required parameter, if parameter C(name) or C(moid) is not supplied.
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ type: str
+ folder:
+ description:
+ - Destination folder, absolute or relative path to find an existing guest.
+ - This is a required parameter, only if multiple VMs are found with same name.
+ - The folder should include the datacenter. ESXi server's datacenter is ha-datacenter.
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ type: str
+ datacenter:
+ default: ha-datacenter
+ description:
+ - The datacenter name to which virtual machine belongs to.
+ - This parameter is case sensitive.
+ type: str
+ state:
+ default: present
+ choices: [ 'present', 'absent' ]
+ description:
+ - vGPU profile state.
+ - When C(state=present), the selected vGPU profile will be added if the VM hosted ESXi host NVIDIA GPU offer it.
+ - When C(state=absent), the selected vGPU profile gets removed from the VM.
+ type: str
+ vgpu:
+ description:
+ - A supported vGPU profile depending on the GPU model. Required for any operation.
+ type: str
+ force:
+ description:
+ - Force operation.
+ default: false
+ type: bool
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: false
+ type: bool
+ cluster:
+ description:
+ - The cluster name where the virtual machine is running.
+ type: str
+ esxi_hostname:
+ description:
+ - The ESXi hostname where the virtual machine is running.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+version_added: '2.5.0'
+"""
+
+EXAMPLES = r"""
+- name: Add vGPU profile to VM
+ community.vmware.vmware_guest_vgpu:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ validate_certs: false
+ name: UbuntuTest
+ vgpu: 'grid_m10-8q'
+ state: present
+ delegate_to: localhost
+ register: vgpu_info
+
+- name: Remove vGPU profile from VM
+ community.vmware.vmware_guest_vgpu:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ validate_certs: false
+ name: UbuntuTest
+ vgpu: 'grid_m10-8q'
+ state: absent
+ delegate_to: localhost
+ register: vgpu_info
+"""
+
+RETURN = r"""
+vgpu_info:
+ description: metadata about the virtual machine's vGPU profile
+ returned: always
+ type: dict
+ sample: {
+ "vgpu": {
+ "Controller_Key": 100,
+ "Key": 13000,
+ "Label": "PCI device 0",
+ "Summary": "NVIDIA GRID vGPU grid_m10-8q",
+ "Unit_Number": 18,
+ "Vgpu": "grid_m10-8q"
+ }
+ }
+"""
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi,
+ vmware_argument_spec,
+ wait_for_task,
+)
+
+
+class PyVmomiHelper(PyVmomi):
+ def __init__(self, module):
+ super(PyVmomiHelper, self).__init__(module)
+
+ def _gather_vgpu_profile_facts(self, vm_obj):
+ """
+ Gather facts about VM's vGPU profile settings
+ Args:
+ vm_obj: Managed object of virtual machine
+ Returns: vGPU profile and its facts
+ """
+ vgpu_info = dict()
+ for vgpu_VirtualDevice_obj in vm_obj.config.hardware.device:
+ if hasattr(vgpu_VirtualDevice_obj.backing, "vgpu"):
+ vgpu_info = dict(
+ Vgpu=vgpu_VirtualDevice_obj.backing.vgpu,
+ Key=vgpu_VirtualDevice_obj.key,
+ Summary=vgpu_VirtualDevice_obj.deviceInfo.summary,
+ Label=vgpu_VirtualDevice_obj.deviceInfo.label,
+ Unit_Number=vgpu_VirtualDevice_obj.unitNumber,
+ Controller_Key=vgpu_VirtualDevice_obj.controllerKey,
+ )
+ break
+ return vgpu_info
+
+ def _vgpu_absent(self, vm_obj):
+ """
+ Remove vGPU profile of virtual machine.
+ Args:
+ vm_obj: Managed object of virtual machine
+ Returns: Operation results and vGPU facts
+ """
+ result = {}
+ vgpu_prfl = self.params["vgpu"]
+ vgpu_VirtualDevice_obj = self._get_vgpu_VirtualDevice_object(vm_obj, vgpu_prfl)
+ if vgpu_VirtualDevice_obj is None:
+ changed = False
+ failed = False
+ else:
+ vgpu_fact = self._gather_vgpu_profile_facts(vm_obj)
+ changed, failed = self._remove_vgpu_profile_from_vm(
+ vm_obj, vgpu_VirtualDevice_obj, vgpu_prfl
+ )
+ result = {"changed": changed, "failed": failed, "vgpu": vgpu_fact}
+ return result
+
+ def _remove_vgpu_profile_from_vm(self, vm_obj, vgpu_VirtualDevice_obj, vgpu_prfl):
+ """
+ Remove vGPU profile of virtual machine
+ Args:
+ vm_obj: Managed object of virtual machine
+ vgpu_VirtualDevice_obj: vGPU profile object holding its facts
+ vgpu_prfl: vGPU profile name
+ Returns: Operation results
+ """
+ changed = False
+ failed = False
+ vm_current_vgpu_profile = self._get_vgpu_profile_in_the_vm(vm_obj)
+ if vgpu_prfl in vm_current_vgpu_profile:
+ vdspec = vim.vm.device.VirtualDeviceSpec()
+ vmConfigSpec = vim.vm.ConfigSpec()
+ vdspec.operation = "remove"
+ vdspec.device = vgpu_VirtualDevice_obj
+ vmConfigSpec.deviceChange.append(vdspec)
+
+ try:
+ task = vm_obj.ReconfigVM_Task(spec=vmConfigSpec)
+ wait_for_task(task)
+ changed = True
+ return changed, failed
+ except Exception as exc:
+ failed = True
+ self.module.fail_json(
+ msg="Failed to delete vGPU profile"
+ " '%s' from vm %s." % (vgpu_prfl, vm_obj.name),
+ detail=exc.msg,
+ )
+ return changed, failed
+
+ def _vgpu_present(self, vm_obj):
+ """
+ Add vGPU profile to virtual machine.
+ Args:
+ vm_obj: Managed object of virtual machine
+ Returns: Operation results and vGPU facts
+ """
+ result = {}
+ vgpu_prfl = self.params["vgpu"]
+ vgpu_profile_name = self._get_vgpu_profiles_name(vm_obj, vgpu_prfl)
+ if vgpu_profile_name is None:
+ self.module.fail_json(
+ msg="vGPU Profile '%s'" " does not exist." % vgpu_prfl
+ )
+
+ changed, failed = self._add_vgpu_profile_to_vm(
+ vm_obj, vgpu_profile_name, vgpu_prfl
+ )
+ vgpu_fact = self._gather_vgpu_profile_facts(vm_obj)
+ result = {"changed": changed, "failed": failed, "vgpu": vgpu_fact}
+ return result
+
+ def _add_vgpu_profile_to_vm(self, vm_obj, vgpu_profile_name, vgpu_prfl):
+ """
+ Add vGPU profile of virtual machine
+ Args:
+ vm_obj: Managed object of virtual machine
+ vgpu_profile_name: vGPU profile object name from ESXi server list
+ vgpu_prfl: vGPU profile name
+ Returns: Operation results
+ """
+ changed = False
+ failed = False
+ vm_current_vgpu_profile = self._get_vgpu_profile_in_the_vm(vm_obj)
+ if self.params["force"] or vgpu_prfl not in vm_current_vgpu_profile:
+ vgpu_p = vgpu_profile_name.vgpu
+ backing = vim.VirtualPCIPassthroughVmiopBackingInfo(vgpu=vgpu_p)
+ summary = "NVIDIA GRID vGPU " + vgpu_prfl
+ deviceInfo = vim.Description(summary=summary, label="PCI device 0")
+ hba_object = vim.VirtualPCIPassthrough(
+ backing=backing, deviceInfo=deviceInfo
+ )
+ new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
+ new_device_config.operation = "add"
+ vmConfigSpec = vim.vm.ConfigSpec()
+ vmConfigSpec.deviceChange = [new_device_config]
+ vmConfigSpec.memoryReservationLockedToMax = True
+
+ try:
+ task = vm_obj.ReconfigVM_Task(spec=vmConfigSpec)
+ wait_for_task(task)
+ changed = True
+ except Exception as exc:
+ failed = True
+ self.module.fail_json(
+ msg="Failed to add vGPU Profile"
+ " '%s' to vm %s." % (vgpu_prfl, vm_obj.name),
+ detail=exc.msg,
+ )
+ else:
+ return changed, failed
+ return changed, failed
+
+ def _get_vgpu_profile_in_the_vm(self, vm_obj):
+ """
+ Get vGPU profile object of virtual machine
+ Args:
+ vm_obj: Managed object of virtual machine
+ Returns: vGPU profile name
+ """
+ vm_current_vgpu_profile = []
+ for vgpu_VirtualDevice_obj in vm_obj.config.hardware.device:
+ if hasattr(vgpu_VirtualDevice_obj.backing, "vgpu"):
+ vm_current_vgpu_profile.append(vgpu_VirtualDevice_obj.backing.vgpu)
+ return vm_current_vgpu_profile
+
+ def _get_vgpu_VirtualDevice_object(self, vm_obj, vgpu_prfl):
+ """
+ Get current vGPU profile object of virtual machine
+ Args:
+ vm_obj: Managed object of virtual machine
+ vgpu_prfl: vGPU profile name
+ Returns: vGPU profile name of virtual machine
+ """
+ for vgpu_VirtualDevice_obj in vm_obj.config.hardware.device:
+ if hasattr(vgpu_VirtualDevice_obj.backing, "vgpu"):
+ if vgpu_VirtualDevice_obj.backing.vgpu == vgpu_prfl:
+ return vgpu_VirtualDevice_obj
+ return None
+
+ def _get_vgpu_profiles_name(self, vm_obj, vgpu_prfl):
+ """
+ Get matched vGPU profile object of ESXi host
+ Args:
+ vm_obj: Managed object of virtual machine
+ vgpu_prfl: vGPU profile name
+ Returns: vGPU profile object
+ """
+ vm_host = vm_obj.runtime.host
+ vgpu_profiles = vm_host.config.sharedGpuCapabilities
+ for vgpu_profile_name in vgpu_profiles:
+ if vgpu_profile_name.vgpu == vgpu_prfl:
+ return vgpu_profile_name
+ return None
+
+
+def main():
+    """Entry point: locate the VM and add or remove the requested vGPU profile."""
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(
+        name=dict(type="str"),
+        uuid=dict(type="str"),
+        use_instance_uuid=dict(type="bool", default=False),
+        moid=dict(type="str"),
+        folder=dict(type="str"),
+        datacenter=dict(type="str", default="ha-datacenter"),
+        esxi_hostname=dict(type="str"),
+        cluster=dict(type="str"),
+        vgpu=dict(type="str"),
+        force=dict(type="bool", default=False),
+        state=dict(type="str", default="present", choices=["absent", "present"]),
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        # A VM runs on either a cluster or a single host, never both.
+        mutually_exclusive=[["cluster", "esxi_hostname"]],
+        required_one_of=[["name", "uuid", "moid"]],
+    )
+
+    pyv = PyVmomiHelper(module)
+    vm = pyv.get_vm()
+
+    if not vm:
+        # Echo back whichever identifier the user actually supplied.
+        vm_id = (
+            module.params.get("uuid")
+            or module.params.get("name")
+            or module.params.get("moid")
+        )
+        module.fail_json(
+            msg="Unable to manage vGPU profile for non-existing VM %s" % vm_id
+        )
+
+    # 'state' is constrained to these two choices by the argument spec,
+    # so 'result' is always bound below.
+    if module.params["state"] == "present":
+        result = pyv._vgpu_present(vm)
+    elif module.params["state"] == "absent":
+        result = pyv._vgpu_absent(vm)
+
+    if "failed" not in result:
+        result["failed"] = False
+
+    if result["failed"]:
+        module.fail_json(**result)
+    else:
+        module.exit_json(**result)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_vgpu_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_vgpu_info.py
new file mode 100644
index 000000000..9dc12eea7
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_vgpu_info.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Diane Wang <dianew@vmware.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: vmware_guest_vgpu_info
+version_added: '3.3.0'
+short_description: Gather information about vGPU profiles of the specified virtual machine in the given vCenter infrastructure
+description:
+ - This module is used to gather metadata about vGPU profiles of the given virtual machine.
+ - All parameters and VMware object names are case sensitive.
+author:
+ - Jared Priddy (@jdptechnc)
+options:
+ name:
+ description:
+ - Name of the virtual machine.
+ - This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
+ type: str
+ uuid:
+ description:
+ - UUID of the instance to gather facts if known, this is VMware's unique identifier.
+ - This is a required parameter, if parameter C(name) or C(moid) is not supplied.
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ type: str
+ folder:
+ description:
+ - Destination folder, absolute or relative path to find an existing guest.
+ - This is a required parameter, only if multiple VMs are found with same name.
+ - The folder should include the datacenter. ESXi server's datacenter is ha-datacenter.
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ type: str
+ datacenter:
+ default: ha-datacenter
+ description:
+ - The datacenter name to which virtual machine belongs to.
+ - This parameter is case sensitive.
+ type: str
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: false
+ type: bool
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+"""
+
+EXAMPLES = r"""
+- name: Gather information about vGPU profiles of a VM
+ community.vmware.vmware_guest_vgpu_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ validate_certs: false
+ name: UbuntuTest
+ delegate_to: localhost
+ register: vgpu_info
+
+"""
+
+RETURN = r"""
+vgpu_info:
+ description: metadata about the virtual machine's vGPU profiles
+ returned: always
+ type: list
+ sample: {
+ "vgpu": [
+ {
+ "Controller_Key": 100,
+ "Key": 13000,
+ "Label": "PCI device 0",
+ "Summary": "NVIDIA GRID vGPU grid_m10-8q",
+ "Unit_Number": 18,
+ "Vgpu": "grid_m10-8q"
+ }
+ ]
+ }
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi,
+ vmware_argument_spec
+)
+
+
+class PyVmomiHelper(PyVmomi):
+ def __init__(self, module):
+ super(PyVmomiHelper, self).__init__(module)
+
+ def gather_vgpu_profile_facts(self, vm_obj):
+ """
+ Gather facts about VM's vGPU profile settings
+ Args:
+ vm_obj: Managed object of virtual machine
+ Returns: list of vGPU profiles with facts
+ """
+ vgpu_info = []
+ for vgpu_VirtualDevice_obj in vm_obj.config.hardware.device:
+ if hasattr(vgpu_VirtualDevice_obj.backing, "vgpu"):
+ vgpu = dict(
+ Vgpu=vgpu_VirtualDevice_obj.backing.vgpu,
+ Key=vgpu_VirtualDevice_obj.key,
+ Summary=vgpu_VirtualDevice_obj.deviceInfo.summary,
+ Label=vgpu_VirtualDevice_obj.deviceInfo.label,
+ Unit_Number=vgpu_VirtualDevice_obj.unitNumber,
+ Controller_Key=vgpu_VirtualDevice_obj.controllerKey,
+ )
+ vgpu_info.append(vgpu)
+ return vgpu_info
+
+
+def main():
+    """Entry point: locate the VM and return its vGPU profile facts."""
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(
+        name=dict(type="str"),
+        uuid=dict(type="str"),
+        use_instance_uuid=dict(type="bool", default=False),
+        moid=dict(type="str"),
+        folder=dict(type="str"),
+        datacenter=dict(type="str", default="ha-datacenter"),
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_one_of=[["name", "uuid", "moid"]],
+        # Pure fact gathering never changes state, so check mode is safe.
+        supports_check_mode=True,
+    )
+
+    pyv = PyVmomiHelper(module)
+    vm = pyv.get_vm()
+
+    if not vm:
+        # Echo back whichever identifier the user actually supplied.
+        vm_id = (
+            module.params.get("uuid")
+            or module.params.get("name")
+            or module.params.get("moid")
+        )
+        module.fail_json(
+            msg="Unable to gather vGPU information for non-existing VM %s" % vm_id
+        )
+    else:
+        try:
+            module.exit_json(vgpu=pyv.gather_vgpu_profile_facts(vm))
+        except Exception as exc:
+            module.fail_json(msg="Failed to gather information with exception : %s" % to_text(exc))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_guest_video.py b/ansible_collections/community/vmware/plugins/modules/vmware_guest_video.py
new file mode 100644
index 000000000..253a1ac74
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_guest_video.py
@@ -0,0 +1,368 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Diane Wang <dianew@vmware.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest_video
+short_description: Modify video card configurations of specified virtual machine in given vCenter infrastructure
+description:
+ - This module is used to reconfigure video card settings of given virtual machine.
+ - All parameters and VMware object names are case sensitive.
+author:
+ - Diane Wang (@Tomorrow9) <dianew@vmware.com>
+options:
+ name:
+ description:
+ - Name of the virtual machine.
+ - This is a required parameter, if parameter C(uuid) or C(moid) is not supplied.
+ type: str
+ uuid:
+ description:
+ - UUID of the instance to gather facts if known, this is VMware's unique identifier.
+ - This is a required parameter, if parameter C(name) or C(moid) is not supplied.
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ type: str
+ folder:
+ description:
+ - Destination folder, absolute or relative path to find an existing guest.
+ - This is a required parameter, only if multiple VMs are found with same name.
+ - The folder should include the datacenter. ESXi server's datacenter is ha-datacenter.
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ type: str
+ datacenter:
+ default: ha-datacenter
+ description:
+ - The datacenter name to which virtual machine belongs to.
+ - This parameter is case sensitive.
+ type: str
+ gather_video_facts:
+ description:
+ - If set to C(true), return settings of the video card, other attributes are ignored.
+ - If set to C(false), will do reconfiguration and return video card settings.
+ type: bool
+ default: false
+ use_auto_detect:
+ description:
+ - 'If set to C(true), applies common video settings to the guest operating system, attributes C(display_number) and C(video_memory_mb) are ignored.'
+ - 'If set to C(false), the number of display and the total video memory will be reconfigured using C(display_number) and C(video_memory_mb).'
+ type: bool
+ display_number:
+ description:
+ - The number of display. Valid value from 1 to 10. The maximum display number is 4 on vCenter 6.0, 6.5 web UI.
+ type: int
+ video_memory_mb:
+ description:
+ - 'Valid total MB of video memory range of virtual machine is from 1.172 MB to 256 MB on ESXi 6.7U1,
+ from 1.172 MB to 128 MB on ESXi 6.7 and previous versions.'
+ - For specific guest OS, supported minimum and maximum video memory are different, please be careful on setting this.
+ type: float
+ enable_3D:
+ description:
+ - Enable 3D for guest operating systems on which VMware supports 3D.
+ type: bool
+ renderer_3D:
+ description:
+ - 'If set to C(automatic), selects the appropriate option (software or hardware) for this virtual machine automatically.'
+ - 'If set to C(software), uses normal CPU processing for 3D calculations.'
+ - 'If set to C(hardware), requires graphics hardware (GPU) for faster 3D calculations.'
+ choices: [ automatic, software, hardware ]
+ type: str
+ memory_3D_mb:
+ description:
+ - The value of 3D Memory must be power of 2 and valid value is from 32 MB to 2048 MB.
+ type: int
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Change video card settings of virtual machine
+ community.vmware.vmware_guest_video:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ name: test-vm
+ gather_video_facts: false
+ use_auto_detect: false
+ display_number: 2
+ video_memory_mb: 8.0
+ enable_3D: true
+ renderer_3D: automatic
+ memory_3D_mb: 512
+ delegate_to: localhost
+ register: video_facts
+
+- name: Change video card settings of virtual machine using MoID
+ community.vmware.vmware_guest_video:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ moid: vm-42
+ gather_video_facts: false
+ use_auto_detect: false
+ display_number: 2
+ video_memory_mb: 8.0
+ enable_3D: true
+ renderer_3D: automatic
+ memory_3D_mb: 512
+ delegate_to: localhost
+ register: video_facts
+
+- name: Gather video card settings of virtual machine
+ community.vmware.vmware_guest_video:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter_name }}"
+ name: test-vm
+ gather_video_facts: false
+ delegate_to: localhost
+ register: video_facts
+'''
+
+RETURN = r'''
+video_status:
+ description: metadata about the virtual machine's video card after managing them
+ returned: always
+ type: dict
+ sample: {
+ "auto_detect": false,
+ "display_number": 2,
+ "enable_3D_support": true,
+ "memory_3D": 524288,
+ "renderer_3D": "automatic",
+ "video_memory": 8192
+ }
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task
+
+
+class PyVmomiHelper(PyVmomi):
+    """Helper that gathers and reconfigures a VM's virtual video card settings."""
+
+    def __init__(self, module):
+        super(PyVmomiHelper, self).__init__(module)
+        # Set to True as soon as any requested setting differs from the VM's
+        # current video card configuration.
+        self.change_detected = False
+        self.config_spec = vim.vm.ConfigSpec()
+        self.config_spec.deviceChange = []
+        # Populated by get_video_card_spec() with the facts gathered before
+        # reconfiguration, so a no-op run can still return them.
+        self.video_card_facts = None
+
+    @staticmethod
+    def is_power_of_2(num):
+        """Return True if num is a positive power of two (validates 3D memory size)."""
+        return num != 0 and ((num & (num - 1)) == 0)
+
+    def gather_video_card_facts(self, vm_obj):
+        """
+        Gather facts about VM's video card settings
+        Args:
+            vm_obj: Managed object of virtual machine
+        Returns: Video Card device and a dict of video card configuration,
+            or (None, {}) when the VM has no VirtualVideoCard device
+        """
+        video_facts = dict()
+        video_card = None
+        for device in vm_obj.config.hardware.device:
+            if isinstance(device, vim.vm.device.VirtualVideoCard):
+                video_card = device
+                video_facts = dict(
+                    auto_detect=device.useAutoDetect,
+                    display_number=device.numDisplays,
+                    video_memory=device.videoRamSizeInKB,
+                    enable_3D_support=device.enable3DSupport,
+                    renderer_3D=device.use3dRenderer,
+                    memory_3D=device.graphicsMemorySizeInKB,
+                )
+                break
+        return video_card, video_facts
+
+    def get_video_card_spec(self, vm_obj):
+        """
+        Get device changes of virtual machine
+        Args:
+            vm_obj: Managed object of virtual machine
+        Returns: virtual device spec, or None when only facts were requested
+            or no setting needs to change
+        """
+        video_card, video_card_facts = self.gather_video_card_facts(vm_obj)
+        self.video_card_facts = video_card_facts
+        if video_card is None:
+            self.module.fail_json(msg='Unable to get video card device for the specified virtual machine.')
+        video_spec = vim.vm.device.VirtualDeviceSpec()
+        video_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
+        video_spec.device = video_card
+        # Effective states after applying the user's request; they gate the
+        # validation of the dependent options below.
+        auto_detect = False
+        enabled_3d = False
+
+        if self.params['gather_video_facts']:
+            return None
+        # Reconcile the requested auto-detect flag with the current device state.
+        if self.params['use_auto_detect'] is not None:
+            if video_card_facts['auto_detect'] and self.params['use_auto_detect']:
+                auto_detect = True
+            elif not video_card_facts['auto_detect'] and self.params['use_auto_detect']:
+                video_spec.device.useAutoDetect = True
+                self.change_detected = True
+                auto_detect = True
+            elif video_card_facts['auto_detect'] and not self.params['use_auto_detect']:
+                video_spec.device.useAutoDetect = False
+                self.change_detected = True
+        else:
+            # Not specified by the user: keep whatever the device already uses.
+            if video_card_facts['auto_detect']:
+                auto_detect = True
+        # useAutoDetect set to False then display number and video memory config can be changed
+        if not auto_detect:
+            if self.params['display_number'] is not None:
+                if self.params['display_number'] < 1:
+                    self.module.fail_json(msg="display_number attribute valid value: 1-10.")
+                if self.params['display_number'] != video_card_facts['display_number']:
+                    video_spec.device.numDisplays = self.params['display_number']
+                    self.change_detected = True
+
+            if self.params['video_memory_mb'] is not None:
+                if self.params['video_memory_mb'] < 1.172:
+                    self.module.fail_json(msg="video_memory_mb attribute valid value: ESXi 6.7U1(1.172-256 MB),"
+                                              "ESXi 6.7/6.5/6.0(1.172-128 MB).")
+                # Device stores video RAM in KB; the module takes MB.
+                if int(self.params['video_memory_mb'] * 1024) != video_card_facts['video_memory']:
+                    video_spec.device.videoRamSizeInKB = int(self.params['video_memory_mb'] * 1024)
+                    self.change_detected = True
+        else:
+            if self.params['display_number'] is not None or self.params['video_memory_mb'] is not None:
+                self.module.fail_json(msg="display_number and video_memory_mb can not be changed if use_auto_detect is true.")
+        # useAutoDetect value not control 3D config
+        if self.params['enable_3D'] is not None:
+            if self.params['enable_3D'] != video_card_facts['enable_3D_support']:
+                video_spec.device.enable3DSupport = self.params['enable_3D']
+                self.change_detected = True
+                if self.params['enable_3D']:
+                    enabled_3d = True
+            else:
+                if video_card_facts['enable_3D_support']:
+                    enabled_3d = True
+        else:
+            if video_card_facts['enable_3D_support']:
+                enabled_3d = True
+        # 3D is enabled then 3D memory and renderer method can be set
+        if enabled_3d:
+            if self.params['renderer_3D'] is not None:
+                renderer = self.params['renderer_3D'].lower()
+                if renderer not in ['automatic', 'software', 'hardware']:
+                    self.module.fail_json(msg="renderer_3D attribute valid value: automatic, software, hardware.")
+                if renderer != video_card_facts['renderer_3D']:
+                    video_spec.device.use3dRenderer = renderer
+                    self.change_detected = True
+            if self.params['memory_3D_mb'] is not None:
+                memory_3d = self.params['memory_3D_mb']
+                if not self.is_power_of_2(memory_3d):
+                    self.module.fail_json(msg="memory_3D_mb attribute should be an integer value and power of 2(32-2048).")
+                else:
+                    if memory_3d < 32 or memory_3d > 2048:
+                        self.module.fail_json(msg="memory_3D_mb attribute should be an integer value and power of 2(32-2048).")
+                # Device stores 3D graphics memory in KB; the module takes MB.
+                if memory_3d * 1024 != video_card_facts['memory_3D']:
+                    video_spec.device.graphicsMemorySizeInKB = memory_3d * 1024
+                    self.change_detected = True
+        else:
+            if self.params['renderer_3D'] is not None or self.params['memory_3D_mb'] is not None:
+                self.module.fail_json(msg='3D renderer or 3D memory can not be configured if 3D is not enabled.')
+        if not self.change_detected:
+            return None
+        return video_spec
+
+    def reconfigure_vm_video(self, vm_obj):
+        """
+        Reconfigure video card settings of virtual machine
+        Args:
+            vm_obj: Managed object of virtual machine
+        Returns: Reconfigure results
+        """
+        video_card_spec = self.get_video_card_spec(vm_obj)
+        if video_card_spec is None:
+            # Nothing to change (or facts-only run): report current facts unchanged.
+            return {'changed': False, 'failed': False, 'instance': self.video_card_facts}
+        self.config_spec.deviceChange.append(video_card_spec)
+        try:
+            task = vm_obj.ReconfigVM_Task(spec=self.config_spec)
+            wait_for_task(task)
+        except vim.fault.InvalidDeviceSpec as invalid_device_spec:
+            self.module.fail_json(msg="Failed to configure video card on given virtual machine due to invalid"
+                                      " device spec : %s" % (to_native(invalid_device_spec.msg)),
+                                  details="Please check ESXi server logs for more details.")
+        except vim.fault.RestrictedVersion as e:
+            self.module.fail_json(msg="Failed to reconfigure virtual machine due to"
+                                      " product versioning restrictions: %s" % to_native(e.msg))
+        if task.info.state == 'error':
+            return {'changed': self.change_detected, 'failed': True, 'msg': task.info.error.msg}
+        # Re-read the facts so the result reflects the post-reconfigure state.
+        video_card_facts = self.gather_video_card_facts(vm_obj)[1]
+        return {'changed': self.change_detected, 'failed': False, 'instance': video_card_facts}
+
+
+def main():
+    """Entry point: locate the VM, enforce power-state preconditions and reconfigure its video card."""
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(
+        name=dict(type='str'),
+        uuid=dict(type='str'),
+        moid=dict(type='str'),
+        folder=dict(type='str'),
+        datacenter=dict(type='str', default='ha-datacenter'),
+        gather_video_facts=dict(type='bool', default=False),
+        use_auto_detect=dict(type='bool'),
+        display_number=dict(type='int'),
+        video_memory_mb=dict(type='float'),
+        enable_3D=dict(type='bool'),
+        renderer_3D=dict(type='str', choices=['automatic', 'software', 'hardware']),
+        memory_3D_mb=dict(type='int'),
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_one_of=[
+            ['name', 'uuid', 'moid']
+        ]
+    )
+
+    pyv = PyVmomiHelper(module)
+    vm = pyv.get_vm()
+    if not vm:
+        # Echo back whichever identifier the user actually supplied.
+        vm_id = module.params.get('uuid') or module.params.get('name') or module.params.get('moid')
+        module.fail_json(msg='Unable to find the specified virtual machine : %s' % vm_id)
+
+    vm_facts = pyv.gather_facts(vm)
+    vm_power_state = vm_facts['hw_power_status'].lower()
+    gather_video_facts = module.params.get('gather_video_facts') or False
+    # Reconfiguration requires the VM to be powered off; a facts-only run does not.
+    if vm_power_state != 'poweredoff':
+        if not gather_video_facts:
+            module.fail_json(msg='VM state should be poweredoff to reconfigure video card settings.')
+    result = pyv.reconfigure_vm_video(vm_obj=vm)
+    if result['failed']:
+        module.fail_json(**result)
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host.py b/ansible_collections/community/vmware/plugins/modules/vmware_host.py
new file mode 100644
index 000000000..284d50ea8
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host.py
@@ -0,0 +1,830 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
+# Copyright: (c) 2017, Ansible Project
+# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host
+short_description: Add, remove, or move an ESXi host to, from, or within vCenter
+description:
+- This module can be used to add, reconnect, or remove an ESXi host to or from vCenter.
+- This module can also be used to move an ESXi host to a cluster or folder, or vice versa, within the same datacenter.
+author:
+- Joseph Callen (@jcpowermac)
+- Russell Teague (@mtnbikenc)
+- Maxime de Roucy (@tchernomax)
+- Christian Kotte (@ckotte)
+options:
+ datacenter_name:
+ description:
+ - Name of the datacenter to add the host.
+ - Aliases added in version 2.6.
+ required: true
+ aliases: ['datacenter']
+ type: str
+ cluster_name:
+ description:
+ - Name of the cluster to add the host.
+ - If C(folder) is not set, then this parameter is required.
+ - Aliases added in version 2.6.
+ aliases: ['cluster']
+ type: str
+ folder:
+ description:
+ - Name of the folder under which host to add.
+ - If C(cluster_name) is not set, then this parameter is required.
+ - "For example, if there is a datacenter 'dc1' under folder called 'Site1' then, this value will be '/Site1/dc1/host'."
+ - "Here 'host' is an invisible folder under VMware Web Client."
+ - "Another example, if there is a nested folder structure like '/myhosts/india/pune' under
+ datacenter 'dc2', then C(folder) value will be '/dc2/host/myhosts/india/pune'."
+ - "Other Examples: '/Site2/dc2/Asia-Cluster/host' or '/dc3/Asia-Cluster/host'"
+ aliases: ['folder_name']
+ type: str
+ add_connected:
+ description:
+ - If set to C(true), then the host should be connected as soon as it is added.
+ - This parameter is ignored if state is set to a value other than C(present).
+ default: true
+ type: bool
+ esxi_hostname:
+ description:
+ - ESXi hostname to manage.
+ required: true
+ type: str
+ esxi_username:
+ description:
+ - ESXi username.
+ - Required for adding a host.
+    - Optional for reconnect. If both C(esxi_username) and C(esxi_password) are set, they are used to re-authenticate during the reconnect.
+ - Unused for removing.
+ - No longer a required parameter from version 2.5.
+ type: str
+ esxi_password:
+ description:
+ - ESXi password.
+ - Required for adding a host.
+ - Optional for reconnect.
+ - Unused for removing.
+ - No longer a required parameter from version 2.5.
+ type: str
+ state:
+ description:
+ - If set to C(present), add the host if host is absent.
+ - If set to C(present), update the location of the host if host already exists.
+ - If set to C(absent), remove the host if host is present.
+    - If set to C(absent), do nothing if the host already does not exist.
+ - If set to C(add_or_reconnect), add the host if it's absent else reconnect it and update the location.
+ - If set to C(reconnect), then reconnect the host if it's present and update the location.
+ - If set to C(disconnected), disconnect the host if the host already exists.
+ default: present
+ choices: ['present', 'absent', 'add_or_reconnect', 'reconnect', 'disconnected']
+ type: str
+ esxi_ssl_thumbprint:
+ description:
+ - "Specifying the hostsystem certificate's thumbprint."
+ - "Use following command to get hostsystem certificate's thumbprint - "
+ - "# openssl x509 -in /etc/vmware/ssl/rui.crt -fingerprint -sha1 -noout"
+    - Only used if C(fetch_ssl_thumbprint) isn't set to C(true).
+ default: ''
+ type: str
+ aliases: ['ssl_thumbprint']
+ fetch_ssl_thumbprint:
+ description:
+ - Fetch the thumbprint of the host's SSL certificate.
+ - This basically disables the host certificate verification (check if it was signed by a recognized CA).
+ - Disable this option if you want to allow only hosts with valid certificates to be added to vCenter.
+ - If this option is set to C(false) and the certificate can't be verified, an add or reconnect will fail.
+ - Unused when C(esxi_ssl_thumbprint) is set.
+ - Optional for reconnect, but only used if C(esxi_username) and C(esxi_password) are used.
+ - Unused for removing.
+ type: bool
+ default: true
+ force_connection:
+ description:
+ - Force the connection if the host is already being managed by another vCenter server.
+ type: bool
+ default: true
+ reconnect_disconnected:
+ description:
+ - Reconnect disconnected hosts.
+ - This is only used if C(state) is set to C(present) and if the host already exists.
+ type: bool
+ default: true
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Add ESXi Host to vCenter
+ community.vmware.vmware_host:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter: datacenter_name
+ cluster: cluster_name
+ esxi_hostname: '{{ esxi_hostname }}'
+ esxi_username: '{{ esxi_username }}'
+ esxi_password: '{{ esxi_password }}'
+ state: present
+ delegate_to: localhost
+
+- name: Add ESXi Host to vCenter under a specific folder
+ community.vmware.vmware_host:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter: datacenter_name
+ folder: '/Site2/Asia-Cluster/host'
+ esxi_hostname: '{{ esxi_hostname }}'
+ esxi_username: '{{ esxi_username }}'
+ esxi_password: '{{ esxi_password }}'
+ state: present
+ add_connected: true
+ delegate_to: localhost
+
+- name: Reconnect ESXi Host (with username/password set)
+ community.vmware.vmware_host:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter: datacenter_name
+ cluster: cluster_name
+ esxi_hostname: '{{ esxi_hostname }}'
+ esxi_username: '{{ esxi_username }}'
+ esxi_password: '{{ esxi_password }}'
+ state: reconnect
+ delegate_to: localhost
+
+- name: Reconnect ESXi Host (with default username/password)
+ community.vmware.vmware_host:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter: datacenter_name
+ cluster: cluster_name
+ esxi_hostname: '{{ esxi_hostname }}'
+ state: reconnect
+ delegate_to: localhost
+
+- name: Add ESXi Host with SSL Thumbprint to vCenter
+ community.vmware.vmware_host:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter: datacenter_name
+ cluster: cluster_name
+ esxi_hostname: '{{ esxi_hostname }}'
+ esxi_username: '{{ esxi_username }}'
+ esxi_password: '{{ esxi_password }}'
+ esxi_ssl_thumbprint: "3C:A5:60:6F:7A:B7:C4:6C:48:28:3D:2F:A5:EC:A3:58:13:88:F6:DD"
+ state: present
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+result:
+ description: metadata about the new host system added
+ returned: on successful addition
+ type: str
+ sample: "Host already connected to vCenter 'vcenter01' in cluster 'cluster01'"
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi, TaskError, vmware_argument_spec,
+ wait_for_task, find_host_by_cluster_datacenter, find_hostsystem_by_name
+)
+
+
class VMwareHost(PyVmomi):
    """Class to manage vCenter connection"""

    def __init__(self, module):
        """Capture module parameters and initialize lookup state."""
        super(VMwareHost, self).__init__(module)
        params = module.params
        self.vcenter = params['hostname']
        self.datacenter_name = params['datacenter_name']
        self.cluster_name = params['cluster_name']
        self.folder_name = params['folder']
        self.esxi_hostname = params['esxi_hostname']
        self.esxi_username = params['esxi_username']
        self.esxi_password = params['esxi_password']
        self.state = params['state']
        self.esxi_ssl_thumbprint = params.get('esxi_ssl_thumbprint', '')
        self.force_connection = params.get('force_connection')
        self.fetch_ssl_thumbprint = params.get('fetch_ssl_thumbprint')
        self.reconnect_disconnected = params.get('reconnect_disconnected')
        # Inventory objects resolved later by check_host_state()
        self.host_update = None
        self.host = None
        self.cluster = None
        self.folder = None
        self.host_parent_compute_resource = None
+
    def process_state(self):
        """Dispatch to the handler for (desired state, current state).

        The outer key is the module's ``state`` parameter; the inner key is
        the current state computed by check_host_state(). Each handler exits
        the module itself via exit_json()/fail_json(). pyVmomi faults are
        converted into module failures.
        """
        host_states = {
            'absent': {
                'present': self.state_remove_host,
                'update': self.state_remove_host,
                'absent': self.state_exit_unchanged,
            },
            'present': {
                'present': self.state_exit_unchanged,
                'update': self.state_update_host,
                'absent': self.state_add_host,
            },
            'add_or_reconnect': {
                'present': self.state_reconnect_host,
                'update': self.state_update_host,
                'absent': self.state_add_host,
            },
            'reconnect': {
                # NOTE(review): no 'absent' entry — reconnecting a host unknown
                # to vCenter raises KeyError, caught by the generic handler below.
                'present': self.state_reconnect_host,
                'update': self.state_update_host,
            },
            'disconnected': {
                # NOTE(review): no 'update' entry — same KeyError fallthrough.
                'present': self.state_disconnected_host,
                'absent': self.state_exit_unchanged,
            }
        }

        try:
            host_states[self.state][self.check_host_state()]()
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=to_native(runtime_fault.msg))
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=to_native(method_fault.msg))
        except Exception as e:
            # Catch-all so unexpected errors still produce a clean module failure
            self.module.fail_json(msg=to_native(e))
+
    def check_host_state(self):
        """Determine the host's state relative to the requested location.

        Returns one of:
          - 'absent':  host is not registered in vCenter at all
          - 'present': host exists under the target cluster/folder
          - 'update':  host exists, but under a different cluster/folder

        Side effects: sets self.host_update, and depending on the lookup path
        also self.host, self.cluster, self.folder and
        self.host_parent_compute_resource for later handlers.
        """
        # Check if the host is already connected to vCenter
        self.host_update = find_hostsystem_by_name(self.content, self.esxi_hostname)
        if self.host_update:
            # The host name is unique in vCenter; A host with the same name cannot exist in another datacenter
            # However, the module will fail later if the target folder/cluster is in another datacenter as the host
            # Check if the host is connected under the target cluster
            if self.cluster_name:
                self.host, self.cluster = self.search_cluster(self.datacenter_name, self.cluster_name, self.esxi_hostname)
                if self.host:
                    state = 'present'
                else:
                    state = 'update'
            # Check if the host is connected under the target folder
            elif self.folder_name:
                self.folder = self.search_folder(self.folder_name)
                for child in self.folder.childEntity:
                    if not child or not isinstance(child, vim.ComputeResource):
                        continue
                    try:
                        # A standalone host appears as a ComputeResource wrapping
                        # (at least) one HostSystem; host[0] raises IndexError if empty
                        if isinstance(child.host[0], vim.HostSystem) and child.name == self.esxi_hostname:
                            self.host_parent_compute_resource = child
                            self.host = child.host[0]
                            break
                    except IndexError:
                        continue
                if self.host:
                    state = 'present'
                else:
                    state = 'update'
        else:
            state = 'absent'
        # NOTE(review): if host_update exists but neither cluster_name nor folder
        # is set, 'state' would be unbound; the argument spec's required_one_of
        # prevents that combination.
        return state
+
+ def search_folder(self, folder_name):
+ """
+ Search folder in vCenter
+ Returns: folder object
+ """
+ search_index = self.content.searchIndex
+ folder_obj = search_index.FindByInventoryPath(folder_name)
+ if not (folder_obj and isinstance(folder_obj, vim.Folder)):
+ self.module.fail_json(msg="Folder '%s' not found" % folder_name)
+ return folder_obj
+
+ def search_cluster(self, datacenter_name, cluster_name, esxi_hostname):
+ """
+ Search cluster in vCenter
+ Returns: host and cluster object
+ """
+ return find_host_by_cluster_datacenter(
+ self.module, self.content, datacenter_name, cluster_name, esxi_hostname
+ )
+
    def state_exit_unchanged(self):
        """Exit reporting no change, unless a disconnected host must reconnect.

        When reconnect_disconnected is enabled and the host is currently
        disconnected, delegates to state_reconnect_host() (which exits the
        module itself) instead of exiting unchanged.
        """
        if not self.host_update:
            result = "Host already disconnected"
        elif self.reconnect_disconnected and self.host_update.runtime.connectionState == 'disconnected':
            self.state_reconnect_host()
        else:
            # required_one_of guarantees one of folder/cluster_name is set
            if self.folder_name:
                result = "Host already connected to vCenter '%s' in folder '%s'" % (self.vcenter, self.folder_name)
            elif self.cluster_name:
                result = "Host already connected to vCenter '%s' in cluster '%s'" % (self.vcenter, self.cluster_name)
        self.module.exit_json(changed=False, result=str(result))
+
    def state_add_host(self):
        """Add ESXi host to a cluster or folder in vCenter.

        Builds a connect spec (optionally fetching the SSL thumbprint), then
        calls AddStandaloneHost (folder target) or AddHost_Task (cluster
        target), mapping each documented pyVmomi fault to a readable module
        failure. Exits the module via exit_json().
        """
        changed = True
        result = None

        if self.module.check_mode:
            result = "Host would be connected to vCenter '%s'" % self.vcenter
        else:
            host_connect_spec = self.get_host_connect_spec()
            as_connected = self.params.get('add_connected')
            # License key and resource pool are intentionally not exposed as options
            esxi_license = None
            resource_pool = None
            task = None
            if self.folder_name:
                self.folder = self.search_folder(self.folder_name)
                try:
                    # Adds the host as a standalone ComputeResource under the folder
                    task = self.folder.AddStandaloneHost(
                        spec=host_connect_spec, compResSpec=resource_pool,
                        addConnected=as_connected, license=esxi_license
                    )
                except vim.fault.InvalidLogin as invalid_login:
                    self.module.fail_json(
                        msg="Cannot authenticate with the host : %s" % to_native(invalid_login)
                    )
                except vim.fault.HostConnectFault as connect_fault:
                    self.module.fail_json(
                        msg="An error occurred during connect : %s" % to_native(connect_fault)
                    )
                except vim.fault.DuplicateName as duplicate_name:
                    self.module.fail_json(
                        msg="The folder already contains a host with the same name : %s" %
                        to_native(duplicate_name)
                    )
                except vmodl.fault.InvalidArgument as invalid_argument:
                    self.module.fail_json(
                        msg="An argument was specified incorrectly : %s" % to_native(invalid_argument)
                    )
                except vim.fault.AlreadyBeingManaged as already_managed:
                    self.module.fail_json(
                        msg="The host is already being managed by another vCenter server : %s" %
                        to_native(already_managed)
                    )
                except vmodl.fault.NotEnoughLicenses as not_enough_licenses:
                    self.module.fail_json(
                        msg="There are not enough licenses to add this host : %s" % to_native(not_enough_licenses)
                    )
                except vim.fault.NoHost as no_host:
                    self.module.fail_json(
                        msg="Unable to contact the host : %s" % to_native(no_host)
                    )
                except vmodl.fault.NotSupported as not_supported:
                    self.module.fail_json(
                        msg="The folder is not a host folder : %s" % to_native(not_supported)
                    )
                except vim.fault.NotSupportedHost as host_not_supported:
                    self.module.fail_json(
                        msg="The host is running a software version that is not supported : %s" %
                        to_native(host_not_supported)
                    )
                except vim.fault.AgentInstallFailed as agent_install:
                    self.module.fail_json(
                        msg="Error during vCenter agent installation : %s" % to_native(agent_install)
                    )
                except vim.fault.AlreadyConnected as already_connected:
                    self.module.fail_json(
                        msg="The host is already connected to the vCenter server : %s" % to_native(already_connected)
                    )
                except vim.fault.SSLVerifyFault as ssl_fault:
                    self.module.fail_json(
                        msg="The host certificate could not be authenticated : %s" % to_native(ssl_fault)
                    )
            elif self.cluster_name:
                self.host, self.cluster = self.search_cluster(
                    self.datacenter_name,
                    self.cluster_name,
                    self.esxi_hostname
                )
                try:
                    task = self.cluster.AddHost_Task(
                        spec=host_connect_spec, asConnected=as_connected,
                        resourcePool=resource_pool, license=esxi_license
                    )
                except vim.fault.InvalidLogin as invalid_login:
                    self.module.fail_json(
                        msg="Cannot authenticate with the host : %s" % to_native(invalid_login)
                    )
                except vim.fault.HostConnectFault as connect_fault:
                    self.module.fail_json(
                        msg="An error occurred during connect : %s" % to_native(connect_fault)
                    )
                except vim.fault.DuplicateName as duplicate_name:
                    self.module.fail_json(
                        msg="The cluster already contains a host with the same name : %s" %
                        to_native(duplicate_name)
                    )
                except vim.fault.AlreadyBeingManaged as already_managed:
                    self.module.fail_json(
                        msg="The host is already being managed by another vCenter server : %s" %
                        to_native(already_managed)
                    )
                except vmodl.fault.NotEnoughLicenses as not_enough_licenses:
                    self.module.fail_json(
                        msg="There are not enough licenses to add this host : %s" % to_native(not_enough_licenses)
                    )
                except vim.fault.NoHost as no_host:
                    self.module.fail_json(
                        msg="Unable to contact the host : %s" % to_native(no_host)
                    )
                except vim.fault.NotSupportedHost as host_not_supported:
                    self.module.fail_json(
                        msg="The host is running a software version that is not supported; "
                        "It may still be possible to add the host as a stand-alone host : %s" %
                        to_native(host_not_supported)
                    )
                except vim.fault.TooManyHosts as too_many_hosts:
                    self.module.fail_json(
                        msg="No additional hosts can be added to the cluster : %s" % to_native(too_many_hosts)
                    )
                except vim.fault.AgentInstallFailed as agent_install:
                    self.module.fail_json(
                        msg="Error during vCenter agent installation : %s" % to_native(agent_install)
                    )
                except vim.fault.AlreadyConnected as already_connected:
                    self.module.fail_json(
                        msg="The host is already connected to the vCenter server : %s" % to_native(already_connected)
                    )
                except vim.fault.SSLVerifyFault as ssl_fault:
                    self.module.fail_json(
                        msg="The host certificate could not be authenticated : %s" % to_native(ssl_fault)
                    )
            # Wait for the add task; the result string replaces the raw task output
            try:
                changed, result = wait_for_task(task)
                result = "Host connected to vCenter '%s'" % self.vcenter
            except TaskError as task_error:
                self.module.fail_json(
                    msg="Failed to add host to vCenter '%s' : %s" % (self.vcenter, to_native(task_error))
                )

        self.module.exit_json(changed=changed, result=result)
+
+ def get_host_connect_spec(self):
+ """
+ Function to return Host connection specification
+ Returns: host connection specification
+ """
+ # Get the thumbprint of the SSL certificate
+ if self.fetch_ssl_thumbprint and self.esxi_ssl_thumbprint == '':
+ sslThumbprint = self.get_cert_fingerprint(self.esxi_hostname, self.module.params['port'],
+ self.module.params['proxy_host'], self.module.params['proxy_port'])
+ else:
+ sslThumbprint = self.esxi_ssl_thumbprint
+
+ host_connect_spec = vim.host.ConnectSpec()
+ host_connect_spec.sslThumbprint = sslThumbprint
+ host_connect_spec.hostName = self.esxi_hostname
+ host_connect_spec.userName = self.esxi_username
+ host_connect_spec.password = self.esxi_password
+ host_connect_spec.force = self.force_connection
+ return host_connect_spec
+
+ def state_reconnect_host(self):
+ """Reconnect host to vCenter"""
+ changed = True
+ result = None
+
+ if self.module.check_mode:
+ result = "Host would be reconnected to vCenter '%s'" % self.vcenter
+ else:
+ self.reconnect_host(self.host)
+ result = "Host reconnected to vCenter '%s'" % self.vcenter
+ self.module.exit_json(changed=changed, result=str(result))
+
    def reconnect_host(self, host_object):
        """Reconnect host to vCenter.

        Runs ReconnectHost_Task with syncState enabled and, when ESXi
        credentials were supplied, a fresh connection spec (including a
        re-fetched SSL thumbprint). Maps documented pyVmomi faults to module
        failures and waits for the task to complete. Does not exit the module
        on success; callers report their own result.
        """
        reconnecthost_args = {}
        reconnecthost_args['reconnectSpec'] = vim.HostSystem.ReconnectSpec()
        reconnecthost_args['reconnectSpec'].syncState = True

        if self.esxi_username and self.esxi_password:
            # Build the connection spec as well and fetch thumbprint if enabled
            # Useful if you reinstalled a host and it uses a new self-signed certificate
            reconnecthost_args['cnxSpec'] = self.get_host_connect_spec()
        try:
            task = host_object.ReconnectHost_Task(**reconnecthost_args)
        except vim.fault.InvalidLogin as invalid_login:
            self.module.fail_json(
                msg="Cannot authenticate with the host : %s" % to_native(invalid_login)
            )
        except vim.fault.InvalidState as invalid_state:
            self.module.fail_json(
                msg="The host is not disconnected : %s" % to_native(invalid_state)
            )
        except vim.fault.InvalidName as invalid_name:
            self.module.fail_json(
                msg="The host name is invalid : %s" % to_native(invalid_name)
            )
        except vim.fault.HostConnectFault as connect_fault:
            self.module.fail_json(
                msg="An error occurred during reconnect : %s" % to_native(connect_fault)
            )
        except vmodl.fault.NotSupported as not_supported:
            self.module.fail_json(
                msg="No host can be added to this group : %s" % to_native(not_supported)
            )
        except vim.fault.AlreadyBeingManaged as already_managed:
            self.module.fail_json(
                msg="The host is already being managed by another vCenter server : %s" % to_native(already_managed)
            )
        except vmodl.fault.NotEnoughLicenses as not_enough_licenses:
            self.module.fail_json(
                msg="There are not enough licenses to add this host : %s" % to_native(not_enough_licenses)
            )
        except vim.fault.NoHost as no_host:
            self.module.fail_json(
                msg="Unable to contact the host : %s" % to_native(no_host)
            )
        except vim.fault.NotSupportedHost as host_not_supported:
            self.module.fail_json(
                msg="The host is running a software version that is not supported : %s" %
                to_native(host_not_supported)
            )
        except vim.fault.SSLVerifyFault as ssl_fault:
            self.module.fail_json(
                msg="The host certificate could not be authenticated : %s" % to_native(ssl_fault)
            )
        try:
            changed, result = wait_for_task(task)
        except TaskError as task_error:
            self.module.fail_json(
                msg="Failed to reconnect host to vCenter '%s' due to %s" %
                (self.vcenter, to_native(task_error))
            )
+
+ def state_remove_host(self):
+ """Remove host from vCenter"""
+ changed = True
+ result = None
+ if self.module.check_mode:
+ result = "Host would be removed from vCenter '%s'" % self.vcenter
+ else:
+ # Check parent type
+ parent_type = self.get_parent_type(self.host_update)
+ if parent_type == 'cluster':
+ self.put_host_in_maintenance_mode(self.host_update)
+ try:
+ if self.folder_name:
+ task = self.host_parent_compute_resource.Destroy_Task()
+ elif self.cluster_name:
+ task = self.host.Destroy_Task()
+ except vim.fault.VimFault as vim_fault:
+ self.module.fail_json(msg=vim_fault)
+ try:
+ changed, result = wait_for_task(task)
+ result = "Host removed from vCenter '%s'" % self.vcenter
+ except TaskError as task_error:
+ self.module.fail_json(
+ msg="Failed to remove the host from vCenter '%s' : %s" % (self.vcenter, to_native(task_error))
+ )
+ self.module.exit_json(changed=changed, result=str(result))
+
+ def put_host_in_maintenance_mode(self, host_object):
+ """Put host in maintenance mode, if not already"""
+ if not host_object.runtime.inMaintenanceMode:
+ try:
+ try:
+ maintenance_mode_task = host_object.EnterMaintenanceMode_Task(300, True, None)
+ except vim.fault.InvalidState as invalid_state:
+ self.module.fail_json(
+ msg="The host is already in maintenance mode : %s" % to_native(invalid_state)
+ )
+ except vim.fault.Timedout as timed_out:
+ self.module.fail_json(
+ msg="The maintenance mode operation timed out : %s" % to_native(timed_out)
+ )
+ except vim.fault.Timedout as timed_out:
+ self.module.fail_json(
+ msg="The maintenance mode operation was canceled : %s" % to_native(timed_out)
+ )
+ wait_for_task(maintenance_mode_task)
+ except TaskError as task_err:
+ self.module.fail_json(
+ msg="Failed to put the host in maintenance mode : %s" % to_native(task_err)
+ )
+
+ def get_parent_type(self, host_object):
+ """
+ Get the type of the parent object
+ Returns: string with 'folder' or 'cluster'
+ """
+ object_type = None
+ # check 'vim.ClusterComputeResource' first because it's also an
+ # instance of 'vim.ComputeResource'
+ if isinstance(host_object.parent, vim.ClusterComputeResource):
+ object_type = 'cluster'
+ elif isinstance(host_object.parent, vim.ComputeResource):
+ object_type = 'folder'
+ return object_type
+
+ def state_update_host(self):
+ """Move host to a cluster or a folder, or vice versa"""
+ changed = True
+ result = None
+ reconnect = False
+
+ # Check if the host is disconnected if reconnect disconnected hosts is true
+ if self.reconnect_disconnected and self.host_update.runtime.connectionState == 'disconnected':
+ reconnect = True
+
+ # Check parent type
+ parent_type = self.get_parent_type(self.host_update)
+
+ if self.folder_name:
+ if self.module.check_mode:
+ if reconnect or self.state == 'add_or_reconnect' or self.state == 'reconnect':
+ result = "Host would be reconnected and moved to folder '%s'" % self.folder_name
+ else:
+ result = "Host would be moved to folder '%s'" % self.folder_name
+ else:
+ # Reconnect the host if disconnected or if specified by state
+ if reconnect or self.state == 'add_or_reconnect' or self.state == 'reconnect':
+ self.reconnect_host(self.host_update)
+ try:
+ try:
+ if parent_type == 'folder':
+ # Move ESXi host from folder to folder
+ task = self.folder.MoveIntoFolder_Task([self.host_update.parent])
+ elif parent_type == 'cluster':
+ self.put_host_in_maintenance_mode(self.host_update)
+ # Move ESXi host from cluster to folder
+ task = self.folder.MoveIntoFolder_Task([self.host_update])
+ except vim.fault.DuplicateName as duplicate_name:
+ self.module.fail_json(
+ msg="The folder already contains an object with the specified name : %s" %
+ to_native(duplicate_name)
+ )
+ except vim.fault.InvalidFolder as invalid_folder:
+ self.module.fail_json(
+ msg="The parent of this folder is in the list of objects : %s" %
+ to_native(invalid_folder)
+ )
+ except vim.fault.InvalidState as invalid_state:
+ self.module.fail_json(
+ msg="Failed to move host, this can be due to either of following :"
+ " 1. The host is not part of the same datacenter, 2. The host is not in maintenance mode : %s" %
+ to_native(invalid_state)
+ )
+ except vmodl.fault.NotSupported as not_supported:
+ self.module.fail_json(
+ msg="The target folder is not a host folder : %s" %
+ to_native(not_supported)
+ )
+ except vim.fault.DisallowedOperationOnFailoverHost as failover_host:
+ self.module.fail_json(
+ msg="The host is configured as a failover host : %s" %
+ to_native(failover_host)
+ )
+ except vim.fault.VmAlreadyExistsInDatacenter as already_exists:
+ self.module.fail_json(
+ msg="The host's virtual machines are already registered to a host in "
+ "the destination datacenter : %s" % to_native(already_exists)
+ )
+ changed, result = wait_for_task(task)
+ except TaskError as task_error_exception:
+ task_error = task_error_exception.args[0]
+ self.module.fail_json(
+ msg="Failed to move host %s to folder %s due to %s" %
+ (self.esxi_hostname, self.folder_name, to_native(task_error))
+ )
+ if reconnect or self.state == 'add_or_reconnect' or self.state == 'reconnect':
+ result = "Host reconnected and moved to folder '%s'" % self.folder_name
+ else:
+ result = "Host moved to folder '%s'" % self.folder_name
+ elif self.cluster_name:
+ if self.module.check_mode:
+ result = "Host would be moved to cluster '%s'" % self.cluster_name
+ else:
+ if parent_type == 'cluster':
+ # Put host in maintenance mode if moved from another cluster
+ self.put_host_in_maintenance_mode(self.host_update)
+ resource_pool = None
+ try:
+ try:
+ task = self.cluster.MoveHostInto_Task(
+ host=self.host_update, resourcePool=resource_pool
+ )
+ except vim.fault.TooManyHosts as too_many_hosts:
+ self.module.fail_json(
+ msg="No additional hosts can be added to the cluster : %s" % to_native(too_many_hosts)
+ )
+ except vim.fault.InvalidState as invalid_state:
+ self.module.fail_json(
+ msg="The host is already part of a cluster and is not in maintenance mode : %s" %
+ to_native(invalid_state)
+ )
+ except vmodl.fault.InvalidArgument as invalid_argument:
+ self.module.fail_json(
+ msg="Failed to move host, this can be due to either of following :"
+ " 1. The host is is not a part of the same datacenter as the cluster,"
+ " 2. The source and destination clusters are the same : %s" %
+ to_native(invalid_argument)
+ )
+ changed, result = wait_for_task(task)
+ except TaskError as task_error_exception:
+ task_error = task_error_exception.args[0]
+ self.module.fail_json(
+ msg="Failed to move host to cluster '%s' due to : %s" %
+ (self.cluster_name, to_native(task_error))
+ )
+ if reconnect or self.state == 'add_or_reconnect' or self.state == 'reconnect':
+ result = "Host reconnected and moved to cluster '%s'" % self.cluster_name
+ else:
+ result = "Host moved to cluster '%s'" % self.cluster_name
+
+ self.module.exit_json(changed=changed, msg=str(result))
+
+ def state_disconnected_host(self):
+ """Disconnect host to vCenter"""
+ changed = True
+ result = None
+
+ if self.module.check_mode:
+ if self.host.runtime.connectionState == 'disconnected':
+ result = "Host already disconnected"
+ changed = False
+ else:
+ result = "Host would be disconnected host from vCenter '%s'" % self.vcenter
+ else:
+ if self.host.runtime.connectionState == 'disconnected':
+ changed = False
+ result = "Host already disconnected"
+ else:
+ self.disconnect_host(self.host)
+ result = "Host disconnected from vCenter '%s'" % self.vcenter
+ self.module.exit_json(changed=changed, result=to_native(result))
+
+ def disconnect_host(self, host_object):
+ """Disconnect host to vCenter"""
+ try:
+ task = host_object.DisconnectHost_Task()
+ except Exception as e:
+ self.module.fail_json(msg="Failed to disconnect host from vCenter: %s" % to_native(e))
+
+ try:
+ changed, result = wait_for_task(task)
+ except TaskError as task_error:
+ self.module.fail_json(
+ msg="Failed to disconnect host from vCenter '%s' due to %s" %
+ (self.vcenter, to_native(task_error))
+ )
+
+
def main():
    """Module entry point: build the argument spec and run VMwareHost."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        datacenter_name=dict(type='str', required=True, aliases=['datacenter']),
        cluster_name=dict(type='str', aliases=['cluster']),
        esxi_hostname=dict(type='str', required=True),
        esxi_username=dict(type='str'),
        esxi_password=dict(type='str', no_log=True),
        esxi_ssl_thumbprint=dict(type='str', default='', aliases=['ssl_thumbprint']),
        fetch_ssl_thumbprint=dict(type='bool', default=True),
        state=dict(
            type='str',
            default='present',
            choices=['present', 'absent', 'add_or_reconnect', 'reconnect', 'disconnected'],
        ),
        folder=dict(type='str', aliases=['folder_name']),
        add_connected=dict(type='bool', default=True),
        force_connection=dict(type='bool', default=True),
        reconnect_disconnected=dict(type='bool', default=True),
    )

    # Adding a host requires ESXi credentials; cluster and folder are
    # alternative, mutually exclusive placement targets.
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ['state', 'present', ['esxi_username', 'esxi_password']],
            ['state', 'add_or_reconnect', ['esxi_username', 'esxi_password']],
        ],
        required_one_of=[
            ['cluster_name', 'folder'],
        ],
        mutually_exclusive=[
            ['cluster_name', 'folder'],
        ],
    )

    VMwareHost(module).process_state()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_acceptance.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_acceptance.py
new file mode 100644
index 000000000..bbc989e02
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_acceptance.py
@@ -0,0 +1,186 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_acceptance
+short_description: Manage the host acceptance level of an ESXi host
+description:
+- This module can be used to manage the host acceptance level of an ESXi host.
+- The host acceptance level controls the acceptance level of each VIB on an ESXi host.
+author:
+- Abhijeet Kasurde (@Akasurde)
+options:
+ cluster_name:
+ description:
+ - Name of the cluster.
+    - Acceptance level of all ESXi host systems in the given cluster will be managed.
+ - If C(esxi_hostname) is not given, this parameter is required.
+ type: str
+ esxi_hostname:
+ description:
+ - ESXi hostname.
+ - Acceptance level of this ESXi host system will be managed.
+ - If C(cluster_name) is not given, this parameter is required.
+ type: str
+ state:
+ description:
+ - Set or list acceptance level of the given ESXi host.
+ - 'If set to C(list), then will return current acceptance level of given host system/s.'
+ - If set to C(present), then will set given acceptance level.
+ choices: [ list, present ]
+ required: false
+ default: 'list'
+ type: str
+ acceptance_level:
+ description:
+ - Name of acceptance level.
+ - If set to C(partner), then accept only partner and VMware signed and certified VIBs.
+ - If set to C(vmware_certified), then accept only VIBs that are signed and certified by VMware.
+ - If set to C(vmware_accepted), then accept VIBs that have been accepted by VMware.
+ - If set to C(community), then accept all VIBs, even those that are not signed.
+ choices: [ community, partner, vmware_accepted, vmware_certified ]
+ required: false
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Set acceptance level to community for all ESXi Host in given Cluster
+ community.vmware.vmware_host_acceptance:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: cluster_name
+ acceptance_level: 'community'
+ state: present
+ delegate_to: localhost
+ register: cluster_acceptance_level
+
+- name: Set acceptance level to vmware_accepted for the given ESXi Host
+ community.vmware.vmware_host_acceptance:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ acceptance_level: 'vmware_accepted'
+ state: present
+ delegate_to: localhost
+ register: host_acceptance_level
+
+- name: Get acceptance level from the given ESXi Host
+ community.vmware.vmware_host_acceptance:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ state: list
+ delegate_to: localhost
+ register: host_acceptance_level
+'''
+
+RETURN = r'''
+facts:
+ description:
+ - dict with hostname as key and dict with acceptance level facts, error as value
+ returned: facts
+ type: dict
+ sample: { "facts": { "localhost.localdomain": { "error": "NA", "level": "vmware_certified" }}}
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+from ansible.module_utils._text import to_native
+
+
class VMwareAccpetanceManager(PyVmomi):
    """Gather and manage the VIB acceptance level of one or more ESXi hosts.

    NOTE(review): the class name keeps its historical (misspelled) form because
    callers reference it by this exact identifier.
    """

    def __init__(self, module):
        super(VMwareAccpetanceManager, self).__init__(module)
        # Hosts are selected either by cluster or by explicit ESXi hostname.
        self.hosts = self.get_all_host_objs(
            cluster_name=self.params.get('cluster_name', None),
            esxi_host_name=self.params.get('esxi_hostname', None),
        )
        self.desired_state = self.params.get('state')
        self.acceptance_level = self.params.get('acceptance_level')
        self.hosts_facts = {}

    def gather_acceptance_facts(self):
        """Record each host's current acceptance level, or the API error text."""
        for host in self.hosts:
            facts = dict(level='', error='NA')
            self.hosts_facts[host.name] = facts
            image_mgr = host.configManager.imageConfigManager
            if not image_mgr:
                continue
            try:
                facts['level'] = image_mgr.HostImageConfigGetAcceptance()
            except vim.fault.HostConfigFault as host_fault:
                facts['error'] = to_native(host_fault.msg)

    def set_acceptance_level(self):
        """Apply the desired acceptance level where it differs, then exit the module."""
        changes = []
        for host in self.hosts:
            updated = False
            facts = self.hosts_facts[host.name]
            if facts['level'] != self.acceptance_level:
                image_mgr = host.configManager.imageConfigManager
                if image_mgr:
                    try:
                        if self.module.check_mode:
                            # Simulate the update without touching the host.
                            facts['level'] = self.acceptance_level
                        else:
                            image_mgr.UpdateHostImageAcceptanceLevel(newAcceptanceLevel=self.acceptance_level)
                            # Re-read so the reported level is what the host actually applied.
                            facts['level'] = image_mgr.HostImageConfigGetAcceptance()
                        updated = True
                    except vim.fault.HostConfigFault as host_fault:
                        facts['error'] = to_native(host_fault.msg)
            changes.append(updated)
        self.module.exit_json(changed=any(changes), facts=self.hosts_facts)

    def check_acceptance_state(self):
        """Dispatch: list the current facts or enforce the desired level."""
        self.gather_acceptance_facts()
        if self.desired_state == 'list':
            self.module.exit_json(changed=False, facts=self.hosts_facts)
        self.set_acceptance_level()
+
+
def main():
    """Entry point: declare module options, validate them, and run the manager."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
        acceptance_level=dict(
            type='str',
            choices=['community', 'partner', 'vmware_accepted', 'vmware_certified'],
        ),
        state=dict(type='str', choices=['list', 'present'], default='list'),
    )

    module = AnsibleModule(
        argument_spec=spec,
        # At least one way of selecting hosts must be provided.
        required_one_of=[['cluster_name', 'esxi_hostname']],
        # Setting a level only makes sense when a level is given.
        required_if=[['state', 'present', ['acceptance_level']]],
        supports_check_mode=True,
    )

    VMwareAccpetanceManager(module).check_acceptance_state()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_active_directory.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_active_directory.py
new file mode 100644
index 000000000..bc0c4a479
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_active_directory.py
@@ -0,0 +1,360 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_active_directory
+short_description: Joins an ESXi host system to an Active Directory domain or leaves it
+description:
+- This module can be used to join or leave an ESXi host to or from an Active Directory domain.
+author:
+- Christian Kotte (@ckotte)
+options:
+ ad_domain:
+ description:
+ - AD Domain to join.
+ type: str
+ default: ''
+ aliases: [ domain, domain_name ]
+ ad_user:
+ description:
+ - Username for AD domain join.
+ type: str
+ default: ''
+ ad_password:
+ description:
+ - Password for AD domain join.
+ type: str
+ default: ''
+ ad_state:
+ description:
+ - Whether the ESXi host is joined to an AD domain or not.
+ type: str
+ choices: [ present, absent ]
+ default: 'absent'
+ aliases: [ state ]
+ esxi_hostname:
+ description:
+ - Name of the host system to work with.
+ - This parameter is required if C(cluster_name) is not specified.
+ type: str
+ cluster_name:
+ description:
+ - Name of the cluster from which all host systems will be used.
+ - This parameter is required if C(esxi_hostname) is not specified.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Join an AD domain
+ community.vmware.vmware_host_active_directory:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ ad_domain: example.local
+ ad_user: adjoin
+ ad_password: Password123$
+ ad_state: present
+ delegate_to: localhost
+
+- name: Leave AD domain
+ community.vmware.vmware_host_active_directory:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ ad_state: absent
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+results:
+ description: metadata about host system's AD domain join state
+ returned: always
+ type: dict
+ sample: {
+ "esxi01": {
+ "changed": true,
+ "domain": "example.local",
+ "membership_state": "ok",
+ "msg": "Host joined to AD domain",
+ "ad_state": "present",
+ "ad_state_current": "present",
+ "ad_state_previous": "absent",
+ },
+ }
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, TaskError, vmware_argument_spec, wait_for_task
+from ansible.module_utils._text import to_native
+
+
class VmwareHostAdAuthentication(PyVmomi):
    """Manage Active Directory Authentication for an ESXi host system"""

    def __init__(self, module):
        super(VmwareHostAdAuthentication, self).__init__(module)
        cluster_name = self.params.get('cluster_name')
        esxi_host_name = self.params.get('esxi_hostname')
        # Target either a single host or every host of a cluster.
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        if not self.hosts:
            self.module.fail_json(msg="Failed to find host system.")

    def ensure(self):
        """Join or leave the AD domain so each host matches the desired C(ad_state).

        Exits the module with per-host results on success; fails the whole module
        on the first host that reports a join/leave error or a broken domain
        membership.
        """
        results = dict(changed=False, result=dict())
        desired_state = self.params.get('ad_state')
        domain = self.params.get('ad_domain')
        ad_user = self.params.get('ad_user')
        ad_password = self.params.get('ad_password')
        host_change_list = []
        for host in self.hosts:
            changed = False
            results['result'][host.name] = dict(msg='')

            active_directory_info = self.get_ad_info(host)

            results['result'][host.name]['ad_state'] = desired_state
            results['result'][host.name]['ad_domain'] = domain
            if desired_state == 'present':
                # Don't do anything if already enabled and joined
                if active_directory_info.enabled:
                    # Joined and no problems with the domain membership
                    if active_directory_info.domainMembershipStatus == 'ok':
                        results['result'][host.name]['changed'] = False
                        results['result'][host.name]['membership_state'] = active_directory_info.domainMembershipStatus
                        results['result'][host.name]['joined_domain'] = active_directory_info.joinedDomain
                        results['result'][host.name]['trusted_domains'] = active_directory_info.trustedDomain
                        results['result'][host.name]['msg'] = (
                            "Host is joined to AD domain and there are no problems with the domain membership"
                        )
                    # Joined, but problems with the domain membership: report the
                    # specific trust problem and fail so the user can intervene.
                    else:
                        changed = results['result'][host.name]['changed'] = True
                        results['result'][host.name]['membership_state'] = active_directory_info.domainMembershipStatus
                        results['result'][host.name]['joined_domain'] = active_directory_info.joinedDomain
                        results['result'][host.name]['trusted_domains'] = active_directory_info.trustedDomain
                        msg = host.name + " is joined to AD domain, but "
                        if active_directory_info.domainMembershipStatus == 'clientTrustBroken':
                            msg += "the client side of the trust relationship is broken"
                        elif active_directory_info.domainMembershipStatus == 'inconsistentTrust':
                            msg += "unexpected domain controller responded"
                        elif active_directory_info.domainMembershipStatus == 'noServers':
                            msg += "no domain controllers could be reached to confirm"
                        elif active_directory_info.domainMembershipStatus == 'serverTrustBroken':
                            msg += "the server side of the trust relationship is broken (or bad machine password)"
                        elif active_directory_info.domainMembershipStatus == 'otherProblem':
                            msg += "there are some problems with the domain membership"
                        elif active_directory_info.domainMembershipStatus == 'unknown':
                            msg += "the Active Directory integration provider does not support domain trust checks"
                        results['result'][host.name]['msg'] = msg
                        self.module.fail_json(msg=msg)
                # Enable and join AD domain
                else:
                    if self.module.check_mode:
                        changed = results['result'][host.name]['changed'] = True
                        results['result'][host.name]['ad_state_previous'] = "absent"
                        results['result'][host.name]['ad_state_current'] = "present"
                        results['result'][host.name]['msg'] = "Host would be joined to AD domain"
                    else:
                        ad_authentication = self.get_ad_auth_object(host)
                        try:
                            try:
                                task = ad_authentication.JoinDomain(
                                    domainName=domain, userName=ad_user, password=ad_password
                                )
                                wait_for_task(task)
                            except TaskError as task_err:
                                self.module.fail_json(
                                    msg="Failed to join domain : %s" % to_native(task_err)
                                )
                            changed = results['result'][host.name]['changed'] = True
                            results['result'][host.name]['ad_state_previous'] = "absent"
                            results['result'][host.name]['ad_state_current'] = "present"
                            results['result'][host.name]['msg'] = "Host joined to AD domain"
                            # Re-read AD info so the reported membership state reflects the join.
                            active_directory_info = self.get_ad_info(host)
                            results['result'][host.name]['membership_state'] = active_directory_info.domainMembershipStatus
                        except vim.fault.InvalidState as invalid_state:
                            self.module.fail_json(
                                msg="The host has already joined a domain : %s" % to_native(invalid_state.msg)
                            )
                        except vim.fault.HostConfigFault as host_fault:
                            self.module.fail_json(
                                msg="The host configuration prevents the join operation from succeeding : %s" %
                                to_native(host_fault.msg)
                            )
                        except vim.fault.InvalidLogin as invalid_login:
                            self.module.fail_json(
                                msg="Credentials aren't valid : %s" % to_native(invalid_login.msg)
                            )
                        except vim.fault.TaskInProgress as task_in_progress:
                            self.module.fail_json(
                                msg="The ActiveDirectoryAuthentication object is busy : %s" %
                                to_native(task_in_progress.msg)
                            )
                        except vim.fault.BlockedByFirewall as blocked_by_firewall:
                            self.module.fail_json(
                                msg="Ports needed by the join operation are blocked by the firewall : %s" %
                                to_native(blocked_by_firewall.msg)
                            )
                        except vim.fault.DomainNotFound as not_found:
                            self.module.fail_json(
                                msg="The domain controller can't be reached : %s" % to_native(not_found.msg)
                            )
                        except vim.fault.NoPermissionOnAD as no_permission:
                            self.module.fail_json(
                                msg="The specified user has no right to add hosts to the domain : %s" %
                                to_native(no_permission.msg)
                            )
                        except vim.fault.InvalidHostName as invalid_host:
                            self.module.fail_json(
                                msg="The domain part of the host's FQDN doesn't match the domain being joined : %s" %
                                to_native(invalid_host.msg)
                            )
                        except vim.fault.ClockSkew as clock_skew:
                            self.module.fail_json(
                                msg="The clocks of the host and the domain controller differ by more "
                                    "than the allowed amount of time : %s" % to_native(clock_skew.msg)
                            )
                        except vim.fault.ActiveDirectoryFault as ad_fault:
                            self.module.fail_json(
                                msg="An error occurred during AD join : %s" %
                                to_native(ad_fault.msg)
                            )
            elif desired_state == 'absent':
                # Don't do anything not joined to any AD domain
                if not active_directory_info.enabled:
                    results['result'][host.name]['changed'] = False
                    results['result'][host.name]['ad_state_current'] = "absent"
                    results['result'][host.name]['msg'] = "Host isn't joined to an AD domain"
                # Disable and leave AD domain
                else:
                    if self.module.check_mode:
                        changed = results['result'][host.name]['changed'] = True
                        results['result'][host.name]['ad_state_previous'] = "present"
                        results['result'][host.name]['ad_state_current'] = "absent"
                        results['result'][host.name]['msg'] = "Host would leave the AD domain '%s'" % \
                                                              active_directory_info.joinedDomain
                    else:
                        ad_authentication = self.get_ad_auth_object(host)
                        try:
                            try:
                                task = ad_authentication.LeaveCurrentDomain(force=True)
                                wait_for_task(task)
                            except TaskError as task_err:
                                # Fixed copy-paste error: this is the leave path, not the join path.
                                self.module.fail_json(
                                    msg="Failed to leave domain : %s" % to_native(task_err)
                                )
                            changed = results['result'][host.name]['changed'] = True
                            results['result'][host.name]['ad_state_previous'] = "present"
                            results['result'][host.name]['ad_state_current'] = "absent"
                            results['result'][host.name]['msg'] = "Host left the AD domain '%s'" % \
                                                                  active_directory_info.joinedDomain
                        except vim.fault.InvalidState as invalid_state:
                            self.module.fail_json(
                                msg="The host is not in a domain or there are active permissions for "
                                    "Active Directory users : %s" % to_native(invalid_state.msg)
                            )
                        except vim.fault.AuthMinimumAdminPermission as admin_permission:
                            self.module.fail_json(
                                msg="This change would leave the system with no Administrator permission "
                                    "on the root node : %s" % to_native(admin_permission.msg)
                            )
                        except vim.fault.TaskInProgress as task_in_progress:
                            self.module.fail_json(
                                msg="The ActiveDirectoryAuthentication object is busy : %s" %
                                to_native(task_in_progress.msg)
                            )
                        except vim.fault.NonADUserRequired as non_ad_user:
                            self.module.fail_json(
                                msg="Only non Active Directory users can initiate the leave domain operation : %s" %
                                to_native(non_ad_user.msg)
                            )
                        except vim.fault.ActiveDirectoryFault as ad_fault:
                            self.module.fail_json(
                                msg="An error occurred during AD leave : %s" %
                                to_native(ad_fault.msg)
                            )

            host_change_list.append(changed)

        if any(host_change_list):
            results['changed'] = True
        self.module.exit_json(**results)

    def get_ad_info(self, host_object):
        """Get info about AD membership.

        Returns the vim.host.ActiveDirectoryInfo entry from the host's
        authentication manager config; fails the module if none exists.
        """
        active_directory_info = None
        authentication_store_info = host_object.config.authenticationManagerInfo.authConfig
        for authentication_info in authentication_store_info:
            if isinstance(authentication_info, vim.host.ActiveDirectoryInfo):
                active_directory_info = authentication_info
                break
        if not active_directory_info:
            self.module.fail_json(
                msg="Failed to get Active Directory info from authentication manager"
            )
        return active_directory_info

    def get_ad_auth_object(self, host_object):
        """Get AD authentication managed object.

        Returns the vim.host.ActiveDirectoryAuthentication store used to issue
        JoinDomain/LeaveCurrentDomain; fails the module if none exists.
        """
        ad_authentication = None
        authentication_store_info = host_object.configManager.authenticationManager.supportedStore
        for store_info in authentication_store_info:
            if isinstance(store_info, vim.host.ActiveDirectoryAuthentication):
                ad_authentication = store_info
                break
        if not ad_authentication:
            self.module.fail_json(
                msg="Failed to get Active Directory authentication managed object from authentication manager"
            )
        return ad_authentication
+
+
def main():
    """Entry point: build the argument spec and run the AD authentication manager."""
    spec = vmware_argument_spec()
    spec.update(
        ad_domain=dict(type='str', default='', aliases=['domain', 'domain_name']),
        ad_user=dict(type='str', default=''),
        ad_password=dict(type='str', default='', no_log=True),
        ad_state=dict(default='absent', choices=['present', 'absent'], aliases=['state']),
        esxi_hostname=dict(type='str', required=False),
        cluster_name=dict(type='str', required=False),
    )

    module = AnsibleModule(
        argument_spec=spec,
        # A host target is mandatory: either one host or a whole cluster.
        required_one_of=[['cluster_name', 'esxi_hostname']],
        # Joining requires the domain plus credentials.
        required_if=[['ad_state', 'present', ['ad_domain', 'ad_user', 'ad_password']]],
        supports_check_mode=True,
    )

    VmwareHostAdAuthentication(module).ensure()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_auto_start.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_auto_start.py
new file mode 100644
index 000000000..2aafc1ab6
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_auto_start.py
@@ -0,0 +1,379 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, sky-joker
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: vmware_host_auto_start
+short_description: Manage the auto power ON or OFF for vm on ESXi host
+author:
+ - sky-joker (@sky-joker)
+description:
+  - This module can be used to set up automatic startup and shutdown of virtual machines according to host startup or shutdown.
+options:
+ esxi_hostname:
+ description:
+ - ESXi hostname where the VM to set auto power on or off exists.
+ type: str
+ required: true
+ name:
+ description:
+ - VM name to set auto power on or off.
+ - This is not necessary if change only system default VM settings for autoStart config.
+ type: str
+ uuid:
+ description:
+ - VM uuid to set auto power on or off, this is VMware's unique identifier.
+ - This is required if C(name) is not supplied.
+ - This is not necessary if change only system default VM settings for autoStart config.
+ type: str
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ type: bool
+ default: false
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ type: str
+ system_defaults:
+ description:
+ - System defaults for auto-start or auto-stop config for virtual machine.
+ type: dict
+ suboptions:
+ enabled:
+ description:
+ - Enable automatically start or stop of virtual machines.
+ type: bool
+ default: false
+ start_delay:
+ description:
+ - Default auto start delay in seconds.
+ type: int
+ default: 120
+ stop_action:
+ description:
+ - Default stop action executed on the virtual machine when the system stops.
+ type: str
+ choices: ['none', 'guestShutdown', 'powerOff', 'suspend']
+ default: powerOff
+ stop_delay:
+ description:
+ - Default auto stop delay in seconds.
+ type: int
+ default: 120
+ wait_for_heartbeat:
+ description:
+ - Continue power on processing when VMware Tools started.
+        - If this parameter is enabled, the system powers on the next virtual machine without waiting for the delay to pass.
+ - However, the virtual machine must have VMware Tools installed.
+ type: bool
+ default: false
+ power_info:
+ description:
+ - Startup or shutdown settings of virtual machine.
+ - This setting will override the system defaults.
+ type: dict
+ default:
+ start_action: none
+ start_delay: -1
+ start_order: -1
+ stop_action: systemDefault
+ stop_delay: -1
+ wait_for_heartbeat: systemDefault
+ suboptions:
+ start_action:
+ description:
+ - Whether to start the virtual machine when the host startup.
+ type: str
+ choices: ['none', 'powerOn']
+ default: none
+ start_delay:
+ description:
+ - Auto start delay in seconds of virtual machine.
+ type: int
+ default: -1
+ start_order:
+ description:
+ - The autostart priority of virtual machine.
+ - Virtual machines with a lower number are powered on first.
+ - On host shutdown, the virtual machines are shut down in reverse order, meaning those with a higher number are powered off first.
+ type: int
+ default: -1
+ stop_action:
+ description:
+ - Stop action executed on the virtual machine when the system stops of virtual machine.
+ choices: ['none', 'systemDefault', 'powerOff', 'suspend']
+ type: str
+ default: systemDefault
+ stop_delay:
+ description:
+ - Auto stop delay in seconds of virtual machine.
+ type: int
+ default: -1
+ wait_for_heartbeat:
+ description:
+ - Continue power on processing when VMware Tools started.
+ type: str
+ choices: ['no', 'yes', 'systemDefault']
+ default: systemDefault
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+---
+- name: Update for system defaults config.
+ community.vmware.vmware_host_auto_start:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ esxi_hostname: "{{ esxi_hostname }}"
+ system_defaults:
+ enabled: true
+ start_delay: 100
+ stop_action: guestShutdown
+
+- name: Update for powerInfo config of virtual machine.
+ community.vmware.vmware_host_auto_start:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ esxi_hostname: "{{ esxi_hostname }}"
+ name: "{{ vm_name }}"
+ power_info:
+ start_action: powerOn
+ start_delay: 10
+ start_order: 1
+ stop_action: powerOff
+    wait_for_heartbeat: 'yes'
+'''
+
+RETURN = r'''
+system_defaults_config:
+ description: Parameter return when system defaults config is changed.
+ returned: changed
+ type: dict
+ sample: >-
+ {
+ "enabled": true,
+ "start_delay": 120,
+ "stop_action": "powerOff",
+ "stop_delay": 120,
+ "wait_for_heartbeat": false
+ }
+power_info_config:
+ description: Parameter return when virtual machine power info config is changed.
+ returned: changed
+ type: dict
+ sample: >-
+ {
+ "start_action": "powerOn",
+ "start_delay": -1,
+ "start_order": -1,
+ "stop_action": "systemDefault",
+ "stop_delay": -1,
+ "wait_for_heartbeat": "systemDefault"
+ }
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ try:
+ from ordereddict import OrderedDict
+ except ImportError:
+ pass
+
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+
class VMwareHostAutoStartManager(PyVmomi):
    """Manage the ESXi host autoStart configuration: the host-wide system
    defaults and/or the per-VM power-on/power-off (powerInfo) settings."""

    def __init__(self, module):
        """Cache the module parameters used by execute()."""
        super(VMwareHostAutoStartManager, self).__init__(module)
        self.esxi_hostname = self.params['esxi_hostname']
        self.name = self.params['name']
        self.uuid = self.params['uuid']
        self.moid = self.params['moid']
        self.system_defaults = self.params['system_defaults']
        self.power_info = self.params['power_info']

    def generate_system_defaults_config(self):
        """Build a vim.host.AutoStartManager.SystemDefaults object from the
        'system_defaults' module parameter."""
        system_defaults_config = vim.host.AutoStartManager.SystemDefaults()
        system_defaults_config.enabled = self.system_defaults['enabled']
        system_defaults_config.startDelay = self.system_defaults['start_delay']
        system_defaults_config.stopAction = self.system_defaults['stop_action']
        system_defaults_config.stopDelay = self.system_defaults['stop_delay']
        system_defaults_config.waitForHeartbeat = self.system_defaults['wait_for_heartbeat']

        return system_defaults_config

    def generate_power_info_config(self):
        """Build a vim.host.AutoStartManager.AutoPowerInfo object from the
        'power_info' module parameter, keyed to the previously resolved VM
        (self.vm_obj is set by execute() before this is called)."""
        power_info_config = vim.host.AutoStartManager.AutoPowerInfo()
        power_info_config.key = self.vm_obj
        power_info_config.startAction = self.power_info['start_action']
        power_info_config.startDelay = self.power_info['start_delay']
        power_info_config.startOrder = self.power_info['start_order']
        power_info_config.stopAction = self.power_info['stop_action']
        power_info_config.stopDelay = self.power_info['stop_delay']
        power_info_config.waitForHeartbeat = self.power_info['wait_for_heartbeat']

        return power_info_config

    def execute(self):
        """Diff the desired autoStart settings against the host's current ones
        and reconfigure only when something actually differs (supports check
        mode and returns a before/after diff)."""
        result = dict(changed=False, diff={'before': {}, 'after': {}})

        host_obj = self.find_hostsystem_by_name(self.esxi_hostname)
        if not host_obj:
            self.module.fail_json(msg="Cannot find the specified ESXi host: %s" % self.esxi_hostname)

        self.vm_obj = None
        if self.name or self.uuid or self.moid:
            self.vm_obj = self.get_vm()
            if not self.vm_obj:
                self.module.fail_json(msg="Cannot find the specified VM: %s" % (self.name or self.uuid or self.moid))
            elif self.esxi_hostname != self.vm_obj.runtime.host.name:
                # Per-VM autoStart settings live on the host running the VM, so
                # the VM must actually reside on the targeted host.
                self.module.fail_json(msg="%s exists on another host: %s" % (self.name or self.uuid or self.moid, self.vm_obj.runtime.host.name))

        # Check the existing autoStart setting difference.
        system_defaults_config_difference = False
        existing_system_defaults = self.to_json(host_obj.config.autoStart.defaults)
        system_defaults_for_compare = dict(
            enabled=existing_system_defaults['enabled'],
            start_delay=existing_system_defaults['startDelay'],
            stop_action=existing_system_defaults['stopAction'],
            stop_delay=existing_system_defaults['stopDelay'],
            wait_for_heartbeat=existing_system_defaults['waitForHeartbeat']
        )
        if self.system_defaults:
            # NOTE(review): the serialized stop_action appears to come back
            # lower-cased (e.g. 'guestshutdown'), so it is normalized back to
            # the camelCase values the module accepts before comparing —
            # confirm against to_json()'s behavior.
            if 'guestshutdown' == system_defaults_for_compare['stop_action']:
                system_defaults_for_compare['stop_action'] = 'guestShutdown'

            if 'poweroff' == system_defaults_for_compare['stop_action']:
                system_defaults_for_compare['stop_action'] = 'powerOff'

            if system_defaults_for_compare != self.system_defaults:
                # Sorted OrderedDicts give a stable key order in the diff output.
                result['diff']['before']['system_defaults'] = OrderedDict(sorted(system_defaults_for_compare.items()))
                result['diff']['after']['system_defaults'] = OrderedDict(sorted(self.system_defaults.items()))
                system_defaults_config_difference = True

        # Check the existing autoStart powerInfo setting difference for VM.
        vm_power_info_config_difference = False
        existing_vm_power_info = {}
        # Per-VM settings are only compared when autoStart is enabled host-wide
        # and a VM was targeted.
        if system_defaults_for_compare['enabled'] and self.vm_obj:
            for vm_power_info in host_obj.config.autoStart.powerInfo:
                if vm_power_info.key == self.vm_obj:
                    existing_vm_power_info = self.to_json(vm_power_info)
                    break

            if existing_vm_power_info:
                vm_power_info_for_compare = dict(
                    start_action=existing_vm_power_info['startAction'],
                    start_delay=existing_vm_power_info['startDelay'],
                    start_order=existing_vm_power_info['startOrder'],
                    stop_action=existing_vm_power_info['stopAction'],
                    stop_delay=existing_vm_power_info['stopDelay'],
                    wait_for_heartbeat=existing_vm_power_info['waitForHeartbeat']
                )
            else:
                # The VM has no powerInfo entry yet: compare against the
                # documented defaults (matches the 'power_info' option default).
                vm_power_info_for_compare = dict(
                    start_action='none',
                    start_delay=-1,
                    start_order=-1,
                    stop_action='systemDefault',
                    stop_delay=-1,
                    wait_for_heartbeat='systemDefault'
                )

            if vm_power_info_for_compare != self.power_info:
                result['diff']['before']['power_info'] = OrderedDict(sorted(vm_power_info_for_compare.items()))
                result['diff']['after']['power_info'] = OrderedDict(sorted(self.power_info.items()))
                vm_power_info_config_difference = True

        # Only the parts that differ are included in the reconfigure spec.
        auto_start_manager_config = vim.host.AutoStartManager.Config()
        auto_start_manager_config.powerInfo = []
        if system_defaults_config_difference or vm_power_info_config_difference:
            if system_defaults_config_difference:
                auto_start_manager_config.defaults = self.generate_system_defaults_config()
                result['system_defaults_config'] = self.system_defaults

            if vm_power_info_config_difference:
                auto_start_manager_config.powerInfo = [self.generate_power_info_config()]
                result['power_info_config'] = self.power_info

            if self.module.check_mode:
                # A difference was found: report changed without touching the host.
                result['changed'] = True
                self.module.exit_json(**result)

            try:
                host_obj.configManager.autoStartManager.ReconfigureAutostart(spec=auto_start_manager_config)
                result['changed'] = True
                self.module.exit_json(**result)
            except Exception as e:
                self.module.fail_json(msg=to_native(e))

            self.module.exit_json(**result)
        else:
            # Nothing differs: report unchanged.
            self.module.exit_json(**result)
+
+
def main():
    """Entry point: declare options for the autoStart settings and run the manager."""
    # Host-wide defaults for automatic VM start/stop.
    system_defaults_options = dict(
        enabled=dict(type='bool', default=False),
        start_delay=dict(type='int', default=120),
        stop_action=dict(type='str',
                         choices=['none', 'guestShutdown', 'powerOff', 'suspend'],
                         default='powerOff'),
        stop_delay=dict(type='int', default=120),
        wait_for_heartbeat=dict(type='bool', default=False),
    )
    # Per-VM overrides of the host-wide defaults.
    power_info_options = dict(
        start_action=dict(type='str', choices=['none', 'powerOn'], default='none'),
        start_delay=dict(type='int', default=-1),
        start_order=dict(type='int', default=-1),
        stop_action=dict(type='str',
                         choices=['none', 'systemDefault', 'powerOff', 'suspend'],
                         default='systemDefault'),
        stop_delay=dict(type='int', default=-1),
        wait_for_heartbeat=dict(type='str',
                                choices=['no', 'yes', 'systemDefault'],
                                default='systemDefault'),
    )
    power_info_default = dict(
        start_action='none',
        start_delay=-1,
        start_order=-1,
        stop_action='systemDefault',
        stop_delay=-1,
        wait_for_heartbeat='systemDefault',
    )

    argument_spec = vmware_argument_spec()
    argument_spec.update(
        esxi_hostname=dict(type='str', required=True),
        name=dict(type='str'),
        uuid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        moid=dict(type='str'),
        system_defaults=dict(type='dict', options=system_defaults_options),
        power_info=dict(type='dict', options=power_info_options, default=power_info_default),
    )

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    VMwareHostAutoStartManager(module).execute()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_capability_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_capability_info.py
new file mode 100644
index 000000000..30d0864a1
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_capability_info.py
@@ -0,0 +1,222 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_capability_info
+short_description: Gathers info about an ESXi host's capability information
+description:
+- This module can be used to gather information about an ESXi host's capability information when ESXi hostname or Cluster name is given.
+author:
+- Abhijeet Kasurde (@Akasurde)
+options:
+ cluster_name:
+ description:
+    - Name of the cluster from which all host systems are used for information gathering.
+ - If C(esxi_hostname) is not given, this parameter is required.
+ type: str
+ esxi_hostname:
+ description:
+ - ESXi hostname to gather information from.
+ - If C(cluster_name) is not given, this parameter is required.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather capability info about all ESXi Host in given Cluster
+ community.vmware.vmware_host_capability_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: cluster_name
+ delegate_to: localhost
+ register: all_cluster_hosts_info
+
+- name: Gather capability info about ESXi Host
+ community.vmware.vmware_host_capability_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ delegate_to: localhost
+ register: hosts_info
+'''
+
+RETURN = r'''
+hosts_capability_info:
+ description: metadata about host's capability info
+ returned: always
+ type: dict
+ sample: {
+ "esxi_hostname_0001": {
+ "accel3dSupported": false,
+ "backgroundSnapshotsSupported": false,
+ "checkpointFtCompatibilityIssues": [],
+ "checkpointFtSupported": false,
+ "cloneFromSnapshotSupported": true,
+ "cpuHwMmuSupported": true,
+ }
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+
+
+class CapabilityInfoManager(PyVmomi):
+    """Collect HostCapability information for one or more ESXi hosts."""
+
+    def __init__(self, module):
+        super(CapabilityInfoManager, self).__init__(module)
+        # Exactly one of cluster_name / esxi_hostname is supplied (enforced by
+        # required_one_of in the argument spec); resolve to HostSystem objects.
+        cluster_name = self.params.get('cluster_name', None)
+        esxi_host_name = self.params.get('esxi_hostname', None)
+        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
+
+    def gather_host_capability_info(self):
+        """Return a dict keyed by host name, each value being a dict of
+        selected ``host.capability`` properties (array-valued properties are
+        converted to plain lists)."""
+        hosts_capability_info = dict()
+        for host in self.hosts:
+            hc = host.capability
+            hosts_capability_info[host.name] = dict(
+                recursiveResourcePoolsSupported=hc.recursiveResourcePoolsSupported,
+                cpuMemoryResourceConfigurationSupported=hc.cpuMemoryResourceConfigurationSupported,
+                rebootSupported=hc.rebootSupported,
+                shutdownSupported=hc.shutdownSupported,
+                vmotionSupported=hc.vmotionSupported,
+                standbySupported=hc.standbySupported,
+                ipmiSupported=hc.ipmiSupported,
+                maxSupportedVMs=hc.maxSupportedVMs,
+                maxRunningVMs=hc.maxRunningVMs,
+                maxSupportedVcpus=hc.maxSupportedVcpus,
+                maxRegisteredVMs=hc.maxRegisteredVMs,
+                datastorePrincipalSupported=hc.datastorePrincipalSupported,
+                sanSupported=hc.sanSupported,
+                nfsSupported=hc.nfsSupported,
+                iscsiSupported=hc.iscsiSupported,
+                vlanTaggingSupported=hc.vlanTaggingSupported,
+                nicTeamingSupported=hc.nicTeamingSupported,
+                highGuestMemSupported=hc.highGuestMemSupported,
+                maintenanceModeSupported=hc.maintenanceModeSupported,
+                suspendedRelocateSupported=hc.suspendedRelocateSupported,
+                restrictedSnapshotRelocateSupported=hc.restrictedSnapshotRelocateSupported,
+                perVmSwapFiles=hc.perVmSwapFiles,
+                localSwapDatastoreSupported=hc.localSwapDatastoreSupported,
+                unsharedSwapVMotionSupported=hc.unsharedSwapVMotionSupported,
+                backgroundSnapshotsSupported=hc.backgroundSnapshotsSupported,
+                preAssignedPCIUnitNumbersSupported=hc.preAssignedPCIUnitNumbersSupported,
+                screenshotSupported=hc.screenshotSupported,
+                scaledScreenshotSupported=hc.scaledScreenshotSupported,
+                storageVMotionSupported=hc.storageVMotionSupported,
+                vmotionWithStorageVMotionSupported=hc.vmotionWithStorageVMotionSupported,
+                vmotionAcrossNetworkSupported=hc.vmotionAcrossNetworkSupported,
+                maxNumDisksSVMotion=hc.maxNumDisksSVMotion,
+                hbrNicSelectionSupported=hc.hbrNicSelectionSupported,
+                vrNfcNicSelectionSupported=hc.vrNfcNicSelectionSupported,
+                recordReplaySupported=hc.recordReplaySupported,
+                ftSupported=hc.ftSupported,
+                replayUnsupportedReason=hc.replayUnsupportedReason,
+                smpFtSupported=hc.smpFtSupported,
+                maxVcpusPerFtVm=hc.maxVcpusPerFtVm,
+                loginBySSLThumbprintSupported=hc.loginBySSLThumbprintSupported,
+                cloneFromSnapshotSupported=hc.cloneFromSnapshotSupported,
+                deltaDiskBackingsSupported=hc.deltaDiskBackingsSupported,
+                perVMNetworkTrafficShapingSupported=hc.perVMNetworkTrafficShapingSupported,
+                tpmSupported=hc.tpmSupported,
+                virtualExecUsageSupported=hc.virtualExecUsageSupported,
+                storageIORMSupported=hc.storageIORMSupported,
+                vmDirectPathGen2Supported=hc.vmDirectPathGen2Supported,
+                vmDirectPathGen2UnsupportedReasonExtended=hc.vmDirectPathGen2UnsupportedReasonExtended,
+                vStorageCapable=hc.vStorageCapable,
+                snapshotRelayoutSupported=hc.snapshotRelayoutSupported,
+                firewallIpRulesSupported=hc.firewallIpRulesSupported,
+                servicePackageInfoSupported=hc.servicePackageInfoSupported,
+                maxHostRunningVms=hc.maxHostRunningVms,
+                maxHostSupportedVcpus=hc.maxHostSupportedVcpus,
+                vmfsDatastoreMountCapable=hc.vmfsDatastoreMountCapable,
+                eightPlusHostVmfsSharedAccessSupported=hc.eightPlusHostVmfsSharedAccessSupported,
+                nestedHVSupported=hc.nestedHVSupported,
+                vPMCSupported=hc.vPMCSupported,
+                interVMCommunicationThroughVMCISupported=hc.interVMCommunicationThroughVMCISupported,
+                scheduledHardwareUpgradeSupported=hc.scheduledHardwareUpgradeSupported,
+                featureCapabilitiesSupported=hc.featureCapabilitiesSupported,
+                latencySensitivitySupported=hc.latencySensitivitySupported,
+                storagePolicySupported=hc.storagePolicySupported,
+                accel3dSupported=hc.accel3dSupported,
+                reliableMemoryAware=hc.reliableMemoryAware,
+                multipleNetworkStackInstanceSupported=hc.multipleNetworkStackInstanceSupported,
+                messageBusProxySupported=hc.messageBusProxySupported,
+                vsanSupported=hc.vsanSupported,
+                vFlashSupported=hc.vFlashSupported,
+                hostAccessManagerSupported=hc.hostAccessManagerSupported,
+                provisioningNicSelectionSupported=hc.provisioningNicSelectionSupported,
+                nfs41Supported=hc.nfs41Supported,
+                nfs41Krb5iSupported=hc.nfs41Krb5iSupported,
+                turnDiskLocatorLedSupported=hc.turnDiskLocatorLedSupported,
+                virtualVolumeDatastoreSupported=hc.virtualVolumeDatastoreSupported,
+                markAsSsdSupported=hc.markAsSsdSupported,
+                markAsLocalSupported=hc.markAsLocalSupported,
+                smartCardAuthenticationSupported=hc.smartCardAuthenticationSupported,
+                cryptoSupported=hc.cryptoSupported,
+                oneKVolumeAPIsSupported=hc.oneKVolumeAPIsSupported,
+                gatewayOnNicSupported=hc.gatewayOnNicSupported,
+                upitSupported=hc.upitSupported,
+                cpuHwMmuSupported=hc.cpuHwMmuSupported,
+                encryptedVMotionSupported=hc.encryptedVMotionSupported,
+                encryptionChangeOnAddRemoveSupported=hc.encryptionChangeOnAddRemoveSupported,
+                encryptionHotOperationSupported=hc.encryptionHotOperationSupported,
+                encryptionWithSnapshotsSupported=hc.encryptionWithSnapshotsSupported,
+                encryptionFaultToleranceSupported=hc.encryptionFaultToleranceSupported,
+                encryptionMemorySaveSupported=hc.encryptionMemorySaveSupported,
+                encryptionRDMSupported=hc.encryptionRDMSupported,
+                encryptionVFlashSupported=hc.encryptionVFlashSupported,
+                encryptionCBRCSupported=hc.encryptionCBRCSupported,
+                encryptionHBRSupported=hc.encryptionHBRSupported,
+                supportedVmfsMajorVersion=list(hc.supportedVmfsMajorVersion),
+                vmDirectPathGen2UnsupportedReason=list(hc.vmDirectPathGen2UnsupportedReason),
+                ftCompatibilityIssues=list(hc.ftCompatibilityIssues),
+                smpFtCompatibilityIssues=list(hc.smpFtCompatibilityIssues),
+            )
+
+            # The `checkpointFtSupported` and `checkpointFtCompatibilityIssues` properties have been removed from pyvmomi 7.0.
+            # The parameters can be substituted as follows.
+            # checkpointFtSupported => smpFtSupported
+            # checkpointFtCompatibilityIssues => smpFtCompatibilityIssues.
+            # So add `checkpointFtSupported` and `checkpointFtCompatibilityIssues` keys for compatibility with previous versions.
+            # https://github.com/ansible-collections/vmware/pull/118
+            hosts_capability_info[host.name]['checkpointFtSupported'] = hosts_capability_info[host.name]['smpFtSupported']
+            hosts_capability_info[host.name]['checkpointFtCompatibilityIssues'] = hosts_capability_info[host.name]['smpFtCompatibilityIssues']
+
+        return hosts_capability_info
+
+
+def main():
+    # Standard VMware connection options plus the host-selection parameters.
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(
+        cluster_name=dict(type='str', required=False),
+        esxi_hostname=dict(type='str', required=False),
+    )
+
+    # Info-only module (never changes state), so check mode is always safe.
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_one_of=[
+            ['cluster_name', 'esxi_hostname'],
+        ],
+        supports_check_mode=True,
+    )
+
+    host_capability_manager = CapabilityInfoManager(module)
+    module.exit_json(changed=False,
+                     hosts_capability_info=host_capability_manager.gather_host_capability_info())
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_config_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_config_info.py
new file mode 100644
index 000000000..bea73acb0
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_config_info.py
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_config_info
+short_description: Gathers info about an ESXi host's advanced configuration information
+description:
+- This module can be used to gather information about an ESXi host's advanced configuration information when ESXi hostname or Cluster name is given.
+author:
+- Abhijeet Kasurde (@Akasurde)
+options:
+ cluster_name:
+ description:
+    - Name of the cluster to which the ESXi host belongs.
+ - If C(esxi_hostname) is not given, this parameter is required.
+ type: str
+ esxi_hostname:
+ description:
+ - ESXi hostname to gather information from.
+ - If C(cluster_name) is not given, this parameter is required.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather info about all ESXi Host in given Cluster
+ community.vmware.vmware_host_config_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: cluster_name
+ delegate_to: localhost
+
+- name: Gather info about ESXi Host
+ community.vmware.vmware_host_config_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+hosts_info:
+ description:
+ - dict with hostname as key and dict with host config information
+ returned: always
+ type: dict
+ sample: {
+ "10.76.33.226": {
+ "Annotations.WelcomeMessage": "",
+ "BufferCache.FlushInterval": 30000,
+ "BufferCache.HardMaxDirty": 95,
+ "BufferCache.PerFileHardMaxDirty": 50,
+ "BufferCache.SoftMaxDirty": 15,
+ }
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+
+
+class VmwareConfigInfoManager(PyVmomi):
+    """Gather advanced option key/value pairs from one or more ESXi hosts."""
+
+    def __init__(self, module):
+        super(VmwareConfigInfoManager, self).__init__(module)
+        # Exactly one of cluster_name / esxi_hostname is supplied (enforced by
+        # required_one_of in the argument spec); resolve to HostSystem objects.
+        cluster_name = self.params.get('cluster_name', None)
+        esxi_host_name = self.params.get('esxi_hostname', None)
+        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
+
+    def gather_host_info(self):
+        """Return ``{host name: {advanced option key: current value}}``."""
+        hosts_info = {}
+        for host in self.hosts:
+            host_info = {}
+            # QueryOptions() with no argument returns all advanced options.
+            for option in host.configManager.advancedOption.QueryOptions():
+                host_info[option.key] = option.value
+            hosts_info[host.name] = host_info
+        return hosts_info
+
+
+def main():
+    # Standard VMware connection options plus the host-selection parameters.
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(
+        cluster_name=dict(type='str', required=False),
+        esxi_hostname=dict(type='str', required=False),
+    )
+
+    # Info-only module (never changes state), so check mode is always safe.
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_one_of=[
+            ['cluster_name', 'esxi_hostname'],
+        ],
+        supports_check_mode=True
+    )
+
+    vmware_host_config = VmwareConfigInfoManager(module)
+    module.exit_json(changed=False, hosts_info=vmware_host_config.gather_host_info())
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_config_manager.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_config_manager.py
new file mode 100644
index 000000000..3fe91ab80
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_config_manager.py
@@ -0,0 +1,190 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_config_manager
+short_description: Manage advanced system settings of an ESXi host
+description:
+- This module can be used to manage advanced system settings of an ESXi host when ESXi hostname or Cluster name is given.
+author:
+- Abhijeet Kasurde (@Akasurde)
+options:
+ cluster_name:
+ description:
+ - Name of the cluster.
+ - Settings are applied to every ESXi host in given cluster.
+ - If C(esxi_hostname) is not given, this parameter is required.
+ type: str
+ esxi_hostname:
+ description:
+ - ESXi hostname.
+ - Settings are applied to this ESXi host.
+ - If C(cluster_name) is not given, this parameter is required.
+ type: str
+ options:
+ description:
+ - A dictionary of advanced system settings.
+ - Invalid options will cause module to error.
+ - Note that the list of advanced options (with description and values) can be found by running `vim-cmd hostsvc/advopt/options`.
+ default: {}
+ type: dict
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Manage Log level setting for all ESXi hosts in given Cluster
+ community.vmware.vmware_host_config_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: cluster_name
+ options:
+ 'Config.HostAgent.log.level': 'info'
+ delegate_to: localhost
+
+- name: Manage Log level setting for an ESXi host
+ community.vmware.vmware_host_config_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ options:
+ 'Config.HostAgent.log.level': 'verbose'
+ delegate_to: localhost
+
+- name: Manage multiple settings for an ESXi host
+ community.vmware.vmware_host_config_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ options:
+ 'Config.HostAgent.log.level': 'verbose'
+ 'Annotations.WelcomeMessage': 'Hello World'
+ 'Config.HostAgent.plugins.solo.enableMob': false
+ delegate_to: localhost
+'''
+
+RETURN = r'''#
+'''
+
+try:
+ from pyVmomi import vim, vmodl, VmomiSupport
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi, is_boolean, is_integer, is_truthy
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import integer_types, string_types
+
+
+class VmwareConfigManager(PyVmomi):
+    """Apply advanced system settings (vim.option values) to ESXi hosts."""
+
+    def __init__(self, module):
+        super(VmwareConfigManager, self).__init__(module)
+        cluster_name = self.params.get('cluster_name', None)
+        esxi_host_name = self.params.get('esxi_hostname', None)
+        # Mapping of advanced-option key -> desired value, as given by the user.
+        self.options = self.params.get('options', dict())
+        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
+
+    def set_host_configuration_facts(self):
+        """Update the requested advanced options on every target host and
+        exit the module with a summary of what changed (or, in check mode,
+        would change)."""
+        changed_list = []
+        message = ''
+        # NOTE(review): `message` is rebuilt on every host iteration, so with
+        # multiple hosts (cluster_name) only the last host's summary is
+        # reported, while `changed_list` accumulates keys across all hosts.
+        for host in self.hosts:
+            option_manager = host.configManager.advancedOption
+            # Map each supported option key to its declared vim.option type;
+            # the current value is filled in from QueryOptions() below.
+            host_facts = {}
+            for s_option in option_manager.supportedOption:
+                host_facts[s_option.key] = dict(option_type=s_option.optionType, value=None)
+
+            for option in option_manager.QueryOptions():
+                if option.key in host_facts:
+                    host_facts[option.key].update(
+                        value=option.value,
+                    )
+
+            change_option_list = []
+            for option_key, option_value in self.options.items():
+                if option_key in host_facts:
+                    # We handle all supported types here so we can give meaningful errors.
+                    # Coerce the user-supplied value to the exact vmodl type the
+                    # option declares, so both the comparison below and the
+                    # UpdateOptions() call see a correctly typed value.
+                    option_type = host_facts[option_key]['option_type']
+                    if is_boolean(option_value) and isinstance(option_type, vim.option.BoolOption):
+                        option_value = is_truthy(option_value)
+                    elif (isinstance(option_value, integer_types) or is_integer(option_value))\
+                            and isinstance(option_type, vim.option.IntOption):
+                        option_value = VmomiSupport.vmodlTypes['int'](option_value)
+                    elif (isinstance(option_value, integer_types) or is_integer(option_value, 'long'))\
+                            and isinstance(option_type, vim.option.LongOption):
+                        option_value = VmomiSupport.vmodlTypes['long'](option_value)
+                    elif isinstance(option_value, float) and isinstance(option_type, vim.option.FloatOption):
+                        pass
+                    elif isinstance(option_value, string_types) and isinstance(option_type, (vim.option.StringOption, vim.option.ChoiceOption)):
+                        pass
+                    else:
+                        self.module.fail_json(msg="Provided value is of type %s."
+                                                  " Option %s expects: %s" % (type(option_value), option_key, type(option_type)))
+
+                    # Only push options whose value actually differs (idempotency).
+                    if option_value != host_facts[option_key]['value']:
+                        change_option_list.append(vim.option.OptionValue(key=option_key, value=option_value))
+                        changed_list.append(option_key)
+                else:  # Don't silently drop unknown options. This prevents typos from falling through the cracks.
+                    self.module.fail_json(msg="Unsupported option %s" % option_key)
+            if change_option_list:
+                if self.module.check_mode:
+                    changed_suffix = ' would be changed.'
+                else:
+                    changed_suffix = ' changed.'
+                # Build a human-readable, comma/'and'-joined list of changed keys.
+                if len(changed_list) > 2:
+                    message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1])
+                elif len(changed_list) == 2:
+                    message = ' and '.join(changed_list)
+                elif len(changed_list) == 1:
+                    message = changed_list[0]
+                message += changed_suffix
+                # Apply the changes for real only outside of check mode.
+                if self.module.check_mode is False:
+                    try:
+                        option_manager.UpdateOptions(changedValue=change_option_list)
+                    except (vmodl.fault.SystemError, vmodl.fault.InvalidArgument) as e:
+                        self.module.fail_json(msg="Failed to update option/s as one or more OptionValue "
+                                                  "contains an invalid value: %s" % to_native(e.msg))
+                    except vim.fault.InvalidName as e:
+                        self.module.fail_json(msg="Failed to update option/s as one or more OptionValue "
+                                                  "objects refers to a non-existent option : %s" % to_native(e.msg))
+            else:
+                message = 'All settings are already configured.'
+
+        self.module.exit_json(changed=bool(changed_list), msg=message)
+
+
+def main():
+    # Standard VMware connection options plus this module's parameters.
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(
+        cluster_name=dict(type='str', required=False),
+        esxi_hostname=dict(type='str', required=False),
+        options=dict(type='dict', default=dict(), required=False),
+    )
+
+    # Check mode is handled inside set_host_configuration_facts().
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_one_of=[
+            ['cluster_name', 'esxi_hostname'],
+        ]
+    )
+
+    vmware_host_config = VmwareConfigManager(module)
+    vmware_host_config.set_host_configuration_facts()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_custom_attributes.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_custom_attributes.py
new file mode 100644
index 000000000..31cb3d427
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_custom_attributes.py
@@ -0,0 +1,194 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_custom_attributes
+short_description: Manage custom attributes from VMware for the given ESXi host
+description:
+ - This module can be used to add, remove and update custom attributes for the given ESXi host.
+author:
+ - Hunter Christain (@exp-hc)
+options:
+ esxi_hostname:
+ description:
+ - Name of the ESXi host to work with.
+ - This is a required parameter
+ required: true
+ type: str
+ state:
+ description:
+ - The action to take.
+ - If set to C(present), then custom attribute is added or updated.
+ - If set to C(absent), then custom attribute is removed.
+ default: 'present'
+ choices: ['present', 'absent']
+ type: str
+ attributes:
+ description:
+    - A list of names and values of custom attributes that need to be managed.
+    - Value of custom attribute is not required and will be ignored if C(state) is set to C(absent).
+ suboptions:
+ name:
+ description:
+ - Name of the attribute.
+ type: str
+ required: true
+ value:
+ description:
+ - Value of the attribute.
+ type: str
+ default: ''
+ default: []
+ type: list
+ elements: dict
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Add ESXi host custom attributes
+ community.vmware.vmware_host_custom_attributes:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: host1
+ state: present
+ attributes:
+ - name: MyAttribute
+ value: MyValue
+ delegate_to: localhost
+ register: attributes
+
+- name: Remove ESXi host Attribute
+ community.vmware.vmware_host_custom_attributes:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: host1
+ state: absent
+ attributes:
+ - name: MyAttribute
+ delegate_to: localhost
+ register: attributes
+'''
+
+RETURN = r'''
+custom_attributes:
+ description: metadata about the ESXi host attributes
+ returned: changed
+ type: dict
+ sample: {
+ "mycustom": "my_custom_value",
+ "mycustom_2": "my_custom_value_2",
+ "sample_1": "sample_1_value",
+ "sample_2": "sample_2_value",
+ "sample_3": "sample_3_value"
+ }
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+
+
+class HostAttributeManager(PyVmomi):
+    """Manage custom attribute definitions and values for one ESXi host."""
+
+    def __init__(self, module):
+        super(HostAttributeManager, self).__init__(module)
+        self.esxi_hostname = module.params.get('esxi_hostname')
+        # May resolve to a falsy value when the host is not found; main()
+        # fails the module in that case.
+        self.host = self.find_hostsystem_by_name(self.esxi_hostname)
+
+    def set_custom_field(self, host, user_fields):
+        """Create/update the given custom attributes on ``host``.
+
+        Returns an ``exit_json``-compatible result dict.  For state=absent the
+        caller supplies no value, so the suboption default '' is written,
+        which clears the attribute's value (the field definition itself is
+        not removed).
+        """
+        result_fields = dict()
+        change_list = list()
+        changed = False
+
+        for field in user_fields:
+            field_key = self.check_exists(field['name'])
+            found = False
+            field_value = field.get('value', '')
+
+            # Pair each field definition with this host's current value for it
+            # (join custom_field_mgr definitions with host.customValue on key).
+            for k, v in [(x.name, v.value) for x in self.custom_field_mgr for v in host.customValue if x.key == v.key]:
+                if k == field['name']:
+                    found = True
+                    # Only write when the stored value differs (idempotency).
+                    if v != field_value:
+                        if not self.module.check_mode:
+                            self.content.customFieldsManager.SetField(entity=host, key=field_key.key, value=field_value)
+                        result_fields[k] = field_value
+                        change_list.append(True)
+            # Attribute not yet set on this host: create the definition if it
+            # does not exist, then set the value (API calls skipped in check mode).
+            if not found and field_value != "":
+                if not field_key and not self.module.check_mode:
+                    field_key = self.content.customFieldsManager.AddFieldDefinition(name=field['name'], moType=vim.HostSystem)
+                change_list.append(True)
+                if not self.module.check_mode:
+                    self.content.customFieldsManager.SetField(entity=host, key=field_key.key, value=field_value)
+                result_fields[field['name']] = field_value
+
+        if any(change_list):
+            changed = True
+
+        return {'changed': changed, 'failed': False, 'custom_attributes': result_fields}
+
+    def check_exists(self, field):
+        """Return the matching field definition usable for hosts, or False."""
+        for x in self.custom_field_mgr:
+            # The custom attribute should be either global (managedObjectType == None) or host specific
+            if x.managedObjectType in (None, vim.HostSystem) and x.name == field:
+                return x
+        return False
+
+
+def main():
+    # Standard VMware connection options plus this module's parameters.
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(
+        esxi_hostname=dict(type='str', required=True),
+        state=dict(type='str', default='present',
+                   choices=['absent', 'present']),
+        attributes=dict(
+            type='list',
+            default=[],
+            elements='dict',
+            options=dict(
+                name=dict(type='str', required=True),
+                value=dict(type='str', default=''),
+            )
+        ),
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    pyv = HostAttributeManager(module)
+    results = {'changed': False, 'failed': False, 'instance': dict()}
+
+    # Check if the ESXi host exists before continuing
+    host = pyv.host
+
+    if host:
+        # host already exists
+        # Both states funnel into set_custom_field(); for state=absent the
+        # suboption default value '' clears the attribute's stored value.
+        if module.params['state'] == "present":
+            results = pyv.set_custom_field(host, module.params['attributes'])
+        elif module.params['state'] == "absent":
+            results = pyv.set_custom_field(host, module.params['attributes'])
+        module.exit_json(**results)
+    else:
+        # host does not exist
+        module.fail_json(msg="Unable to manage custom attributes for non-existing"
+                             " host %s" % pyv.esxi_hostname)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_datastore.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_datastore.py
new file mode 100644
index 000000000..3d28aa184
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_datastore.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_datastore
+short_description: Manage a datastore on ESXi host
+description:
+- This module can be used to mount/umount datastore on ESXi host.
+- This module only supports NFS (NFS v3 or NFS v4.1) and VMFS datastores.
+- For VMFS datastore, available device must already be connected on ESXi host.
+- All parameters and VMware object names are case sensitive.
+author:
+- Ludovic Rivallain (@lrivallain) <ludovic.rivallain@gmail.com>
+- Christian Kotte (@ckotte) <christian.kotte@gmx.de>
+notes:
+- Kerberos authentication with NFS v4.1 isn't implemented
+options:
+ datastore_name:
+ description:
+ - Name of the datastore to add/remove.
+ required: true
+ type: str
+ datastore_type:
+ description:
+ - Type of the datastore to configure (nfs/nfs41/vmfs).
+ choices: [ 'nfs', 'nfs41', 'vmfs' ]
+ type: str
+ nfs_server:
+ description:
+ - NFS host serving nfs datastore.
+ - Required if datastore type is set to C(nfs)/C(nfs41) and state is set to C(present), else unused.
+ - Two or more servers can be defined if datastore type is set to C(nfs41)
+ type: str
+ nfs_path:
+ description:
+ - Resource path on NFS host.
+ - Required if datastore type is set to C(nfs)/C(nfs41) and state is set to C(present), else unused.
+ type: str
+ nfs_ro:
+ description:
+ - ReadOnly or ReadWrite mount.
+ - Unused if datastore type is not set to C(nfs)/C(nfs41) and state is not set to C(present).
+ default: false
+ type: bool
+ vmfs_device_name:
+ description:
+ - Name of the device to be used as VMFS datastore.
+ - Required for VMFS datastore type and state is set to C(present), else unused.
+ type: str
+ vmfs_version:
+ description:
+ - VMFS version to use for datastore creation.
+ - Unused if datastore type is not set to C(vmfs) and state is not set to C(present).
+ type: int
+ esxi_hostname:
+ description:
+ - ESXi hostname to manage the datastore.
+ - Required when used with a vcenter
+ type: str
+ required: false
+ auto_expand:
+ description:
+ - Expand a datastore capacity to full if it has free capacity.
+ - This parameter can't be used to extend a datastore using another datastore.
+ - A use case example in I(auto_expand), it can be used to expand a datastore capacity after increasing LUN volume.
+ type: bool
+ default: true
+ state:
+ description:
+ - "present: Mount datastore on host if datastore is absent else do nothing."
+ - "absent: Umount datastore if datastore is present else do nothing."
+ default: present
+ choices: [ present, absent ]
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Mount VMFS datastores to ESXi
+ community.vmware.vmware_host_datastore:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datastore_name: '{{ item.name }}'
+ datastore_type: '{{ item.type }}'
+ vmfs_device_name: 'naa.XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
+ vmfs_version: 6
+ esxi_hostname: '{{ inventory_hostname }}'
+ state: present
+ delegate_to: localhost
+
+- name: Mount NFS datastores to ESXi
+ community.vmware.vmware_host_datastore:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datastore_name: '{{ item.name }}'
+ datastore_type: '{{ item.type }}'
+ nfs_server: '{{ item.server }}'
+ nfs_path: '{{ item.path }}'
+ nfs_ro: false
+ esxi_hostname: '{{ inventory_hostname }}'
+ state: present
+ delegate_to: localhost
+ loop:
+ - { 'name': 'NasDS_vol01', 'server': 'nas01', 'path': '/mnt/vol01', 'type': 'nfs'}
+ - { 'name': 'NasDS_vol02', 'server': 'nas01', 'path': '/mnt/vol02', 'type': 'nfs'}
+
+- name: Mount NFS v4.1 datastores to ESXi
+ community.vmware.vmware_host_datastore:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datastore_name: '{{ item.name }}'
+ datastore_type: '{{ item.type }}'
+ nfs_server: '{{ item.server }}'
+ nfs_path: '{{ item.path }}'
+ nfs_ro: false
+ esxi_hostname: '{{ inventory_hostname }}'
+ state: present
+ delegate_to: localhost
+ loop:
+ - { 'name': 'NasDS_vol03', 'server': 'nas01,nas02', 'path': '/mnt/vol01', 'type': 'nfs41'}
+ - { 'name': 'NasDS_vol04', 'server': 'nas01,nas02', 'path': '/mnt/vol02', 'type': 'nfs41'}
+
+- name: Remove/Umount Datastores from a ESXi
+ community.vmware.vmware_host_datastore:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ datastore_name: NasDS_vol01
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi, find_datastore_by_name, find_obj
+from ansible.module_utils._text import to_native
+
+
+class VMwareHostDatastore(PyVmomi):
+ def __init__(self, module):
+ super(VMwareHostDatastore, self).__init__(module)
+
+ self.datastore_name = module.params['datastore_name']
+ self.datastore_type = module.params['datastore_type']
+ self.nfs_server = module.params['nfs_server']
+ self.nfs_path = module.params['nfs_path']
+ self.nfs_ro = module.params['nfs_ro']
+ self.vmfs_device_name = module.params['vmfs_device_name']
+ self.vmfs_version = module.params['vmfs_version']
+ self.esxi_hostname = module.params['esxi_hostname']
+ self.auto_expand = module.params['auto_expand']
+ self.state = module.params['state']
+
+ if self.is_vcenter():
+ if not self.esxi_hostname:
+ self.module.fail_json(msg="esxi_hostname is mandatory with a vcenter")
+ self.esxi = self.find_hostsystem_by_name(self.esxi_hostname)
+ if self.esxi is None:
+ self.module.fail_json(msg="Failed to find ESXi hostname %s" % self.esxi_hostname)
+ else:
+ self.esxi = find_obj(self.content, [vim.HostSystem], None)
+
+ def process_state(self):
+ ds_states = {
+ 'absent': {
+ 'present': self.umount_datastore_host,
+ 'absent': self.state_exit_unchanged,
+ },
+ 'present': {
+ 'present': self.state_exit_unchanged,
+ 'absent': self.mount_datastore_host,
+ }
+ }
+ try:
+ ds_states[self.state][self.check_datastore_host_state()]()
+ except (vmodl.RuntimeFault, vmodl.MethodFault) as vmodl_fault:
+ self.module.fail_json(msg=to_native(vmodl_fault.msg))
+ except Exception as e:
+ self.module.fail_json(msg=to_native(e))
+
+ def expand_datastore_up_to_full(self):
+ """
+ Expand a datastore capacity up to full if there is free capacity.
+ """
+ cnf_mng = self.esxi.configManager
+
+ # Find attached datastore at host.
+ for datastore_obj in self.esxi.datastore:
+ if datastore_obj.name == self.datastore_name:
+ expand_datastore_obj = datastore_obj
+ break
+
+ # Check that whether the datastore has free capacity to expand.
+ vmfs_ds_options = cnf_mng.datastoreSystem.QueryVmfsDatastoreExpandOptions(expand_datastore_obj)
+ if vmfs_ds_options:
+ if self.module.check_mode is False:
+ try:
+ cnf_mng.datastoreSystem.ExpandVmfsDatastore(datastore=expand_datastore_obj,
+ spec=vmfs_ds_options[0].spec)
+ except Exception as e:
+ self.module.fail_json(msg="%s can not expand the datastore: %s" % (to_native(e.msg), self.datastore_name))
+
+ self.module.exit_json(changed=True)
+
+ def state_exit_unchanged(self):
+ self.module.exit_json(changed=False)
+
+ def check_datastore_host_state(self):
+ storage_system = self.esxi.configManager.storageSystem
+ host_file_sys_vol_mount_info = storage_system.fileSystemVolumeInfo.mountInfo
+ for host_mount_info in host_file_sys_vol_mount_info:
+ if host_mount_info.volume.name == self.datastore_name:
+ if self.auto_expand and host_mount_info.volume.type == "VMFS":
+ self.expand_datastore_up_to_full()
+ return 'present'
+ return 'absent'
+
+ def get_used_disks_names(self):
+ used_disks = []
+ storage_system = self.esxi.configManager.storageSystem
+ for each_vol_mount_info in storage_system.fileSystemVolumeInfo.mountInfo:
+ if hasattr(each_vol_mount_info.volume, 'extent'):
+ for each_partition in each_vol_mount_info.volume.extent:
+ used_disks.append(each_partition.diskName)
+ return used_disks
+
+ def umount_datastore_host(self):
+ ds = find_datastore_by_name(self.content, self.datastore_name)
+ if not ds:
+ self.module.fail_json(msg="No datastore found with name %s" % self.datastore_name)
+ if self.module.check_mode is False:
+ error_message_umount = "Cannot umount datastore %s from host %s" % (self.datastore_name, self.esxi.name)
+ try:
+ self.esxi.configManager.datastoreSystem.RemoveDatastore(ds)
+ except (vim.fault.NotFound, vim.fault.HostConfigFault, vim.fault.ResourceInUse) as fault:
+ self.module.fail_json(msg="%s: %s" % (error_message_umount, to_native(fault.msg)))
+ except Exception as e:
+ self.module.fail_json(msg="%s: %s" % (error_message_umount, to_native(e)))
+ self.module.exit_json(changed=True, result="Datastore %s on host %s" % (self.datastore_name, self.esxi.name))
+
+ def mount_datastore_host(self):
+ if self.datastore_type == 'nfs' or self.datastore_type == 'nfs41':
+ self.mount_nfs_datastore_host()
+ if self.datastore_type == 'vmfs':
+ self.mount_vmfs_datastore_host()
+
+ def mount_nfs_datastore_host(self):
+ if self.module.check_mode is False:
+ mnt_specs = vim.host.NasVolume.Specification()
+ # NFS v3
+ if self.datastore_type == 'nfs':
+ mnt_specs.type = "NFS"
+ mnt_specs.remoteHost = self.nfs_server
+ # NFS v4.1
+ if self.datastore_type == 'nfs41':
+ mnt_specs.type = "NFS41"
+ # remoteHost needs to be set to a non-empty string, but the value is not used
+ mnt_specs.remoteHost = "something"
+ mnt_specs.remoteHostNames = [self.nfs_server]
+ mnt_specs.remotePath = self.nfs_path
+ mnt_specs.localPath = self.datastore_name
+ if self.nfs_ro:
+ mnt_specs.accessMode = "readOnly"
+ else:
+ mnt_specs.accessMode = "readWrite"
+ error_message_mount = "Cannot mount datastore %s on host %s" % (self.datastore_name, self.esxi.name)
+ try:
+ ds = self.esxi.configManager.datastoreSystem.CreateNasDatastore(mnt_specs)
+ if not ds:
+ self.module.fail_json(msg=error_message_mount)
+ except (vim.fault.NotFound, vim.fault.DuplicateName,
+ vim.fault.AlreadyExists, vim.fault.HostConfigFault,
+ vmodl.fault.InvalidArgument, vim.fault.NoVirtualNic,
+ vim.fault.NoGateway) as fault:
+ self.module.fail_json(msg="%s: %s" % (error_message_mount, to_native(fault.msg)))
+ except Exception as e:
+ self.module.fail_json(msg="%s : %s" % (error_message_mount, to_native(e)))
+ self.module.exit_json(changed=True, result="Datastore %s on host %s" % (self.datastore_name, self.esxi.name))
+
+ def mount_vmfs_datastore_host(self):
+ if self.module.check_mode is False:
+ ds_path = "/vmfs/devices/disks/" + str(self.vmfs_device_name)
+ host_ds_system = self.esxi.configManager.datastoreSystem
+ ds_system = vim.host.DatastoreSystem
+ if self.vmfs_device_name in self.get_used_disks_names():
+ error_message_used_disk = "VMFS disk %s already in use" % self.vmfs_device_name
+ self.module.fail_json(msg="%s" % error_message_used_disk)
+ error_message_mount = "Cannot mount datastore %s on host %s" % (self.datastore_name, self.esxi.name)
+ try:
+ vmfs_ds_options = ds_system.QueryVmfsDatastoreCreateOptions(host_ds_system,
+ ds_path,
+ self.vmfs_version)
+ vmfs_ds_options[0].spec.vmfs.volumeName = self.datastore_name
+ ds_system.CreateVmfsDatastore(
+ host_ds_system,
+ vmfs_ds_options[0].spec)
+ except (vim.fault.NotFound, vim.fault.DuplicateName,
+ vim.fault.HostConfigFault, vmodl.fault.InvalidArgument) as fault:
+ self.module.fail_json(msg="%s : %s" % (error_message_mount, to_native(fault.msg)))
+ except Exception as e:
+ self.module.fail_json(msg="%s : %s" % (error_message_mount, to_native(e)))
+ self.module.exit_json(changed=True, result="Datastore %s on host %s" % (self.datastore_name, self.esxi.name))
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ datastore_name=dict(type='str', required=True),
+ datastore_type=dict(type='str', choices=['nfs', 'nfs41', 'vmfs']),
+ nfs_server=dict(type='str'),
+ nfs_path=dict(type='str'),
+ nfs_ro=dict(type='bool', default=False),
+ vmfs_device_name=dict(type='str'),
+ vmfs_version=dict(type='int'),
+ esxi_hostname=dict(type='str', required=False),
+ auto_expand=dict(type='bool', default=True),
+ state=dict(type='str', default='present', choices=['absent', 'present'])
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[
+ ['state', 'present', ['datastore_type']],
+ ['datastore_type', 'vmfs', ['vmfs_device_name']],
+ ['datastore_type', 'nfs', ['nfs_server', 'nfs_path']],
+ ['datastore_type', 'nfs41', ['nfs_server', 'nfs_path']],
+ ]
+ )
+
+ vmware_host_datastore = VMwareHostDatastore(module)
+ vmware_host_datastore.process_state()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_disk_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_disk_info.py
new file mode 100644
index 000000000..2ba105928
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_disk_info.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Matt Proud
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: vmware_host_disk_info
+short_description: Gathers information about disks attached to given ESXi host/s.
+description:
+- This module returns information about disks attached to given ESXi host/s
+- If I(cluster_name) is provided, then disk information about all hosts from the given cluster will be returned.
+- If I(esxi_hostname) is provided, then disk information about the given host system will be returned.
+author:
+- Matt Proud (@laidbackware)
+options:
+ cluster_name:
+ description:
+ - Name of the cluster to which the ESXi host belongs.
+ - If C(esxi_hostname) is not given, this parameter is required.
+ type: str
+ esxi_hostname:
+ description:
+ - ESXi hostname to gather information from.
+ - If C(cluster_name) is not given, this parameter is required.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+'''
+
+EXAMPLES = '''
+- name: Gather info about disks of all ESXi Hosts in the given Cluster
+ community.vmware.vmware_host_disk_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: '{{ cluster_name }}'
+ delegate_to: localhost
+ register: cluster_host_disks
+
+- name: Gather info about disks of an ESXi Host
+ community.vmware.vmware_host_disk_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ delegate_to: localhost
+ register: host_disks
+'''
+
+RETURN = '''
+hosts_disk_info:
+ description: dict with a list of information for all disks attached to each ESXi host, keyed by host name
+ returned: always
+ type: dict
+ sample: >-
+ "192.168.0.182": [
+ {
+ "canonical_name": "naa.6000c296ed6217bd61df35622eb21a3a",
+ "capacity_mb": 4096,
+ "device_path": "/vmfs/devices/disks/naa.6000c296ed6217bd61df35622eb21a3a",
+ "device_type": "disk",
+ "device_ctd_list": [
+ "vmhba0:C0:T1:L0"
+ ],
+ "disk_uid": "key-vim.host.ScsiDisk-02000000006000c296ed6217bd61df35622eb21a3a566972747561",
+ "display_name": "Local VMware Disk (naa.6000c296ed6217bd61df35622eb21a3a)"
+ },
+ {
+ "canonical_name": "naa.6000c2968ad7142d93faae527fe8822b",
+ "capacity_mb": 204800,
+ "device_path": "/vmfs/devices/disks/naa.6000c2968ad7142d93faae527fe8822b",
+ "device_type": "disk",
+ "device_ctd_list": [
+ "vmhba0:C0:T3:L0"
+ ],
+ "disk_uid": "key-vim.host.ScsiDisk-02000000006000c2968ad7142d93faae527fe8822b566972747561",
+ "display_name": "Local VMware Disk (naa.6000c2968ad7142d93faae527fe8822b)"
+ },]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+
+
+class HostDiskInfo(PyVmomi):
+ """Class to return host disk info"""
+
+ def __init__(self, module):
+ super(HostDiskInfo, self).__init__(module)
+ cluster_name = self.params.get('cluster_name', None)
+ esxi_host_name = self.params.get('esxi_hostname', None)
+ self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
+ if not self.hosts:
+ self.module.fail_json(msg="Failed to find host system.")
+
+ def gather_host_disk_info(self):
+ hosts_disk_info = {}
+ for host in self.hosts:
+ host_disk_info = []
+ storage_system = host.configManager.storageSystem.storageDeviceInfo
+ # Collect target lookup for naa devices
+ lun_lookup = {}
+ for lun in storage_system.multipathInfo.lun:
+ key = lun.lun
+ paths = []
+ for path in lun.path:
+ paths.append(path.name)
+ lun_lookup[key] = paths
+
+ for disk in storage_system.scsiLun:
+ canonical_name = disk.canonicalName
+ try:
+ capacity = int(disk.capacity.block * disk.capacity.blockSize / 1048576)
+ except AttributeError:
+ capacity = 0
+ try:
+ device_path = disk.devicePath
+ except AttributeError:
+ device_path = ""
+ device_type = disk.deviceType
+ display_name = disk.displayName
+ disk_uid = disk.key
+ device_ctd_list = lun_lookup[disk_uid]
+
+ disk_dict = {"capacity_mb": capacity,
+ "device_path": device_path,
+ "device_type": device_type,
+ "display_name": display_name,
+ "disk_uid": disk_uid,
+ "device_ctd_list": device_ctd_list,
+ "canonical_name": canonical_name}
+ host_disk_info.append(disk_dict)
+
+ hosts_disk_info[host.name] = host_disk_info
+
+ return hosts_disk_info
+
+
+def main():
+ """Main"""
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ cluster_name=dict(type='str', required=False),
+ esxi_hostname=dict(type='str', required=False),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=[
+ ['cluster_name', 'esxi_hostname'],
+ ],
+ supports_check_mode=True,
+ )
+
+ host_disk_mgr = HostDiskInfo(module)
+ module.exit_json(changed=False, hosts_disk_info=host_disk_mgr.gather_host_disk_info())
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_dns.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_dns.py
new file mode 100644
index 000000000..0a03f0ae2
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_dns.py
@@ -0,0 +1,459 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_dns
+short_description: Manage DNS configuration of an ESXi host system
+description:
+- This module can be used to configure DNS for the default TCP/IP stack on an ESXi host system.
+author:
+- Christian Kotte (@ckotte)
+- Mario Lenz (@mariolenz)
+notes:
+- This module is a replacement for the module C(vmware_dns_config)
+options:
+ type:
+ description:
+ - Type of DNS assignment. Either C(dhcp) or C(static).
+ - A VMkernel adapter needs to be set to DHCP if C(type) is set to C(dhcp).
+ type: str
+ choices: [ 'dhcp', 'static' ]
+ required: true
+ device:
+ description:
+ - The VMkernel network adapter to obtain DNS settings from.
+ - Needs to get its IP through DHCP, a static network configuration combined with a dynamic DNS configuration doesn't work.
+ - The parameter is only required in case of C(type) is set to C(dhcp).
+ type: str
+ host_name:
+ description:
+ - The hostname to be used for the ESXi host.
+ - Cannot be used when configuring a complete cluster.
+ type: str
+ domain:
+ description:
+ - The domain name to be used for the ESXi host.
+ type: str
+ dns_servers:
+ description:
+ - A list of DNS servers to be used.
+ - The order of the DNS servers is important as they are used consecutively in order.
+ type: list
+ elements: str
+ search_domains:
+ description:
+ - A list of domains to be searched through by the resolver.
+ type: list
+ elements: str
+ verbose:
+ description:
+ - Verbose output of the DNS server configuration change.
+ - Explains if a DNS server was added, removed, or if the DNS server sequence was changed.
+ type: bool
+ default: false
+ esxi_hostname:
+ description:
+ - Name of the host system to work with.
+ - This parameter is required if C(cluster_name) is not specified and you connect to a vCenter.
+ - Cannot be used when you connect directly to an ESXi host.
+ type: str
+ cluster_name:
+ description:
+ - Name of the cluster from which all host systems will be used.
+ - This parameter is required if C(esxi_hostname) is not specified and you connect to a vCenter.
+ - Cannot be used when you connect directly to an ESXi host.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Configure DNS for an ESXi host
+ community.vmware.vmware_host_dns:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ type: static
+ host_name: esx01
+ domain: example.local
+ dns_servers:
+ - 192.168.1.10
+ - 192.168.1.11
+ search_domains:
+ - subdomain.example.local
+ - example.local
+ delegate_to: localhost
+
+- name: Configure DNS for all ESXi hosts of a cluster
+ community.vmware.vmware_host_dns:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: '{{ cluster_name }}'
+ type: static
+ domain: example.local
+ dns_servers:
+ - 192.168.1.10
+ - 192.168.1.11
+ search_domains:
+ - subdomain.example.local
+ - example.local
+ delegate_to: localhost
+
+- name: Configure DNS via DHCP for an ESXi host
+ community.vmware.vmware_host_dns:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ type: dhcp
+ device: vmk0
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+dns_config_result:
+ description: metadata about host system's DNS configuration
+ returned: always
+ type: dict
+ sample: {
+ "esx01.example.local": {
+ "changed": true,
+ "dns_servers_changed": ["192.168.1.12", "192.168.1.13"],
+ "dns_servers": ["192.168.1.10", "192.168.1.11"],
+ "dns_servers_previous": ["192.168.1.10", "192.168.1.11", "192.168.1.12", "192.168.1.13"],
+ "domain": "example.local",
+ "host_name": "esx01",
+ "msg": "DNS servers and Search domains changed",
+ "search_domains_changed": ["subdomain.example.local"],
+ "search_domains": ["subdomain.example.local", "example.local"],
+ "search_domains_previous": ["example.local"],
+ },
+ }
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class VmwareHostDNS(PyVmomi):
+ """Class to manage DNS configuration of an ESXi host system"""
+
+ def __init__(self, module):
+ super(VmwareHostDNS, self).__init__(module)
+ self.cluster_name = self.params.get('cluster_name')
+ self.esxi_host_name = self.params.get('esxi_hostname')
+ if self.is_vcenter():
+ if not self.cluster_name and not self.esxi_host_name:
+ self.module.fail_json(
+ msg="You connected to a vCenter but didn't specify the cluster_name or esxi_hostname you want to configure."
+ )
+ else:
+ if self.cluster_name:
+ self.module.warn(
+ "You connected directly to an ESXi host, cluster_name will be ignored."
+ )
+ if self.esxi_host_name:
+ self.module.warn(
+ "You connected directly to an ESXi host, esxi_host_name will be ignored."
+ )
+ self.hosts = self.get_all_host_objs(cluster_name=self.cluster_name, esxi_host_name=self.esxi_host_name)
+ if not self.hosts:
+ self.module.fail_json(msg="Failed to find host system(s).")
+ self.network_type = self.params.get('type')
+ self.vmkernel_device = self.params.get('device')
+ self.host_name = self.params.get('host_name')
+ self.domain = self.params.get('domain')
+ self.dns_servers = self.params.get('dns_servers')
+ self.search_domains = self.params.get('search_domains')
+
+ def ensure(self):
+ """Function to manage DNS configuration of an ESXi host system"""
+ results = dict(changed=False, dns_config_result=dict())
+ verbose = self.module.params.get('verbose', False)
+ host_change_list = []
+ for host in self.hosts:
+ initial_name = host.name
+ changed = False
+ changed_list = []
+ host_result = {'changed': '', 'msg': '', 'host_name': host.name}
+
+ host_netstack_config = host.config.network.netStackInstance
+ for instance in host_netstack_config:
+ if instance.key == 'defaultTcpipStack':
+ netstack_spec = vim.host.NetworkConfig.NetStackSpec()
+ netstack_spec.operation = 'edit'
+ netstack_spec.netStackInstance = vim.host.NetStackInstance()
+ netstack_spec.netStackInstance.key = 'defaultTcpipStack'
+ dns_config = vim.host.DnsConfig()
+ host_result['dns_config'] = self.network_type
+ host_result['search_domains'] = self.search_domains
+ if self.network_type == 'static':
+ if self.host_name:
+ if instance.dnsConfig.hostName != self.host_name:
+ host_result['host_name_previous'] = instance.dnsConfig.hostName
+ changed = True
+ changed_list.append("Host name")
+ dns_config.hostName = self.host_name
+ else:
+ dns_config.hostName = instance.dnsConfig.hostName
+
+ if self.search_domains is not None:
+ if instance.dnsConfig.searchDomain != self.search_domains:
+ host_result['search_domains_previous'] = instance.dnsConfig.searchDomain
+ host_result['search_domains_changed'] = (
+ self.get_differt_entries(instance.dnsConfig.searchDomain, self.search_domains)
+ )
+ changed = True
+ changed_list.append("Search domains")
+ dns_config.searchDomain = self.search_domains
+ else:
+ dns_config.searchDomain = instance.dnsConfig.searchDomain
+
+ if instance.dnsConfig.dhcp:
+ host_result['domain'] = self.domain
+ host_result['dns_servers'] = self.dns_servers
+ host_result['search_domains'] = self.search_domains
+ host_result['dns_config_previous'] = 'DHCP'
+ changed = True
+ changed_list.append("DNS configuration")
+ dns_config.dhcp = False
+ dns_config.virtualNicDevice = None
+ dns_config.domainName = self.domain
+ dns_config.address = self.dns_servers
+ dns_config.searchDomain = self.search_domains
+ else:
+ # Check host name
+
+ # Check domain
+ host_result['domain'] = self.domain
+ if self.domain is not None:
+ if instance.dnsConfig.domainName != self.domain:
+ host_result['domain_previous'] = instance.dnsConfig.domainName
+ changed = True
+ changed_list.append("Domain")
+ dns_config.domainName = self.domain
+ else:
+ dns_config.domainName = instance.dnsConfig.domainName
+
+ # Check DNS server(s)
+ host_result['dns_servers'] = self.dns_servers
+ if self.dns_servers is not None:
+ if instance.dnsConfig.address != self.dns_servers:
+ host_result['dns_servers_previous'] = instance.dnsConfig.address
+ host_result['dns_servers_changed'] = (
+ self.get_differt_entries(instance.dnsConfig.address, self.dns_servers)
+ )
+ changed = True
+ # build verbose message
+ if verbose:
+ dns_servers_verbose_message = self.build_changed_message(
+ instance.dnsConfig.address,
+ self.dns_servers
+ )
+ else:
+ changed_list.append("DNS servers")
+ dns_config.address = self.dns_servers
+ else:
+ dns_config.address = instance.dnsConfig.address
+
+ elif self.network_type == 'dhcp' and not instance.dnsConfig.dhcp:
+ host_result['device'] = self.vmkernel_device
+ host_result['dns_config_previous'] = 'static'
+ changed = True
+ changed_list.append("DNS configuration")
+ dns_config.dhcp = True
+ dns_config.virtualNicDevice = self.vmkernel_device
+ netstack_spec.netStackInstance.dnsConfig = dns_config
+ config = vim.host.NetworkConfig()
+ config.netStackSpec = [netstack_spec]
+
+ if changed:
+ if self.module.check_mode:
+ changed_suffix = ' would be changed'
+ else:
+ changed_suffix = ' changed'
+ if len(changed_list) > 2:
+ message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1])
+ elif len(changed_list) == 2:
+ message = ' and '.join(changed_list)
+ elif len(changed_list) == 1:
+ message = changed_list[0]
+ if verbose and dns_servers_verbose_message:
+ if changed_list:
+ message = message + changed_suffix + '. ' + dns_servers_verbose_message + '.'
+ else:
+ message = dns_servers_verbose_message
+ else:
+ message += changed_suffix
+ host_result['changed'] = True
+ host_network_system = host.configManager.networkSystem
+ if not self.module.check_mode:
+ try:
+ host_network_system.UpdateNetworkConfig(config, 'modify')
+ except vim.fault.AlreadyExists:
+ self.module.fail_json(
+ msg="Network entity specified in the configuration already exist on host '%s'" % host.name
+ )
+ except vim.fault.NotFound:
+ self.module.fail_json(
+ msg="Network entity specified in the configuration doesn't exist on host '%s'" % host.name
+ )
+ except vim.fault.ResourceInUse:
+ self.module.fail_json(msg="Resource is in use on host '%s'" % host.name)
+ except vmodl.fault.InvalidArgument:
+ self.module.fail_json(
+ msg="An invalid parameter is passed in for one of the networking objects for host '%s'" %
+ host.name
+ )
+ except vmodl.fault.NotSupported as not_supported:
+ self.module.fail_json(
+ msg="Operation isn't supported for the instance on '%s' : %s" %
+ (host.name, to_native(not_supported.msg))
+ )
+ except vim.fault.HostConfigFault as config_fault:
+ self.module.fail_json(
+ msg="Failed to configure TCP/IP stacks for host '%s' due to : %s" %
+ (host.name, to_native(config_fault.msg))
+ )
+ else:
+ host_result['changed'] = False
+ message = 'All settings are already configured'
+
+ host_result['msg'] = message
+ results['dns_config_result'][initial_name] = host_result
+
+ host_change_list.append(changed)
+
+ if any(host_change_list):
+ results['changed'] = True
+ self.module.exit_json(**results)
+
+ def build_changed_message(self, dns_servers_configured, dns_servers_new):
+ """Build changed message"""
+ check_mode = 'would be ' if self.module.check_mode else ''
+ # get differences
+ add = self.get_not_in_list_one(dns_servers_new, dns_servers_configured)
+ remove = self.get_not_in_list_one(dns_servers_configured, dns_servers_new)
+ diff_servers = list(dns_servers_configured)
+ if add and remove:
+ for server in add:
+ diff_servers.append(server)
+ for server in remove:
+ diff_servers.remove(server)
+ if dns_servers_new != diff_servers:
+ message = (
+ "DNS server %s %sadded and %s %sremoved and the server sequence %schanged as well" %
+ (self.array_to_string(add), check_mode, self.array_to_string(remove), check_mode, check_mode)
+ )
+ else:
+ if dns_servers_new != dns_servers_configured:
+ message = (
+ "DNS server %s %sreplaced with %s" %
+ (self.array_to_string(remove), check_mode, self.array_to_string(add))
+ )
+ else:
+ message = (
+ "DNS server %s %sremoved and %s %sadded" %
+ (self.array_to_string(remove), check_mode, self.array_to_string(add), check_mode)
+ )
+ elif add:
+ for server in add:
+ diff_servers.append(server)
+ if dns_servers_new != diff_servers:
+ message = (
+ "DNS server %s %sadded and the server sequence %schanged as well" %
+ (self.array_to_string(add), check_mode, check_mode)
+ )
+ else:
+ message = "DNS server %s %sadded" % (self.array_to_string(add), check_mode)
+ elif remove:
+ for server in remove:
+ diff_servers.remove(server)
+ if dns_servers_new != diff_servers:
+ message = (
+ "DNS server %s %sremoved and the server sequence %schanged as well" %
+ (self.array_to_string(remove), check_mode, check_mode)
+ )
+ else:
+ message = "DNS server %s %sremoved" % (self.array_to_string(remove), check_mode)
+ else:
+ message = "DNS server sequence %schanged" % check_mode
+
+ return message
+
+ @staticmethod
+ def get_not_in_list_one(list1, list2):
+ """Return entries that are not in list one"""
+ return [x for x in list1 if x not in set(list2)]
+
+ @staticmethod
+ def array_to_string(array):
+ """Return string from array"""
+ if len(array) > 2:
+ string = (
+ ', '.join("'{0}'".format(element) for element in array[:-1]) + ', and '
+ + "'{0}'".format(str(array[-1]))
+ )
+ elif len(array) == 2:
+ string = ' and '.join("'{0}'".format(element) for element in array)
+ elif len(array) == 1:
+ string = "'{0}'".format(array[0])
+ return string
+
+ @staticmethod
+ def get_differt_entries(list1, list2):
+ """Return different entries of two lists"""
+ return [a for a in list1 + list2 if (a not in list1) or (a not in list2)]
+
+
+def main():
+ """Main"""
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ type=dict(required=True, type='str', choices=['dhcp', 'static']),
+ device=dict(type='str'),
+ host_name=dict(required=False, type='str'),
+ domain=dict(required=False, type='str'),
+ dns_servers=dict(required=False, type='list', default=None, elements='str'),
+ search_domains=dict(required=False, type='list', default=None, elements='str'),
+ esxi_hostname=dict(required=False, type='str'),
+ cluster_name=dict(required=False, type='str'),
+ verbose=dict(type='bool', default=False, required=False)
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ['type', 'dhcp', ['device']],
+ ],
+ mutually_exclusive=[
+ ['cluster_name', 'host_name'],
+ ['cluster_name', 'esxi_hostname'],
+ ],
+ supports_check_mode=True
+ )
+
+ dns = VmwareHostDNS(module)
+ dns.ensure()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_dns_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_dns_info.py
new file mode 100644
index 000000000..182fd2548
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_dns_info.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_dns_info
+short_description: Gathers info about an ESXi host's DNS configuration information
+description:
+- This module can be used to gather information about an ESXi host's DNS configuration information when ESXi hostname or Cluster name is given.
+- All parameters and VMware object names are case sensitive.
+author:
+- Abhijeet Kasurde (@Akasurde)
+options:
+ cluster_name:
+ description:
+  - Name of the cluster to which the ESXi host belongs.
+ - If C(esxi_hostname) is not given, this parameter is required.
+ type: str
+ esxi_hostname:
+ description:
+ - ESXi hostname to gather information from.
+ - If C(cluster_name) is not given, this parameter is required.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather DNS info about all ESXi Hosts in given Cluster
+ community.vmware.vmware_host_dns_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: cluster_name
+ delegate_to: localhost
+
+- name: Gather DNS info about ESXi Host
+ community.vmware.vmware_host_dns_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+hosts_dns_info:
+ description: metadata about DNS config from given cluster / host system
+ returned: always
+ type: dict
+ sample: {
+ "DC0_C0_H0": {
+ "dhcp": true,
+ "domain_name": "localdomain",
+ "host_name": "localhost",
+ "ip_address": [
+ "8.8.8.8"
+ ],
+ "search_domain": [
+ "localdomain"
+ ],
+ "virtual_nic_device": "vmk0"
+ }
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+
+
class VmwareDnsInfoManager(PyVmomi):
    """Collects DNS configuration details from one or more ESXi hosts."""

    def __init__(self, module):
        """Resolve the target host systems from cluster_name/esxi_hostname."""
        super(VmwareDnsInfoManager, self).__init__(module)
        cluster = self.params.get('cluster_name', None)
        esxi = self.params.get('esxi_hostname', None)
        self.hosts = self.get_all_host_objs(cluster_name=cluster, esxi_host_name=esxi)

    def gather_dns_info(self):
        """Return a dict mapping each host name to its DNS configuration."""
        results = {}
        for host in self.hosts:
            dns_config = host.config.network.dnsConfig
            results[host.name] = {
                'dhcp': dns_config.dhcp,
                'virtual_nic_device': dns_config.virtualNicDevice,
                'host_name': dns_config.hostName,
                'domain_name': dns_config.domainName,
                'ip_address': list(dns_config.address),
                'search_domain': list(dns_config.searchDomain),
            }
        return results
+
+
def main():
    """Module entry point: gather ESXi DNS info and report it (read-only)."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
    )

    # At least one way of selecting hosts must be provided.
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ],
        supports_check_mode=True,
    )

    manager = VmwareDnsInfoManager(module)
    module.exit_json(changed=False, hosts_dns_info=manager.gather_dns_info())


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_facts.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_facts.py
new file mode 100644
index 000000000..172ced427
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_facts.py
@@ -0,0 +1,422 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Wei Gao <gaowei3@qq.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_facts
+short_description: Gathers facts about remote ESXi hostsystem
+description:
+  - This module can be used to gather facts such as CPU, memory, datastore, network and system information about an ESXi host system.
+ - Please specify hostname or IP address of ESXi host system as C(hostname).
+ - If hostname or IP address of vCenter is provided as C(hostname) and C(esxi_hostname) is not specified, then the
+ module will throw an error.
+ - VSAN facts added in 2.7 version.
+ - SYSTEM fact uuid added in 2.10 version.
+ - Connection state fact added in VMware collection 2.6.0.
+ - Please note that when ESXi host connection state is not C(connected), facts returned from vCenter might be stale.
+ Users are recommended to check connection state value and take appropriate decision in the playbook.
+author:
+ - Wei Gao (@woshihaoren)
+options:
+ esxi_hostname:
+ description:
+ - ESXi hostname.
+ - Host facts about the specified ESXi server will be returned.
+ - By specifying this option, you can select which ESXi hostsystem is returned if connecting to a vCenter.
+ type: str
+ show_tag:
+ description:
+ - Tags related to Host are shown if set to C(true).
+ default: false
+ type: bool
+ required: false
+ schema:
+ description:
+ - Specify the output schema desired.
+ - The 'summary' output schema is the legacy output from the module
+ - The 'vsphere' output schema is the vSphere API class definition
+ which requires pyvmomi>6.7.1
+ choices: ['summary', 'vsphere']
+ default: 'summary'
+ type: str
+ properties:
+ description:
+ - Specify the properties to retrieve.
+ - If not specified, all properties are retrieved (deeply).
+ - Results are returned in a structure identical to the vsphere API.
+ - 'Example:'
+ - ' properties: ['
+ - ' "hardware.memorySize",'
+ - ' "hardware.cpuInfo.numCpuCores",'
+ - ' "config.product.apiVersion",'
+ - ' "overallStatus"'
+ - ' ]'
+ - Only valid when C(schema) is C(vsphere).
+ type: list
+ elements: str
+ required: false
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather vmware host facts
+ community.vmware.vmware_host_facts:
+ hostname: "{{ esxi_server }}"
+ username: "{{ esxi_username }}"
+ password: "{{ esxi_password }}"
+ register: host_facts
+ delegate_to: localhost
+
+- name: Gather vmware host facts from vCenter
+ community.vmware.vmware_host_facts:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ esxi_hostname }}"
+ register: host_facts
+ delegate_to: localhost
+
+- name: Gather vmware host facts from vCenter with tag information
+ community.vmware.vmware_host_facts:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ esxi_hostname }}"
+ show_tag: true
+ register: host_facts_tag
+ delegate_to: localhost
+
+- name: Get VSAN Cluster UUID from host facts
+ community.vmware.vmware_host_facts:
+ hostname: "{{ esxi_server }}"
+ username: "{{ esxi_username }}"
+ password: "{{ esxi_password }}"
+ register: host_facts
+- set_fact:
+ cluster_uuid: "{{ host_facts['ansible_facts']['vsan_cluster_uuid'] }}"
+
+- name: Gather some info from a host using the vSphere API output schema
+ community.vmware.vmware_host_facts:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ esxi_hostname }}"
+ schema: vsphere
+ properties:
+ - hardware.memorySize
+ - hardware.cpuInfo.numCpuCores
+ - config.product.apiVersion
+ - overallStatus
+ register: host_facts
+
+- name: Gather information about powerstate and connection state
+ community.vmware.vmware_host_facts:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ esxi_hostname }}"
+ schema: vsphere
+ properties:
+ - runtime.connectionState
+ - runtime.powerState
+
+- name: How to retrieve Product, Version, Build, Update info for ESXi from vCenter
+ block:
+ - name: Gather product version info for ESXi from vCenter
+ community.vmware.vmware_host_facts:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ esxi_hostname }}"
+ schema: vsphere
+ properties:
+ - config.product
+ - config.option
+ register: gather_host_facts_result
+
+ - name: Extract update level info from option properties
+ set_fact:
+ update_level_info: "{{ item.value }}"
+ loop: "{{ gather_host_facts_result.ansible_facts.config.option }}"
+ when:
+ - item.key == 'Misc.HostAgentUpdateLevel'
+
+ - name: The output of Product, Version, Build, Update info for ESXi
+ debug:
+ msg:
+ - "Product : {{ gather_host_facts_result.ansible_facts.config.product.name }}"
+ - "Version : {{ gather_host_facts_result.ansible_facts.config.product.version }}"
+ - "Build : {{ gather_host_facts_result.ansible_facts.config.product.build }}"
+ - "Update : {{ update_level_info }}"
+'''
+
+RETURN = r'''
+ansible_facts:
+ description: system info about the host machine
+ returned: always
+ type: dict
+ sample:
+ {
+ "ansible_all_ipv4_addresses": [
+ "10.76.33.200"
+ ],
+ "ansible_bios_date": "2011-01-01T00:00:00+00:00",
+ "ansible_bios_version": "0.5.1",
+ "ansible_datastore": [
+ {
+ "free": "11.63 GB",
+ "name": "datastore1",
+ "total": "12.50 GB"
+ }
+ ],
+ "ansible_distribution": "VMware ESXi",
+ "ansible_distribution_build": "4887370",
+ "ansible_distribution_version": "6.5.0",
+ "ansible_hostname": "10.76.33.100",
+ "ansible_in_maintenance_mode": true,
+ "ansible_interfaces": [
+ "vmk0"
+ ],
+ "ansible_memfree_mb": 2702,
+ "ansible_memtotal_mb": 4095,
+ "ansible_os_type": "vmnix-x86",
+ "ansible_processor": "Intel Xeon E312xx (Sandy Bridge)",
+ "ansible_processor_cores": 2,
+ "ansible_processor_count": 2,
+ "ansible_processor_vcpus": 2,
+ "ansible_product_name": "KVM",
+ "ansible_product_serial": "NA",
+ "ansible_system_vendor": "Red Hat",
+ "ansible_uptime": 1791680,
+ "ansible_uuid": "4c4c4544-0052-3410-804c-b2c04f4e3632",
+ "ansible_vmk0": {
+ "device": "vmk0",
+ "ipv4": {
+ "address": "10.76.33.100",
+ "netmask": "255.255.255.0"
+ },
+ "macaddress": "52:54:00:56:7d:59",
+ "mtu": 1500
+ },
+ "vsan_cluster_uuid": null,
+ "vsan_node_uuid": null,
+ "vsan_health": "unknown",
+ "tags": [
+ {
+ "category_id": "urn:vmomi:InventoryServiceCategory:8eb81431-b20d-49f5-af7b-126853aa1189:GLOBAL",
+ "category_name": "host_category_0001",
+ "description": "",
+ "id": "urn:vmomi:InventoryServiceTag:e9398232-46fd-461a-bf84-06128e182a4a:GLOBAL",
+ "name": "host_tag_0001"
+ }
+ ],
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.text.formatters import bytes_to_human
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi,
+ vmware_argument_spec,
+ find_obj,
+ ansible_date_time_facts
+)
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient
+
+
class VMwareHostFactManager(PyVmomi):
    """Gathers facts about a single ESXi host system.

    When connected to vCenter, ``esxi_hostname`` selects the host; when
    connected directly to an ESXi host, the single visible host system
    is used.
    """

    def __init__(self, module):
        """Locate the target host system and record an ESXi time source."""
        super(VMwareHostFactManager, self).__init__(module)
        esxi_host_name = self.params.get('esxi_hostname', None)
        if self.is_vcenter():
            # A vCenter manages many hosts, so the caller must pick exactly one.
            if esxi_host_name is None:
                self.module.fail_json(msg="Connected to a vCenter system without specifying esxi_hostname")
            self.host = self.get_all_host_objs(esxi_host_name=esxi_host_name)
            if len(self.host) > 1:
                self.module.fail_json(msg="esxi_hostname matched multiple hosts")
            self.host = self.host[0]
            # No host-local clock is queried through vCenter here.
            self.esxi_time = None
        else:
            # Direct ESXi connection: take the (single) host system found.
            self.host = find_obj(self.content, [vim.HostSystem], None)
            self.esxi_time = self.si.CurrentTime()

        if self.host is None:
            self.module.fail_json(msg="Failed to find host system.")

    def all_facts(self):
        """Collect the legacy 'summary' fact set and exit the module.

        Note: this method calls ``module.exit_json`` and does not return.
        """
        ansible_facts = {}
        ansible_facts.update(self.get_cpu_facts())
        ansible_facts.update(self.get_memory_facts())
        ansible_facts.update(self.get_datastore_facts())
        ansible_facts.update(self.get_network_facts())
        ansible_facts.update(self.get_system_facts())
        ansible_facts.update(self.get_vsan_facts())
        ansible_facts.update(self.get_cluster_facts())
        # esxi_time is None when connected via vCenter (see __init__).
        ansible_facts.update({'host_date_time': ansible_date_time_facts(self.esxi_time)})
        if self.params.get('show_tag'):
            # Tags live behind the vSphere REST API, not pyVmomi.
            vmware_client = VmwareRestClient(self.module)
            tag_info = {
                'tags': vmware_client.get_tags_for_hostsystem(hostsystem_mid=self.host._moId)
            }
            ansible_facts.update(tag_info)

        self.module.exit_json(changed=False, ansible_facts=ansible_facts)

    def get_cluster_facts(self):
        """Return the name of the host's cluster, or None when standalone."""
        cluster_facts = {'cluster': None}
        if self.host.parent and isinstance(self.host.parent, vim.ClusterComputeResource):
            cluster_facts.update(cluster=self.host.parent.name)
        return cluster_facts

    def get_vsan_facts(self):
        """Return VSAN cluster/node UUIDs and health for the host.

        Values are None when the host has no vsanSystem manager, and the
        string 'NA' when the host is not connected/reachable.
        """
        config_mgr = self.host.configManager.vsanSystem
        ret = {
            'vsan_cluster_uuid': None,
            'vsan_node_uuid': None,
            'vsan_health': "unknown",
        }

        if config_mgr is None:
            return ret

        try:
            status = config_mgr.QueryHostStatus()
        except (vmodl.fault.HostNotConnected, vmodl.fault.HostNotReachable):
            # Unreachable host: report placeholder values instead of failing.
            return {
                'vsan_cluster_uuid': 'NA',
                'vsan_node_uuid': 'NA',
                'vsan_health': 'NA',
            }
        except Exception as err:
            self.module.fail_json(msg="Unable to query VSAN status due to %s" % to_native(err))

        return {
            'vsan_cluster_uuid': status.uuid,
            'vsan_node_uuid': status.nodeUuid,
            'vsan_health': status.health,
        }

    def get_cpu_facts(self):
        """Return CPU model and core/package/thread counts."""
        return {
            'ansible_processor': self.host.summary.hardware.cpuModel,
            'ansible_processor_cores': self.host.summary.hardware.numCpuCores,
            'ansible_processor_count': self.host.summary.hardware.numCpuPkgs,
            'ansible_processor_vcpus': self.host.summary.hardware.numCpuThreads,
        }

    def get_memory_facts(self):
        """Return total and free memory in MiB."""
        memory_size = self.host.hardware.memorySize
        overall_memory = 0
        if self.host.summary.quickStats.overallMemoryUsage:
            overall_memory = self.host.summary.quickStats.overallMemoryUsage
        # memorySize is in bytes; overallMemoryUsage is already in MiB.
        memory_total = memory_size // 1024 // 1024
        return {
            'ansible_memfree_mb': memory_total - overall_memory,
            'ansible_memtotal_mb': memory_total,
        }

    def get_datastore_facts(self):
        """Return name/total/free (human-readable) for each datastore."""
        facts = dict()
        facts['ansible_datastore'] = []
        for store in self.host.datastore:
            _tmp = {
                'name': store.summary.name,
                'total': bytes_to_human(store.summary.capacity),
                'free': bytes_to_human(store.summary.freeSpace),
            }
            facts['ansible_datastore'].append(_tmp)
        return facts

    def get_network_facts(self):
        """Return per-vmkernel-NIC address facts plus aggregate lists."""
        facts = dict()
        facts['ansible_interfaces'] = []
        facts['ansible_all_ipv4_addresses'] = []
        for nic in self.host.config.network.vnic:
            device = nic.device
            facts['ansible_interfaces'].append(device)
            facts['ansible_all_ipv4_addresses'].append(nic.spec.ip.ipAddress)
            _tmp = {
                'device': device,
                'ipv4': {
                    'address': nic.spec.ip.ipAddress,
                    'netmask': nic.spec.ip.subnetMask,
                },
                'macaddress': nic.spec.mac,
                'mtu': nic.spec.mtu,
            }
            # One 'ansible_<device>' entry per vmkernel interface, e.g. ansible_vmk0.
            facts['ansible_' + device] = _tmp
        return facts

    def get_system_facts(self):
        """Return OS/hardware identity facts (product, BIOS, serial, uptime)."""
        sn = 'NA'
        # The hardware serial number is exposed as the 'ServiceTag' identifier.
        for info in self.host.hardware.systemInfo.otherIdentifyingInfo:
            if info.identifierType.key == 'ServiceTag':
                sn = info.identifierValue
        facts = {
            'ansible_host_connection_state': self.host.runtime.connectionState,
            'ansible_distribution': self.host.config.product.name,
            'ansible_distribution_version': self.host.config.product.version,
            'ansible_distribution_build': self.host.config.product.build,
            'ansible_os_type': self.host.config.product.osType,
            'ansible_system_vendor': self.host.hardware.systemInfo.vendor,
            'ansible_hostname': self.host.summary.config.name,
            'ansible_product_name': self.host.hardware.systemInfo.model,
            'ansible_product_serial': sn,
            'ansible_bios_date': self.host.hardware.biosInfo.releaseDate,
            'ansible_bios_version': self.host.hardware.biosInfo.biosVersion,
            'ansible_uptime': self.host.summary.quickStats.uptime,
            'ansible_in_maintenance_mode': self.host.runtime.inMaintenanceMode,
            'ansible_uuid': self.host.hardware.systemInfo.uuid,
        }
        return facts

    def properties_facts(self):
        """Collect the 'vsphere' schema facts (raw API properties) and exit.

        Note: this method calls ``module.exit_json`` and does not return.
        """
        ansible_facts = self.to_json(self.host, self.params.get('properties'))
        if self.params.get('show_tag'):
            vmware_client = VmwareRestClient(self.module)
            tag_info = {
                'tags': vmware_client.get_tags_for_hostsystem(hostsystem_mid=self.host._moId)
            }
            ansible_facts.update(tag_info)

        self.module.exit_json(changed=False, ansible_facts=ansible_facts)
+
+
def main():
    """Module entry point: gather host facts in the requested schema."""
    spec = vmware_argument_spec()
    spec.update(
        esxi_hostname=dict(type='str', required=False),
        show_tag=dict(type='bool', default=False),
        schema=dict(type='str', choices=['summary', 'vsphere'], default='summary'),
        properties=dict(type='list', elements='str'),
    )
    module = AnsibleModule(
        argument_spec=spec,
        supports_check_mode=True,
    )

    manager = VMwareHostFactManager(module)

    # 'summary' keeps the legacy flat fact layout; 'vsphere' mirrors the API.
    if module.params['schema'] == 'summary':
        manager.all_facts()
    else:
        manager.properties_facts()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_feature_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_feature_info.py
new file mode 100644
index 000000000..33c33a562
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_feature_info.py
@@ -0,0 +1,135 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_feature_info
+short_description: Gathers info about an ESXi host's feature capability information
+description:
+- This module can be used to gather information about an ESXi host's feature capability information when ESXi hostname or Cluster name is given.
+author:
+- Abhijeet Kasurde (@Akasurde)
+options:
+ cluster_name:
+ description:
+  - Name of the cluster; information is gathered from all host systems in this cluster.
+ - If C(esxi_hostname) is not given, this parameter is required.
+ type: str
+ esxi_hostname:
+ description:
+ - ESXi hostname to gather information from.
+ - If C(cluster_name) is not given, this parameter is required.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather feature capability info about all ESXi Hosts in given Cluster
+ community.vmware.vmware_host_feature_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: '{{ cluster_name }}'
+ delegate_to: localhost
+ register: all_cluster_hosts_info
+
+- name: Check if ESXi is vulnerable for Speculative Store Bypass Disable (SSBD) vulnerability
+ community.vmware.vmware_host_feature_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ esxi_hostname }}"
+ register: features_set
+- set_fact:
+ ssbd : "{{ item.value }}"
+  loop: "{{ features_set.hosts_feature_info[esxi_hostname] | json_query(name) }}"
+ vars:
+ name: "[?key=='cpuid.SSBD']"
+- assert:
+ that:
+ - ssbd|int == 1
+ when: ssbd is defined
+'''
+
+RETURN = r'''
+hosts_feature_info:
+ description: metadata about host's feature capability information
+ returned: always
+ type: dict
+ sample: {
+ "10.76.33.226": [
+ {
+ "feature_name": "cpuid.3DNOW",
+ "key": "cpuid.3DNOW",
+ "value": "0"
+ },
+ {
+ "feature_name": "cpuid.3DNOWPLUS",
+ "key": "cpuid.3DNOWPLUS",
+ "value": "0"
+ },
+ ]
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+
+
class FeatureCapabilityInfoManager(PyVmomi):
    """Gathers feature-capability data from the selected ESXi hosts."""

    def __init__(self, module):
        """Resolve the target host systems from cluster_name/esxi_hostname."""
        super(FeatureCapabilityInfoManager, self).__init__(module)
        cluster = self.params.get('cluster_name', None)
        esxi = self.params.get('esxi_hostname', None)
        self.hosts = self.get_all_host_objs(cluster_name=cluster, esxi_host_name=esxi)

    def gather_host_feature_info(self):
        """Return a dict mapping host name to its feature capability list."""
        info = dict()
        for host in self.hosts:
            info[host.name] = [
                {
                    'key': capability.key,
                    'feature_name': capability.featureName,
                    'value': capability.value,
                }
                for capability in host.config.featureCapability
            ]
        return info
+
+
def main():
    """Module entry point: gather feature capability info (read-only)."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
    )

    # At least one way of selecting hosts must be provided.
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ],
        supports_check_mode=True,
    )

    manager = FeatureCapabilityInfoManager(module)
    module.exit_json(changed=False,
                     hosts_feature_info=manager.gather_host_feature_info())


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_firewall_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_firewall_info.py
new file mode 100644
index 000000000..f735ac651
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_firewall_info.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_firewall_info
+short_description: Gathers info about an ESXi host's firewall configuration information
+description:
+- This module can be used to gather information about an ESXi host's firewall configuration information when ESXi hostname or Cluster name is given.
+author:
+- Abhijeet Kasurde (@Akasurde)
+options:
+ cluster_name:
+ description:
+  - Name of the cluster to which the ESXi host belongs.
+ - If C(esxi_hostname) is not given, this parameter is required.
+ type: str
+ esxi_hostname:
+ description:
+ - ESXi hostname to gather information from.
+ - If C(cluster_name) is not given, this parameter is required.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather firewall info about all ESXi Host in given Cluster
+ community.vmware.vmware_host_firewall_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: cluster_name
+ delegate_to: localhost
+
+- name: Gather firewall info about ESXi Host
+ community.vmware.vmware_host_firewall_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+hosts_firewall_info:
+ description: metadata about host's firewall configuration
+ returned: on success
+ type: dict
+ sample: {
+ "esxi_hostname_0001": [
+ {
+ "allowed_hosts": {
+ "all_ip": true,
+ "ip_address": [
+ "10.10.10.1",
+ ],
+ "ip_network": [
+ "11.111.112.0/22",
+ "192.168.10.1/24"
+ ],
+ },
+ "enabled": true,
+ "key": "CIMHttpServer",
+ "rule": [
+ {
+ "direction": "inbound",
+ "end_port": null,
+ "port": 5988,
+ "port_type": "dst",
+ "protocol": "tcp"
+ }
+ ],
+ "service": "sfcbd-watchdog"
+ },
+ ]
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+
+
class FirewallInfoManager(PyVmomi):
    """Collects firewall rule-set details from the selected ESXi hosts."""

    def __init__(self, module):
        """Resolve the target host systems from cluster_name/esxi_hostname."""
        super(FirewallInfoManager, self).__init__(module)
        cluster = self.params.get('cluster_name', None)
        esxi = self.params.get('esxi_hostname', None)
        self.hosts = self.get_all_host_objs(cluster_name=cluster, esxi_host_name=esxi)

    @staticmethod
    def normalize_rule_set(rule_obj):
        """Convert a vim firewall ruleset object into a plain dictionary."""
        normalized = {
            'key': rule_obj.key,
            'service': rule_obj.service,
            'enabled': rule_obj.enabled,
            'rule': [
                {
                    'port': rule.port,
                    'end_port': rule.endPort,
                    'direction': rule.direction,
                    'port_type': rule.portType,
                    'protocol': rule.protocol,
                }
                for rule in rule_obj.rule
            ],
        }
        allowed = rule_obj.allowedHosts
        normalized['allowed_hosts'] = {
            'ip_address': list(allowed.ipAddress),
            # Render networks in CIDR notation, e.g. "192.168.10.0/24".
            'ip_network': [ip.network + "/" + str(ip.prefixLength) for ip in allowed.ipNetwork],
            'all_ip': allowed.allIp,
        }
        return normalized

    def gather_host_firewall_info(self):
        """Return a dict mapping host name to its normalized rule sets.

        Hosts without a firewallSystem manager are omitted from the result.
        """
        results = dict()
        for host in self.hosts:
            firewall_system = host.configManager.firewallSystem
            if not firewall_system:
                continue
            results[host.name] = [
                self.normalize_rule_set(rule_obj=ruleset)
                for ruleset in firewall_system.firewallInfo.ruleset
            ]
        return results
+
+
def main():
    """Module entry point: gather firewall configuration info (read-only)."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
    )

    # At least one way of selecting hosts must be provided.
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ],
        supports_check_mode=True,
    )

    manager = FirewallInfoManager(module)
    module.exit_json(changed=False, hosts_firewall_info=manager.gather_host_firewall_info())


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_firewall_manager.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_firewall_manager.py
new file mode 100644
index 000000000..62e6ea638
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_firewall_manager.py
@@ -0,0 +1,433 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_firewall_manager
+short_description: Manage firewall configurations of an ESXi host
+description:
+- This module can be used to manage firewall configurations of an ESXi host when an ESXi hostname or cluster name is given.
+author:
+- Abhijeet Kasurde (@Akasurde)
+- Aaron Longchamps (@alongchamps)
+options:
+ cluster_name:
+ description:
+ - Name of the cluster.
+ - Firewall settings are applied to every ESXi host system in given cluster.
+ - If C(esxi_hostname) is not given, this parameter is required.
+ type: str
+ esxi_hostname:
+ description:
+ - ESXi hostname.
+ - Firewall settings are applied to this ESXi host system.
+ - If C(cluster_name) is not given, this parameter is required.
+ type: str
+ rules:
+ description:
+ - A list of Rule set which needs to be managed.
+ - Each member of list is rule set name and state to be set the rule.
+ - Both rule name and rule state are required parameters.
+ - Additional IPs and networks can also be specified
+ - Please see examples for more information.
+ default: []
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Rule set name.
+ type: str
+ required: true
+ enabled:
+ description:
+ - Whether the rule set is enabled or not.
+ type: bool
+ required: true
+ allowed_hosts:
+ description:
+ - Define the allowed hosts for this rule set.
+ type: dict
+ suboptions:
+ all_ip:
+ description:
+ - Whether all hosts should be allowed or not.
+ type: bool
+ required: true
+ ip_address:
+ description:
+ - List of allowed IP addresses.
+ type: list
+ elements: str
+ default: []
+ ip_network:
+ description:
+ - List of allowed IP networks.
+ type: list
+ elements: str
+ default: []
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Enable vvold rule set for all ESXi Host in given Cluster
+ community.vmware.vmware_host_firewall_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: cluster_name
+ rules:
+ - name: vvold
+ enabled: true
+ allowed_hosts:
+ all_ip: true
+ delegate_to: localhost
+
+- name: Enable vvold rule set for an ESXi Host
+ community.vmware.vmware_host_firewall_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ rules:
+ - name: vvold
+ enabled: true
+ allowed_hosts:
+ all_ip: true
+ delegate_to: localhost
+
+- name: Manage multiple rule set for an ESXi Host
+ community.vmware.vmware_host_firewall_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ rules:
+ - name: vvold
+ enabled: true
+ allowed_hosts:
+ all_ip: true
+ - name: CIMHttpServer
+ enabled: false
+ delegate_to: localhost
+
+- name: Manage IP and network based firewall permissions for ESXi
+ community.vmware.vmware_host_firewall_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ rules:
+ - name: gdbserver
+ enabled: true
+ allowed_hosts:
+ all_ip: false
+ ip_address:
+ - 192.168.20.10
+ - 192.168.20.11
+ - name: CIMHttpServer
+ enabled: true
+ allowed_hosts:
+ all_ip: false
+ ip_network:
+ - 192.168.100.0/24
+ - name: remoteSerialPort
+ enabled: true
+ allowed_hosts:
+ all_ip: false
+ ip_address:
+ - 192.168.100.11
+ ip_network:
+ - 192.168.200.0/24
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+rule_set_state:
+ description:
+ - dict with hostname as key and dict with firewall rule set facts as value
+ returned: success
+ type: dict
+ sample: {
+ "rule_set_state": {
+ "localhost.localdomain": {
+ "CIMHttpServer": {
+ "current_state": false,
+ "desired_state": false,
+ "previous_state": true,
+ "allowed_hosts": {
+ "current_allowed_all": true,
+ "previous_allowed_all": true,
+ "desired_allowed_all": true,
+ "current_allowed_ip": [],
+ "previous_allowed_ip": [],
+ "desired_allowed_ip": [],
+ "current_allowed_networks": [],
+ "previous_allowed_networks": [],
+ "desired_allowed_networks": [],
+ }
+ },
+ "remoteSerialPort": {
+ "current_state": true,
+ "desired_state": true,
+ "previous_state": true,
+ "allowed_hosts": {
+ "current_allowed_all": false,
+ "previous_allowed_all": true,
+ "desired_allowed_all": false,
+ "current_allowed_ip": ["192.168.100.11"],
+ "previous_allowed_ip": [],
+ "desired_allowed_ip": ["192.168.100.11"],
+ "current_allowed_networks": ["192.168.200.0/24"],
+ "previous_allowed_networks": [],
+ "desired_allowed_networks": ["192.168.200.0/24"],
+ }
+ }
+ }
+ }
+ }
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+from ansible.module_utils._text import to_native
+import socket
+
+
def is_ipaddress(value):
    """Return True if *value* is a valid IPv4 or IPv6 address, False otherwise."""
    # Try IPv4 first; fall back to IPv6 on failure.
    try:
        socket.inet_aton(value)
        return True
    except socket.error:
        pass
    try:
        socket.inet_pton(socket.AF_INET6, value)
        return True
    except socket.error:
        return False
+
+
class VmwareFirewallManager(PyVmomi):
    """Apply the desired firewall rule-set state to one or more ESXi hosts."""

    def __init__(self, module):
        """Resolve target hosts and pre-load their current firewall facts."""
        super(VmwareFirewallManager, self).__init__(module)
        cluster_name = self.params.get('cluster_name', None)
        esxi_host_name = self.params.get('esxi_hostname', None)
        # NOTE(review): 'options' is not declared in the argument spec, so this
        # is always an empty dict; kept for backward compatibility.
        self.options = self.params.get('options', dict())
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        self.firewall_facts = dict()
        self.rule_options = self.module.params.get("rules")
        self.gather_rule_set()

    def gather_rule_set(self):
        """Populate self.firewall_facts with the current rule-set state per host."""
        for host in self.hosts:
            self.firewall_facts[host.name] = {}
            firewall_system = host.configManager.firewallSystem
            if firewall_system:
                for rule_set_obj in firewall_system.firewallInfo.ruleset:
                    temp_rule_dict = dict()
                    temp_rule_dict['enabled'] = rule_set_obj.enabled
                    allowed_host = rule_set_obj.allowedHosts
                    rule_allow_host = dict()
                    rule_allow_host['ip_address'] = allowed_host.ipAddress
                    # Networks are normalized to "address/prefix" strings.
                    rule_allow_host['ip_network'] = [ip.network + "/" + str(ip.prefixLength) for ip in allowed_host.ipNetwork]
                    rule_allow_host['all_ip'] = allowed_host.allIp
                    temp_rule_dict['allowed_hosts'] = rule_allow_host
                    self.firewall_facts[host.name][rule_set_obj.key] = temp_rule_dict

    def check_params(self):
        """Validate rule names and allowed_hosts entries; fail the module on bad input.

        Bug fix: is_ipaddress() returns a boolean and never raises ValueError,
        so the previous try/except-based validation could never fail. The
        return value is now checked directly. ip_network entries are validated
        as "address/prefix" because ensure() later splits them on '/'.
        """
        rules_by_host = {}
        for host in self.hosts:
            rules_by_host[host.name] = self.firewall_facts[host.name].keys()

        for rule_option in self.rule_options:
            rule_name = rule_option.get('name')
            hosts_with_rule_name = [h for h, r in rules_by_host.items() if rule_name in r]
            hosts_without_rule_name = set([i.name for i in self.hosts]) - set(hosts_with_rule_name)
            if hosts_without_rule_name:
                self.module.fail_json(msg="rule named '%s' wasn't found on hosts: %s" % (
                    rule_name, hosts_without_rule_name))

            allowed_hosts = rule_option.get('allowed_hosts')
            if allowed_hosts is not None:
                for ip_address in allowed_hosts.get('ip_address'):
                    if not is_ipaddress(ip_address):
                        self.module.fail_json(msg="The provided IP address %s is not a valid IP"
                                                  " for the rule %s" % (ip_address, rule_name))

                for ip_network in allowed_hosts.get('ip_network'):
                    # Must be "address/prefix": ensure() splits on '/' and
                    # converts the prefix with int().
                    address, slash, prefix = ip_network.partition('/')
                    if not (slash and prefix.isdigit() and is_ipaddress(address)):
                        self.module.fail_json(msg="The provided IP network %s is not a valid network"
                                                  " for the rule %s" % (ip_network, rule_name))

    def ensure(self):
        """Reconcile every requested rule set on every host and exit the module.

        Enables/disables rule sets and updates their allowed-hosts lists as
        needed; honors check mode; exits with changed=True if anything was
        (or would be) modified.
        """
        fw_change_list = []
        enable_disable_changed = False
        allowed_ip_changed = False
        results = dict(changed=False, rule_set_state=dict())
        for host in self.hosts:
            firewall_system = host.configManager.firewallSystem
            if firewall_system is None:
                # Host exposes no firewall system; nothing to manage.
                continue
            results['rule_set_state'][host.name] = {}
            for rule_option in self.rule_options:
                rule_name = rule_option.get('name', None)

                current_rule_state = self.firewall_facts[host.name][rule_name]['enabled']
                if current_rule_state != rule_option['enabled']:
                    try:
                        if not self.module.check_mode:
                            if rule_option['enabled']:
                                firewall_system.EnableRuleset(id=rule_name)
                            else:
                                firewall_system.DisableRuleset(id=rule_name)
                        # keep track of changes as we go
                        enable_disable_changed = True
                    except vim.fault.NotFound as not_found:
                        self.module.fail_json(msg="Failed to enable rule set %s as"
                                                  " rule set id is unknown : %s" % (
                                                      rule_name,
                                                      to_native(not_found.msg)))
                    except vim.fault.HostConfigFault as host_config_fault:
                        self.module.fail_json(msg="Failed to enabled rule set %s as an internal"
                                                  " error happened while reconfiguring"
                                                  " rule set : %s" % (
                                                      rule_name,
                                                      to_native(host_config_fault.msg)))

                # save variables here for comparison later and change tracking
                # also covers cases where inputs may be null
                permitted_networking = self.firewall_facts[host.name][rule_name]
                rule_allows_all = permitted_networking['allowed_hosts']['all_ip']
                rule_allowed_ips = set(permitted_networking['allowed_hosts']['ip_address'])
                rule_allowed_networks = set(permitted_networking['allowed_hosts']['ip_network'])

                allowed_hosts = rule_option.get('allowed_hosts')
                playbook_allows_all = False if allowed_hosts is None else allowed_hosts.get('all_ip')
                playbook_allowed_ips = set([]) if allowed_hosts is None else set(allowed_hosts.get('ip_address'))
                playbook_allowed_networks = set([]) if allowed_hosts is None else set(allowed_hosts.get('ip_network'))

                # compare what is configured on the firewall rule with what the playbook provides
                allowed_all_ips_different = bool(rule_allows_all != playbook_allows_all)
                ip_list_different = bool(rule_allowed_ips != playbook_allowed_ips)
                ip_network_different = bool(rule_allowed_networks != playbook_allowed_networks)

                # apply everything here in one function call
                if allowed_all_ips_different is True or ip_list_different is True or ip_network_different is True:
                    try:
                        allowed_ip_changed = True
                        if not self.module.check_mode:
                            # setup spec
                            firewall_spec = vim.host.Ruleset.RulesetSpec()
                            firewall_spec.allowedHosts = vim.host.Ruleset.IpList()
                            firewall_spec.allowedHosts.allIp = playbook_allows_all
                            firewall_spec.allowedHosts.ipAddress = list(playbook_allowed_ips)
                            firewall_spec.allowedHosts.ipNetwork = []

                            for i in playbook_allowed_networks:
                                address, mask = i.split('/')
                                tmp_ip_network_spec = vim.host.Ruleset.IpNetwork()
                                tmp_ip_network_spec.network = address
                                tmp_ip_network_spec.prefixLength = int(mask)
                                firewall_spec.allowedHosts.ipNetwork.append(tmp_ip_network_spec)

                            firewall_system.UpdateRuleset(id=rule_name, spec=firewall_spec)
                    except vim.fault.NotFound as not_found:
                        self.module.fail_json(msg="Failed to configure rule set %s as"
                                                  " rule set id is unknown : %s" % (rule_name,
                                                                                    to_native(not_found.msg)))
                    except vim.fault.HostConfigFault as host_config_fault:
                        self.module.fail_json(msg="Failed to configure rule set %s as an internal"
                                                  " error happened while reconfiguring"
                                                  " rule set : %s" % (rule_name,
                                                                      to_native(host_config_fault.msg)))
                    except vim.fault.RuntimeFault as runtime_fault:
                        self.module.fail_json(msg="Failed to configure the rule set %s as a runtime"
                                                  " error happened while applying the reconfiguration:"
                                                  " %s" % (rule_name, to_native(runtime_fault.msg)))

                # NOTE(review): 'current_*' keys mirror the desired (playbook)
                # values rather than being re-read from the host, and sets are
                # returned as-is — confirm serialization is acceptable.
                results['rule_set_state'][host.name][rule_name] = {
                    'current_state': rule_option['enabled'],
                    'previous_state': current_rule_state,
                    'desired_state': rule_option['enabled'],
                    'allowed_hosts': {
                        'current_allowed_all': playbook_allows_all,
                        'previous_allowed_all': permitted_networking['allowed_hosts']['all_ip'],
                        'desired_allowed_all': playbook_allows_all,
                        'current_allowed_ip': playbook_allowed_ips,
                        'previous_allowed_ip': set(permitted_networking['allowed_hosts']['ip_address']),
                        'desired_allowed_ip': playbook_allowed_ips,
                        'current_allowed_networks': playbook_allowed_networks,
                        'previous_allowed_networks': set(permitted_networking['allowed_hosts']['ip_network']),
                        'desired_allowed_networks': playbook_allowed_networks,
                    }
                }

        if enable_disable_changed or allowed_ip_changed:
            fw_change_list.append(True)

        if any(fw_change_list):
            results['changed'] = True
        self.module.exit_json(**results)
+
+
def main():
    """Module entry point: build the argument spec and reconcile firewall rules."""
    spec = vmware_argument_spec()
    allowed_hosts_spec = dict(
        type='dict',
        options=dict(
            all_ip=dict(type='bool', required=True),
            ip_address=dict(type='list', elements='str', default=list()),
            ip_network=dict(type='list', elements='str', default=list()),
        ),
    )
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
        rules=dict(
            type='list',
            default=list(),
            required=False,
            elements='dict',
            options=dict(
                name=dict(type='str', required=True),
                enabled=dict(type='bool', required=True),
                allowed_hosts=allowed_hosts_spec,
            ),
        ),
    )

    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[['cluster_name', 'esxi_hostname']],
        supports_check_mode=True,
    )

    manager = VmwareFirewallManager(module)
    manager.check_params()
    manager.ensure()


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_hyperthreading.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_hyperthreading.py
new file mode 100644
index 000000000..e3431ca7b
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_hyperthreading.py
@@ -0,0 +1,249 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_hyperthreading
+short_description: Enables/Disables Hyperthreading optimization for an ESXi host system
+description:
+- This module can be used to enable or disable Hyperthreading optimization for ESXi host systems in given vCenter infrastructure.
+- It also checks if Hyperthreading is activated/deactivated and if the host needs to be restarted.
+- The module informs the user if Hyperthreading is enabled but inactive because the processor is vulnerable to L1 Terminal Fault (L1TF).
+author:
+- Christian Kotte (@ckotte)
+options:
+ state:
+ description:
+ - Enable or disable Hyperthreading.
+ - You need to reboot the ESXi host if you change the configuration.
+ - Make sure that Hyperthreading is enabled in the BIOS. Otherwise, it will be enabled, but never activated.
+ type: str
+ choices: [ enabled, disabled ]
+ default: 'enabled'
+ esxi_hostname:
+ description:
+ - Name of the host system to work with.
+ - This parameter is required if C(cluster_name) is not specified.
+ type: str
+ cluster_name:
+ description:
+ - Name of the cluster from which all host systems will be used.
+ - This parameter is required if C(esxi_hostname) is not specified.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Enable Hyperthreading for an host system
+ community.vmware.vmware_host_hyperthreading:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ state: enabled
+ delegate_to: localhost
+
+- name: Disable Hyperthreading for an host system
+ community.vmware.vmware_host_hyperthreading:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ state: disabled
+ delegate_to: localhost
+
+- name: Disable Hyperthreading for all host systems from cluster
+ community.vmware.vmware_host_hyperthreading:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: '{{ cluster_name }}'
+ state: disabled
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+results:
+ description: metadata about host system's Hyperthreading configuration
+ returned: always
+ type: dict
+ sample: {
+ "esxi01": {
+ "msg": "Hyperthreading is already enabled and active for host 'esxi01'",
+ "state_current": "active",
+ "state": "enabled",
+ },
+ }
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+from ansible.module_utils._text import to_native
+
+
class VmwareHostHyperthreading(PyVmomi):
    """Manage Hyperthreading for an ESXi host system"""

    def __init__(self, module):
        # Resolve the target hosts from cluster_name/esxi_hostname; fail early
        # if nothing matches.
        super(VmwareHostHyperthreading, self).__init__(module)
        cluster_name = self.params.get('cluster_name')
        esxi_host_name = self.params.get('esxi_hostname')
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        if not self.hosts:
            self.module.fail_json(msg="Failed to find host system.")

    def ensure(self):
        """Manage Hyperthreading for an ESXi host system"""
        # Per-host results are collected under results['result'][host.name];
        # host_change_list drives the module-level 'changed' flag.
        results = dict(changed=False, result=dict())
        desired_state = self.params.get('state')
        host_change_list = []
        for host in self.hosts:
            changed = False
            results['result'][host.name] = dict(msg='')

            # Current Hyperthreading state as reported by the host:
            # .config (configured), .active (in effect), .available (supported).
            hyperthreading_info = host.config.hyperThread

            results['result'][host.name]['state'] = desired_state
            if desired_state == 'enabled':
                # Don't do anything if Hyperthreading is already enabled
                if hyperthreading_info.config:
                    if hyperthreading_info.active:
                        results['result'][host.name]['changed'] = False
                        results['result'][host.name]['state_current'] = "active"
                        results['result'][host.name]['msg'] = "Hyperthreading is enabled and active"
                    if not hyperthreading_info.active:
                        # Enabled but inactive: either the L1TF mitigation keeps
                        # it off, or the host still needs a reboot.
                        # L1 Terminal Fault (L1TF)/Foreshadow mitigation workaround (https://kb.vmware.com/s/article/55806)
                        option_manager = host.configManager.advancedOption
                        try:
                            mitigation = option_manager.QueryOptions('VMkernel.Boot.hyperthreadingMitigation')
                        except vim.fault.InvalidName:
                            # Advanced option unknown on this ESXi version;
                            # treat as "no mitigation configured".
                            mitigation = None
                        if mitigation and mitigation[0].value:
                            results['result'][host.name]['changed'] = False
                            results['result'][host.name]['state_current'] = "enabled"
                            results['result'][host.name]['msg'] = ("Hyperthreading is enabled, but not active because the"
                                                                   " processor is vulnerable to L1 Terminal Fault (L1TF).")
                        else:
                            changed = results['result'][host.name]['changed'] = True
                            results['result'][host.name]['state_current'] = "enabled"
                            results['result'][host.name]['msg'] = ("Hyperthreading is enabled, but not active."
                                                                   " A reboot is required!")
                # Enable Hyperthreading
                else:
                    # Check if Hyperthreading is available
                    if hyperthreading_info.available:
                        if not self.module.check_mode:
                            try:
                                host.configManager.cpuScheduler.EnableHyperThreading()
                                changed = results['result'][host.name]['changed'] = True
                                results['result'][host.name]['state_previous'] = "disabled"
                                results['result'][host.name]['state_current'] = "enabled"
                                results['result'][host.name]['msg'] = (
                                    "Hyperthreading enabled for host. Reboot the host to activate it."
                                )
                            except vmodl.fault.NotSupported as not_supported:
                                # This should never happen since Hyperthreading is available
                                self.module.fail_json(
                                    msg="Failed to enable Hyperthreading for host '%s' : %s" %
                                    (host.name, to_native(not_supported.msg))
                                )
                            except (vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault:
                                self.module.fail_json(
                                    msg="Failed to enable Hyperthreading for host '%s' due to : %s" %
                                    (host.name, to_native(runtime_fault.msg))
                                )
                        else:
                            # check mode: report the prospective change only.
                            changed = results['result'][host.name]['changed'] = True
                            results['result'][host.name]['state_previous'] = "disabled"
                            results['result'][host.name]['state_current'] = "enabled"
                            results['result'][host.name]['msg'] = "Hyperthreading will be enabled"
                    else:
                        self.module.fail_json(msg="Hyperthreading optimization is not available for host '%s'" % host.name)
            elif desired_state == 'disabled':
                # Don't do anything if Hyperthreading is already disabled
                if not hyperthreading_info.config:
                    if not hyperthreading_info.active:
                        results['result'][host.name]['changed'] = False
                        results['result'][host.name]['state_current'] = "inactive"
                        results['result'][host.name]['msg'] = "Hyperthreading is disabled and inactive"
                    if hyperthreading_info.active:
                        changed = results['result'][host.name]['changed'] = True
                        results['result'][host.name]['state_current'] = "disabled"
                        results['result'][host.name]['msg'] = ("Hyperthreading is already disabled"
                                                               " but still active. A reboot is required!")
                # Disable Hyperthreading
                else:
                    # Check if Hyperthreading is available
                    if hyperthreading_info.available:
                        if not self.module.check_mode:
                            try:
                                host.configManager.cpuScheduler.DisableHyperThreading()
                                changed = results['result'][host.name]['changed'] = True
                                results['result'][host.name]['state_previous'] = "enabled"
                                results['result'][host.name]['state_current'] = "disabled"
                                results['result'][host.name]['msg'] = (
                                    "Hyperthreading disabled. Reboot the host to deactivate it."
                                )
                            except vmodl.fault.NotSupported as not_supported:
                                # This should never happen since Hyperthreading is available
                                self.module.fail_json(
                                    msg="Failed to disable Hyperthreading for host '%s' : %s" %
                                    (host.name, to_native(not_supported.msg))
                                )
                            except (vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault:
                                self.module.fail_json(
                                    msg="Failed to disable Hyperthreading for host '%s' due to : %s" %
                                    (host.name, to_native(runtime_fault.msg))
                                )
                        else:
                            # check mode: report the prospective change only.
                            changed = results['result'][host.name]['changed'] = True
                            results['result'][host.name]['state_previous'] = "enabled"
                            results['result'][host.name]['state_current'] = "disabled"
                            results['result'][host.name]['msg'] = "Hyperthreading will be disabled"
                    else:
                        self.module.fail_json(msg="Hyperthreading optimization is not available for host '%s'" % host.name)

            host_change_list.append(changed)

        if any(host_change_list):
            results['changed'] = True
        self.module.exit_json(**results)
+
+
def main():
    """Module entry point."""
    spec = vmware_argument_spec()
    spec.update(
        state=dict(default='enabled', choices=['enabled', 'disabled']),
        esxi_hostname=dict(type='str', required=False),
        cluster_name=dict(type='str', required=False),
    )

    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[['cluster_name', 'esxi_hostname']],
        supports_check_mode=True,
    )

    VmwareHostHyperthreading(module).ensure()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_ipv6.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_ipv6.py
new file mode 100644
index 000000000..320e4a185
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_ipv6.py
@@ -0,0 +1,227 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_ipv6
+short_description: Enables/Disables IPv6 support for an ESXi host system
+description:
+- This module can be used to enable or disable IPv6 support for ESXi host systems in given vCenter infrastructure.
+- It also checks if the host needs to be restarted.
+author:
+- Christian Kotte (@ckotte) <christian.kotte@gmx.de>
+options:
+ state:
+ description:
+ - Enable or disable IPv6 support.
+ - You need to reboot the ESXi host if you change the configuration.
+ type: str
+ choices: [ enabled, disabled ]
+ default: 'enabled'
+ esxi_hostname:
+ description:
+ - Name of the host system to work with.
+ - This is required parameter if C(cluster_name) is not specified.
+ type: str
+ cluster_name:
+ description:
+ - Name of the cluster from which all host systems will be used.
+ - This is required parameter if C(esxi_hostname) is not specified.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Enable IPv6 for an host system
+ community.vmware.vmware_host_ipv6:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ state: enabled
+ delegate_to: localhost
+
+- name: Disable IPv6 for an host system
+ community.vmware.vmware_host_ipv6:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ state: disabled
+ delegate_to: localhost
+
+- name: Disable IPv6 for all host systems from cluster
+ community.vmware.vmware_host_ipv6:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: '{{ cluster_name }}'
+ state: disabled
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+result:
+ description: metadata about host system's IPv6 configuration
+ returned: always
+ type: dict
+ sample: {
+ "esxi01": {
+ "changed": false,
+ "msg": "IPv6 is already enabled and active for host 'esxi01'",
+ },
+ }
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+from ansible.module_utils._text import to_native
+
+
class VmwareHostIPv6(PyVmomi):
    """Class to manage IPv6 for an ESXi host system"""

    def __init__(self, module):
        # Resolve the target hosts from cluster_name/esxi_hostname; fail early
        # if nothing matches.
        super(VmwareHostIPv6, self).__init__(module)
        cluster_name = self.params.get('cluster_name')
        esxi_host_name = self.params.get('esxi_hostname')
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        if not self.hosts:
            self.module.fail_json(msg="Failed to find host system with given configuration.")

    def ensure(self):
        """Manage IPv6 for an ESXi host system"""
        # Per-host results are collected under results['result'][host.name];
        # host_change_list drives the module-level 'changed' flag.
        results = dict(changed=False, result=dict())
        desired_state = self.module.params['state']

        host_change_list = []
        for host in self.hosts:
            changed = False
            results['result'][host.name] = dict(msg='')

            # atBootIpV6Enabled is the configured (next-boot) setting;
            # ipV6Enabled is the currently active state.
            host_network_system = host.configManager.networkSystem
            host_network_info = host_network_system.networkInfo

            if desired_state == 'enabled':
                # Don't do anything if IPv6 is already enabled
                if host_network_info.atBootIpV6Enabled:
                    if host_network_info.ipV6Enabled:
                        results['result'][host.name]['msg'] = "IPv6 is already enabled and active for host '%s'" % \
                                                              host.name
                    if not host_network_info.ipV6Enabled:
                        # NOTE(review): unlike the 'disabled' branch below, this
                        # reboot-pending path does not set changed — confirm
                        # the asymmetry is intended.
                        results['result'][host.name]['msg'] = ("IPv6 is already enabled for host '%s', but a reboot"
                                                               " is required!" % host.name)
                # Enable IPv6
                else:
                    if not self.module.check_mode:
                        try:
                            config = vim.host.NetworkConfig()
                            config.ipV6Enabled = True
                            host_network_system.UpdateNetworkConfig(config, "modify")
                            changed = True
                            results['result'][host.name]['changed'] = True
                            results['result'][host.name]['msg'] = "IPv6 enabled for host '%s'" % host.name
                        except (vim.fault.AlreadyExists, vim.fault.NotFound):
                            self.module.fail_json(msg="Network entity specified in the configuration for host '%s'"
                                                      " already exists" % host.name)
                        except vmodl.fault.InvalidArgument as invalid_argument:
                            self.module.fail_json(msg="Invalid parameter specified for host '%s' : %s" %
                                                      (host.name, to_native(invalid_argument.msg)))
                        except vim.fault.HostConfigFault as config_fault:
                            self.module.fail_json(msg="Failed to enable IPv6 for host '%s' due to : %s" %
                                                      (host.name, to_native(config_fault.msg)))
                        except vmodl.fault.NotSupported as not_supported:
                            self.module.fail_json(msg="Failed to enable IPv6 for host '%s' due to : %s" %
                                                      (host.name, to_native(not_supported.msg)))
                        except (vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault:
                            self.module.fail_json(msg="Failed to enable IPv6 for host '%s' due to : %s" %
                                                      (host.name, to_native(runtime_fault.msg)))
                    else:
                        # check mode: report the prospective change only.
                        changed = True
                        results['result'][host.name]['changed'] = True
                        results['result'][host.name]['msg'] = "IPv6 will be enabled for host '%s'" % host.name
            elif desired_state == 'disabled':
                # Don't do anything if IPv6 is already disabled
                if not host_network_info.atBootIpV6Enabled:
                    if not host_network_info.ipV6Enabled:
                        results['result'][host.name]['msg'] = "IPv6 is already disabled for host '%s'" % host.name
                    if host_network_info.ipV6Enabled:
                        # Configured off but still active until the next reboot.
                        changed = True
                        results['result'][host.name]['msg'] = ("IPv6 is already disabled for host '%s',"
                                                               " but a reboot is required!" % host.name)
                # Disable IPv6
                else:
                    if not self.module.check_mode:
                        try:
                            config = vim.host.NetworkConfig()
                            config.ipV6Enabled = False
                            host_network_system.UpdateNetworkConfig(config, "modify")
                            changed = True
                            results['result'][host.name]['changed'] = True
                            results['result'][host.name]['msg'] = "IPv6 disabled for host '%s'" % host.name
                        except (vim.fault.AlreadyExists, vim.fault.NotFound):
                            self.module.fail_json(msg="Network entity specified in the configuration for host '%s'"
                                                      " already exists" % host.name)
                        except vmodl.fault.InvalidArgument as invalid_argument:
                            self.module.fail_json(msg="Invalid parameter specified for host '%s' : %s" %
                                                      (host.name, to_native(invalid_argument.msg)))
                        except vim.fault.HostConfigFault as config_fault:
                            self.module.fail_json(msg="Failed to disable IPv6 for host '%s' due to : %s" %
                                                      (host.name, to_native(config_fault.msg)))
                        except vmodl.fault.NotSupported as not_supported:
                            self.module.fail_json(msg="Failed to disable IPv6 for host '%s' due to : %s" %
                                                      (host.name, to_native(not_supported.msg)))
                        except (vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault:
                            self.module.fail_json(msg="Failed to disable IPv6 for host '%s' due to : %s" %
                                                      (host.name, to_native(runtime_fault.msg)))
                    else:
                        # check mode: report the prospective change only.
                        changed = True
                        results['result'][host.name]['changed'] = True
                        results['result'][host.name]['msg'] = "IPv6 will be disabled for host '%s'" % host.name

            host_change_list.append(changed)

        if any(host_change_list):
            results['changed'] = True
        self.module.exit_json(**results)
+
+
def main():
    """Module entry point."""
    spec = vmware_argument_spec()
    spec.update(
        state=dict(default='enabled', choices=['enabled', 'disabled']),
        esxi_hostname=dict(type='str', required=False),
        cluster_name=dict(type='str', required=False),
    )

    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[['cluster_name', 'esxi_hostname']],
        supports_check_mode=True,
    )

    VmwareHostIPv6(module).ensure()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_iscsi.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_iscsi.py
new file mode 100644
index 000000000..f8739ab60
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_iscsi.py
@@ -0,0 +1,889 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, sky-joker
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: vmware_host_iscsi
+short_description: Manage the iSCSI configuration of ESXi host
+author:
+ - sky-joker (@sky-joker)
+description:
+  - This module can be used to manage the iSCSI configuration of an ESXi host.
+options:
+ esxi_hostname:
+ description:
+ - The ESXi hostname on which to change iSCSI settings.
+ type: str
+ required: true
+ iscsi_config:
+ description:
+ - The iSCSI configs.
+ - This parameter is required if I(state=present) or I(state=absent).
+ type: dict
+ suboptions:
+ iscsi_name:
+ description:
+ - The name for the iSCSI HBA adapter.
+ - This is iSCSI qualified name.
+ type: str
+ aliases:
+ - initiator_iqn
+ alias:
+ description:
+ - The new value for the alias of the adapter.
+ type: str
+ default: ''
+ authentication:
+ description:
+ - CHAP authentication parent settings for iSCSI.
+ type: dict
+ suboptions:
+ chap_auth_enabled:
+ description:
+ - Whether to enable CHAP authentication.
+ type: bool
+ default: false
+ chap_authentication_type:
+ description:
+ - The preference for CHAP or non-CHAP protocol of CHAP if CHAP is enabled.
+ type: str
+ default: chapProhibited
+ choices:
+ - chapDiscouraged
+ - chapPreferred
+ - chapRequired
+ - chapProhibited
+ chap_name:
+ description:
+ - CHAP user name if CHAP is enabled.
+ type: str
+ default: ''
+ chap_secret:
+ description:
+ - The secret password of CHAP if CHAP is enabled.
+ type: str
+ mutual_chap_authentication_type:
+ description:
+ - The preference for CHAP or non-CHAP protocol of Mutual-CHAP if CHAP is enabled.
+ type: str
+ default: chapProhibited
+ choices:
+ - chapProhibited
+ - chapRequired
+ mutual_chap_name:
+ description:
+ - The user name that the target needs to use to authenticate with the initiator if Mutual-CHAP is enabled.
+ type: str
+ default: ''
+ mutual_chap_secret:
+ description:
+ - The secret password of mutual CHAP if Mutual-CHAP is enabled.
+ type: str
+ port_bind:
+ description:
+ - The list of the VMkernels if use port bindings.
+ type: list
+ elements: str
+ default: []
+ force:
+ description:
+ - Force port bind VMkernels to be removed.
+ type: bool
+ default: false
+ vmhba_name:
+ description:
+ - The iSCSI adapter name.
+ type: str
+ required: true
+ send_target:
+ description:
+ - The iSCSI dynamic target settings.
+ type: dict
+ suboptions:
+ address:
+ description:
+ - The IP address or hostname of the storage device.
+ type: str
+ required: true
+ port:
+ description:
+ - The TCP port of the storage device.
+ - If not specified, the standard default of 3260 is used.
+ type: int
+ default: 3260
+ authentication:
+ description:
+ - CHAP authentication settings of a dynamic target for iSCSI.
+ type: dict
+ suboptions:
+ chap_auth_enabled:
+ description:
+ - Whether to enable CHAP authentication.
+ type: bool
+ default: false
+ chap_authentication_type:
+ description:
+ - The preference for CHAP or non-CHAP protocol of CHAP if CHAP is enabled.
+ type: str
+ default: chapProhibited
+ choices:
+ - chapDiscouraged
+ - chapPreferred
+ - chapRequired
+ - chapProhibited
+ chap_inherited:
+ description:
+ - Whether or not to inherit CHAP settings from the parent settings.
+ type: bool
+ default: true
+ chap_name:
+ description:
+ - CHAP user name if CHAP is enabled.
+ type: str
+ default: ''
+ chap_secret:
+ description:
+ - The secret password of CHAP if CHAP is enabled.
+ type: str
+ mutual_chap_authentication_type:
+ description:
+ - The preference for CHAP or non-CHAP protocol of Mutual-CHAP if CHAP is enabled.
+ type: str
+ default: chapProhibited
+ choices:
+ - chapProhibited
+ - chapRequired
+ mutual_chap_inherited:
+ description:
+ - Whether or not to inherit Mutual-CHAP settings from the parent settings.
+ type: bool
+ default: true
+ mutual_chap_name:
+ description:
+ - The user name that the target needs to use to authenticate with the initiator if Mutual-CHAP is enabled.
+ type: str
+ default: ''
+ mutual_chap_secret:
+ description:
+ - The secret password of mutual CHAP if Mutual-CHAP is enabled.
+ type: str
+ static_target:
+ description:
+ - The iSCSI static target settings.
+ type: dict
+ suboptions:
+ iscsi_name:
+ description:
+ - The name of the iSCSI target to connect to.
+ type: str
+ required: true
+ address:
+ description:
+ - The IP address or hostname of the storage device.
+ type: str
+ required: true
+ port:
+ description:
+ - The TCP port of the storage device.
+ - If not specified, the standard default of 3260 is used.
+ type: int
+ default: 3260
+ authentication:
+ description:
+ - CHAP authentication settings of a static target for iSCSI.
+ type: dict
+ suboptions:
+ chap_auth_enabled:
+ description:
+ - Whether to enable CHAP authentication.
+ type: bool
+ default: false
+ chap_authentication_type:
+ description:
+ - The preference for CHAP or non-CHAP protocol of CHAP if CHAP is enabled.
+ type: str
+ default: chapProhibited
+ choices:
+ - chapDiscouraged
+ - chapPreferred
+ - chapRequired
+ - chapProhibited
+ chap_inherited:
+ description:
+ - Whether or not to inherit CHAP settings from the parent settings.
+ type: bool
+ default: true
+ chap_name:
+ description:
+ - CHAP user name if CHAP is enabled.
+ type: str
+ default: ''
+ chap_secret:
+ description:
+ - The secret password of CHAP if CHAP is enabled.
+ type: str
+ mutual_chap_authentication_type:
+ description:
+ - The preference for CHAP or non-CHAP protocol of Mutual-CHAP if CHAP is enabled.
+ type: str
+ default: chapProhibited
+ choices:
+ - chapProhibited
+ - chapRequired
+ mutual_chap_inherited:
+ description:
+ - Whether or not to inherit Mutual-CHAP settings from the parent settings.
+ type: bool
+ default: true
+ mutual_chap_name:
+ description:
+ - The user name that the target needs to use to authenticate with the initiator if Mutual-CHAP is enabled.
+ type: str
+ default: ''
+ mutual_chap_secret:
+ description:
+ - The secret password of mutual CHAP if Mutual-CHAP is enabled.
+ type: str
+ state:
+ description:
+      - If set to C(present), add the iSCSI target or the bind ports if they do not exist.
+      - If set to C(present), update the iSCSI settings if they already exist and a change is required.
+      - If set to C(absent), remove the iSCSI target or the bind ports if they exist.
+      - If set to C(enabled), enable the iSCSI of ESXi if the iSCSI is disabled.
+      - If set to C(disabled), disable the iSCSI of ESXi if the iSCSI is enabled.
+ type: str
+ default: present
+ choices:
+ - present
+ - absent
+ - enabled
+ - disabled
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: Enable iSCSI of ESXi
+ community.vmware.vmware_host_iscsi:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ esxi_hostname }}"
+ state: enabled
+
+- name: Add a dynamic target to iSCSI config of ESXi
+ community.vmware.vmware_host_iscsi:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ esxi_hostname }}"
+ iscsi_config:
+ vmhba_name: vmhba65
+ send_target:
+ address: "{{ send_target_address }}"
+ state: present
+
+- name: Add a static target to iSCSI config of ESXi
+ community.vmware.vmware_host_iscsi:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ esxi_hostname }}"
+ iscsi_config:
+ vmhba_name: vmhba65
+ static_target:
+ iscsi_name: iqn.2011-08.com.xxxxxxx:as6104t-8c3e9d.target001
+ address: "{{ send_target_address }}"
+ state: present
+
+- name: Add VMKernels to iSCSI config of ESXi
+ community.vmware.vmware_host_iscsi:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ esxi_hostname }}"
+ iscsi_config:
+ vmhba_name: vmhba65
+ port_bind:
+ - vmk0
+ - vmk1
+ state: present
+
+- name: Use CHAP authentication
+ community.vmware.vmware_host_iscsi:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ esxi_hostname }}"
+ iscsi_config:
+ vmhba_name: vmhba65
+ authentication:
+ chap_auth_enabled: true
+ chap_authentication_type: chapPreferred
+ chap_name: chap_user_name
+ chap_secret: secret
+ state: present
+
+- name: Remove a dynamic target from iSCSI config of ESXi
+ community.vmware.vmware_host_iscsi:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ esxi_hostname }}"
+ iscsi_config:
+ vmhba_name: vmhba65
+ send_target:
+ address: "{{ send_target_address }}"
+ state: absent
+'''
+
+RETURN = r'''
+iscsi_properties:
+ description: Parameter return when system defaults config is changed.
+ returned: changed
+ type: dict
+ sample: >-
+ {
+ "iscsi_alias": "",
+ "iscsi_authentication_properties": {
+ "_vimtype": "vim.host.InternetScsiHba.AuthenticationProperties",
+ "chapAuthEnabled": false,
+ "chapAuthenticationType": "chapProhibited",
+ "chapInherited": null,
+ "chapName": "",
+ "chapSecret": "XXXXXXXXXXXXXXXXXXXXX",
+ "mutualChapAuthenticationType": "chapProhibited",
+ "mutualChapInherited": null,
+ "mutualChapName": "XXXXXXXXXXXXXXXXXXXXX",
+ "mutualChapSecret": ""
+ },
+ "iscsi_enabled": true,
+ "iscsi_name": "",
+ "iscsi_send_targets": [],
+ "iscsi_static_targets": [],
+ "port_bind": [],
+ "vmhba_name": "vmhba65"
+ }
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from copy import deepcopy
+
+
class VMwareHostiScsiManager(PyVmomi):
    """Manage the iSCSI configuration of a single ESXi host.

    Covers enabling/disabling the software iSCSI service, dynamic (send)
    and static targets, CHAP authentication settings, the adapter IQN and
    alias, and VMkernel port bindings.
    """

    def __init__(self, module):
        """Cache module parameters; unpack iscsi_config only when it was supplied.

        iscsi_config is optional for state=enabled/disabled, so the per-adapter
        attributes below only exist when the user passed iscsi_config.
        """
        super(VMwareHostiScsiManager, self).__init__(module)
        self.esxi_hostname = self.params['esxi_hostname']
        self.iscsi_config = self.params['iscsi_config']
        self.state = self.params['state']

        if self.iscsi_config:
            self.iscsi_name = self.iscsi_config['iscsi_name']
            self.alias = self.iscsi_config['alias']
            self.authentication = self.iscsi_config['authentication']
            self.port_bind = self.iscsi_config['port_bind']
            self.force = self.iscsi_config['force']
            self.vmhba_name = self.iscsi_config['vmhba_name']
            self.send_target = self.iscsi_config['send_target']
            self.static_target = self.iscsi_config['static_target']

    def get_iscsi_config(self):
        """Read the host's current iSCSI state into self.existing_system_iscsi_config.

        NOTE(review): the dict is update()d inside the HBA loop, so if the host
        exposes more than one InternetScsiHba only the last one enumerated is
        kept — confirm this is acceptable for multi-adapter hosts.
        """
        self.existing_system_iscsi_config = {}
        for hba in self.host_obj.config.storageDevice.hostBusAdapter:
            # Only software/hardware iSCSI adapters; other HBA types are skipped.
            if isinstance(hba, vim.host.InternetScsiHba):
                self.existing_system_iscsi_config.update(
                    {
                        'vmhba_name': hba.device,
                        'iscsi_name': hba.iScsiName,
                        'iscsi_alias': hba.iScsiAlias,
                        'iscsi_authentication_properties': self.to_json(hba.authenticationProperties)
                    }
                )

                # Dynamic (send) targets currently configured on the adapter.
                iscsi_send_targets = []
                for iscsi_send_target in self.to_json(hba.configuredSendTarget):
                    iscsi_send_targets.append({
                        'address': iscsi_send_target['address'],
                        'authenticationProperties': iscsi_send_target['authenticationProperties'],
                        'port': iscsi_send_target['port']
                    })
                self.existing_system_iscsi_config['iscsi_send_targets'] = iscsi_send_targets

                # Static targets currently configured on the adapter.
                iscsi_static_targets = []
                for iscsi_static_target in self.to_json(hba.configuredStaticTarget):
                    iscsi_static_targets.append({
                        'iscsi_name': iscsi_static_target['iScsiName'],
                        'address': iscsi_static_target['address'],
                        'authenticationProperties': iscsi_static_target['authenticationProperties'],
                        'port': iscsi_static_target['port']
                    })
                self.existing_system_iscsi_config['iscsi_static_targets'] = iscsi_static_targets

        self.existing_system_iscsi_config['iscsi_enabled'] = self.to_json(self.host_obj.config.storageDevice.softwareInternetScsiEnabled)

        # Port bindings require a vmhba name, which only exists when
        # iscsi_config was supplied.
        vnic_devices = []
        if self.iscsi_config:
            for vnic in self.host_obj.configManager.iscsiManager.QueryBoundVnics(iScsiHbaName=self.vmhba_name):
                vnic_devices.append(vnic.vnicDevice)
            self.existing_system_iscsi_config['port_bind'] = vnic_devices

    def check_hba_name(self):
        """Fail the module if the requested vmhba_name is not the discovered iSCSI HBA."""
        if self.existing_system_iscsi_config['vmhba_name'] != self.vmhba_name:
            self.module.fail_json(msg="%s is not an iSCSI device." % self.vmhba_name)

    def diff_iscsi_config(self):
        """Compare desired vs. existing configuration and set change flags.

        Sets self.change_flag (overall "something to do", also used for
        check mode) plus one boolean per concrete operation that execute()
        will perform (add/remove targets, update IQN/alias/CHAP, port binds).
        """
        if self.state == 'enabled':
            self.change_flag = True
            # NOTE(review): iscsi_enabled comes from to_json() and is compared
            # with 'is False' in execute(); this string comparison against
            # 'true' may never match if the value is a bool — confirm.
            if self.existing_system_iscsi_config['iscsi_enabled'] == 'true':
                self.change_flag = False

        if self.state == 'disabled':
            self.change_flag = True
            # NOTE(review): same string-vs-bool concern as above for 'false'.
            if self.existing_system_iscsi_config['iscsi_enabled'] == 'false':
                self.change_flag = False

        if self.state == 'present':
            self.change_flag = True

            # A send target matching on address+port already exists -> no add.
            self.add_send_interface_flag = True
            if self.send_target:
                for config in self.existing_system_iscsi_config['iscsi_send_targets']:
                    if config['address'] == self.send_target['address'] and config['port'] == self.send_target['port']:
                        self.change_flag = False
                        self.add_send_interface_flag = False

            # A static target matching on address+port+iqn already exists -> no add.
            self.add_static_interface_flag = True
            if self.static_target:
                for config in self.existing_system_iscsi_config['iscsi_static_targets']:
                    if config['address'] == self.static_target['address'] and config['port'] == self.static_target['port'] \
                            and config['iscsi_name'] == self.static_target['iscsi_name']:
                        self.change_flag = False
                        self.add_static_interface_flag = False

            # Adapter IQN update only when a non-empty iscsi_name differs.
            self.update_iscsi_name_flag = False
            if self.existing_system_iscsi_config['iscsi_name'] != self.iscsi_name and self.iscsi_name:
                self.change_flag = True
                self.update_iscsi_name_flag = True

            self.update_alias_flag = False
            if self.existing_system_iscsi_config['iscsi_alias'] != self.alias:
                self.change_flag = True
                self.update_alias_flag = True

            # Parent-level CHAP settings; secrets are write-only and therefore
            # not compared here.
            self.update_auth_flag = False
            if self.authentication:
                auth_properties = self.existing_system_iscsi_config['iscsi_authentication_properties']
                if auth_properties['chapAuthEnabled'] != self.authentication['chap_auth_enabled']:
                    self.change_flag = True
                    self.update_auth_flag = True
                if auth_properties['chapAuthenticationType'] != self.authentication['chap_authentication_type']:
                    self.change_flag = True
                    self.update_auth_flag = True
                if auth_properties['chapName'] != self.authentication['chap_name']:
                    self.change_flag = True
                    self.update_auth_flag = True
                if auth_properties['mutualChapAuthenticationType'] != self.authentication['mutual_chap_authentication_type']:
                    self.change_flag = True
                    self.update_auth_flag = True
                if auth_properties['mutualChapName'] != self.authentication['mutual_chap_name']:
                    self.change_flag = True
                    self.update_auth_flag = True

            # Port binds compared as sets (order-insensitive).
            self.update_port_bind_flag = False
            if sorted(self.existing_system_iscsi_config['port_bind']) != sorted(self.port_bind):
                self.change_flag = True
                self.update_port_bind_flag = True

            # Per-send-target CHAP diff; only reached when the target itself
            # already exists (add_send_interface_flag is False).
            self.update_send_target_authentication = False
            if self.add_send_interface_flag is False:
                for config in self.existing_system_iscsi_config['iscsi_send_targets']:
                    if config['address'] == self.send_target['address'] and \
                            config['port'] == self.send_target['port']:
                        auth_properties = config['authenticationProperties']
                        if auth_properties['chapAuthEnabled'] != self.send_target['authentication']['chap_auth_enabled']:
                            self.change_flag = True
                            self.update_send_target_authentication = True
                        if auth_properties['chapAuthenticationType'] != self.send_target['authentication']['chap_authentication_type']:
                            self.change_flag = True
                            self.update_send_target_authentication = True
                        if auth_properties['chapInherited'] != self.send_target['authentication']['chap_inherited']:
                            self.change_flag = True
                            self.update_send_target_authentication = True
                        if auth_properties['chapName'] != self.send_target['authentication']['chap_name']:
                            self.change_flag = True
                            self.update_send_target_authentication = True
                        if auth_properties['mutualChapAuthenticationType'] != self.send_target['authentication']['mutual_chap_authentication_type']:
                            self.change_flag = True
                            self.update_send_target_authentication = True
                        if auth_properties['mutualChapInherited'] != self.send_target['authentication']['mutual_chap_inherited']:
                            self.change_flag = True
                            self.update_send_target_authentication = True
                        if auth_properties['mutualChapName'] != self.send_target['authentication']['mutual_chap_name']:
                            self.change_flag = True
                            self.update_send_target_authentication = True
                        break

            # Per-static-target CHAP diff; NOTE(review): matches on address+port
            # only, unlike the add/remove checks which also match iscsi_name.
            self.update_static_target_authentication = False
            if self.add_static_interface_flag is False:
                for config in self.existing_system_iscsi_config['iscsi_static_targets']:
                    if config['address'] == self.static_target['address'] and \
                            config['port'] == self.static_target['port']:
                        auth_properties = config['authenticationProperties']
                        if auth_properties['chapAuthEnabled'] != self.static_target['authentication']['chap_auth_enabled']:
                            self.change_flag = True
                            self.update_static_target_authentication = True
                        if auth_properties['chapAuthenticationType'] != self.static_target['authentication']['chap_authentication_type']:
                            self.change_flag = True
                            self.update_static_target_authentication = True
                        if auth_properties['chapInherited'] != self.static_target['authentication']['chap_inherited']:
                            self.change_flag = True
                            self.update_static_target_authentication = True
                        if auth_properties['chapName'] != self.static_target['authentication']['chap_name']:
                            self.change_flag = True
                            self.update_static_target_authentication = True
                        if auth_properties['mutualChapAuthenticationType'] != self.static_target['authentication']['mutual_chap_authentication_type']:
                            self.change_flag = True
                            self.update_static_target_authentication = True
                        if auth_properties['mutualChapInherited'] != self.static_target['authentication']['mutual_chap_inherited']:
                            self.change_flag = True
                            self.update_static_target_authentication = True
                        if auth_properties['mutualChapName'] != self.static_target['authentication']['mutual_chap_name']:
                            self.change_flag = True
                            self.update_static_target_authentication = True
                        break

        if self.state == 'absent':
            self.change_flag = False

            # Remove only targets that actually exist on the adapter.
            self.remove_send_interface_flag = False
            if self.existing_system_iscsi_config['iscsi_send_targets'] and self.send_target:
                for config in self.existing_system_iscsi_config['iscsi_send_targets']:
                    if config['address'] == self.send_target['address'] and \
                            config['port'] == self.send_target['port']:
                        self.change_flag = True
                        self.remove_send_interface_flag = True

            self.remove_static_interface_flag = False
            if self.existing_system_iscsi_config['iscsi_static_targets'] and self.static_target:
                for config in self.existing_system_iscsi_config['iscsi_static_targets']:
                    if config['address'] == self.static_target['address'] and \
                            config['port'] == self.static_target['port'] and \
                            config['iscsi_name'] == self.static_target['iscsi_name']:
                        self.change_flag = True
                        self.remove_static_interface_flag = True

            # Unbind only VMkernels that are both requested and currently bound.
            self.remove_port_bind_flag = False
            if self.iscsi_config:
                for vnic in self.port_bind:
                    for existing_vnic in self.existing_system_iscsi_config['port_bind']:
                        if vnic == existing_vnic:
                            self.change_flag = True
                            self.remove_port_bind_flag = True

    def generate_iscsi_config(self):
        """Build the pyVmomi objects for the operations flagged by diff_iscsi_config().

        The per-target authentication objects are stored on self and applied
        separately in execute() via UpdateInternetScsiAuthenticationProperties;
        they are not attached to the SendTarget/StaticTarget objects here.
        """
        self.authentication_config = ''
        self.authentication_send_target_config = ''
        self.send_target_configs = []
        self.static_target_configs = []

        if self.authentication:
            self.authentication_config = vim.host.InternetScsiHba.AuthenticationProperties()
            self.authentication_config.chapAuthEnabled = self.authentication['chap_auth_enabled']
            self.authentication_config.chapAuthenticationType = self.authentication['chap_authentication_type']
            self.authentication_config.chapName = self.authentication['chap_name']
            self.authentication_config.chapSecret = self.authentication['chap_secret']
            self.authentication_config.mutualChapAuthenticationType = self.authentication['mutual_chap_authentication_type']
            self.authentication_config.mutualChapName = self.authentication['mutual_chap_name']
            self.authentication_config.mutualChapSecret = self.authentication['mutual_chap_secret']

        if self.send_target:
            send_target_config = vim.host.InternetScsiHba.SendTarget()
            send_target_config.address = self.send_target['address']
            send_target_config.port = self.send_target['port']

            if self.send_target['authentication']:
                self.send_target_authentication_config = vim.host.InternetScsiHba.AuthenticationProperties()
                self.send_target_authentication_config.chapAuthEnabled = self.send_target['authentication']['chap_auth_enabled']
                self.send_target_authentication_config.chapAuthenticationType = self.send_target['authentication']['chap_authentication_type']
                self.send_target_authentication_config.chapInherited = self.send_target['authentication']['chap_inherited']
                self.send_target_authentication_config.chapName = self.send_target['authentication']['chap_name']
                self.send_target_authentication_config.chapSecret = self.send_target['authentication']['chap_secret']
                self.send_target_authentication_config.mutualChapAuthenticationType = self.send_target['authentication']['mutual_chap_authentication_type']
                self.send_target_authentication_config.mutualChapInherited = self.send_target['authentication']['mutual_chap_inherited']
                self.send_target_authentication_config.mutualChapName = self.send_target['authentication']['mutual_chap_name']
                self.send_target_authentication_config.mutualChapSecret = self.send_target['authentication']['mutual_chap_secret']

            self.send_target_configs.append(send_target_config)

        if self.static_target:
            static_target_config = vim.host.InternetScsiHba.StaticTarget()
            static_target_config.iScsiName = self.static_target['iscsi_name']
            static_target_config.address = self.static_target['address']
            static_target_config.port = self.static_target['port']

            if self.static_target['authentication']:
                self.static_target_authentication_config = vim.host.InternetScsiHba.AuthenticationProperties()
                self.static_target_authentication_config.chapAuthEnabled = self.static_target['authentication']['chap_auth_enabled']
                self.static_target_authentication_config.chapAuthenticationType = self.static_target['authentication']['chap_authentication_type']
                self.static_target_authentication_config.chapInherited = self.static_target['authentication']['chap_inherited']
                self.static_target_authentication_config.chapName = self.static_target['authentication']['chap_name']
                self.static_target_authentication_config.chapSecret = self.static_target['authentication']['chap_secret']
                self.static_target_authentication_config.mutualChapAuthenticationType = self.static_target['authentication']['mutual_chap_authentication_type']
                self.static_target_authentication_config.mutualChapInherited = self.static_target['authentication']['mutual_chap_inherited']
                self.static_target_authentication_config.mutualChapName = self.static_target['authentication']['mutual_chap_name']
                self.static_target_authentication_config.mutualChapSecret = self.static_target['authentication']['mutual_chap_secret']

            self.static_target_configs.append(static_target_config)

    def execute(self):
        """Apply the requested state to the host and exit the module.

        Flow: resolve the host, gather current config, diff, honor check mode,
        then perform only the flagged operations; on any change re-read the
        config and return it as 'iscsi_properties'.
        """
        self.host_obj = self.find_hostsystem_by_name(self.esxi_hostname)
        if not self.host_obj:
            self.module.fail_json(msg="Cannot find the specified ESXi host: %s" % self.esxi_hostname)

        if not self.iscsi_config and (self.state == 'present' or self.state == 'absent'):
            self.module.fail_json(msg="If state is present or absent must specify the iscsi_config parameter.")
        else:
            self.get_iscsi_config()

        result = dict(changed=False)

        if self.state == 'enabled':
            self.diff_iscsi_config()
            if self.module.check_mode:
                self.module.exit_json(changed=self.change_flag)

            # Here iscsi_enabled is treated as a bool ('is False'), unlike the
            # string comparison in diff_iscsi_config() — see NOTE there.
            if self.existing_system_iscsi_config['iscsi_enabled'] is False:
                try:
                    self.host_obj.configManager.storageSystem.UpdateSoftwareInternetScsiEnabled(enabled=True)
                    result['changed'] = True
                except Exception as e:
                    self.module.fail_json(msg="Failed to enable iSCSI: %s" % to_native(e))

        if self.state == 'disabled':
            self.diff_iscsi_config()
            if self.module.check_mode:
                self.module.exit_json(changed=self.change_flag)

            if self.existing_system_iscsi_config['iscsi_enabled']:
                try:
                    self.host_obj.configManager.storageSystem.UpdateSoftwareInternetScsiEnabled(enabled=False)
                    result['changed'] = True
                except Exception as e:
                    self.module.fail_json(msg="Failed to disable iSCSI: %s" % to_native(e))

        if self.state == 'present':
            self.check_hba_name()
            self.diff_iscsi_config()

            if self.module.check_mode:
                self.module.exit_json(changed=self.change_flag)

            if self.change_flag:
                self.generate_iscsi_config()

                # add a dynamic target to an iSCSI configuration
                if self.add_send_interface_flag:
                    if self.send_target_configs:
                        try:
                            self.host_obj.configManager.storageSystem.AddInternetScsiSendTargets(
                                iScsiHbaDevice=self.vmhba_name,
                                targets=self.send_target_configs)
                            result['changed'] = True
                        except Exception as e:
                            self.module.fail_json(msg="Failed to add a dynamic target: %s" % to_native(e))

                # add a static target to an iSCSI configuration
                if self.add_static_interface_flag:
                    if self.static_target_configs:
                        try:
                            self.host_obj.configManager.storageSystem.AddInternetScsiStaticTargets(
                                iScsiHbaDevice=self.vmhba_name,
                                targets=self.static_target_configs)
                            result['changed'] = True
                        except Exception as e:
                            self.module.fail_json(msg="Failed to add a static target: %s" % to_native(e))

                # update a CHAP authentication of a dynamic target in an iSCSI configuration
                if self.update_send_target_authentication:
                    target_set = vim.host.InternetScsiHba.TargetSet()
                    target_set.sendTargets = self.send_target_configs
                    try:
                        self.host_obj.configManager.storageSystem.UpdateInternetScsiAuthenticationProperties(
                            iScsiHbaDevice=self.vmhba_name,
                            authenticationProperties=self.send_target_authentication_config,
                            targetSet=target_set)
                        result['changed'] = True
                    except Exception as e:
                        self.module.fail_json(msg="Failed to update a CHAP authentication of a dynamic target: %s" % to_native(e))

                # update a CHAP authentication of a static target in an iSCSI configuration
                if self.update_static_target_authentication:
                    target_set = vim.host.InternetScsiHba.TargetSet()
                    target_set.staticTargets = self.static_target_configs
                    try:
                        self.host_obj.configManager.storageSystem.UpdateInternetScsiAuthenticationProperties(
                            iScsiHbaDevice=self.vmhba_name,
                            authenticationProperties=self.static_target_authentication_config,
                            targetSet=target_set)
                        result['changed'] = True
                    except Exception as e:
                        self.module.fail_json(msg="Failed to update a CHAP authentication of a static target: %s" % to_native(e))

                # update an iqn in an iSCSI configuration
                if self.update_iscsi_name_flag:
                    try:
                        self.host_obj.configManager.storageSystem.UpdateInternetScsiName(
                            iScsiHbaDevice=self.vmhba_name, iScsiName=self.iscsi_name)
                        result['changed'] = True
                    except Exception as e:
                        self.module.fail_json(msg="Failed to update an iqn: %s" % to_native(e))

                # update an alias in an iSCSI configuration
                if self.update_alias_flag:
                    try:
                        self.host_obj.configManager.storageSystem.UpdateInternetScsiAlias(
                            iScsiHbaDevice=self.vmhba_name, iScsiAlias=self.alias)
                        result['changed'] = True
                    except Exception as e:
                        self.module.fail_json(msg="Failed to update an alias: %s" % to_native(e))

                # update a CHAP authentication an iSCSI configuration
                # (targetSet=None applies the settings at the adapter level)
                if self.update_auth_flag:
                    try:
                        self.host_obj.configManager.storageSystem.UpdateInternetScsiAuthenticationProperties(
                            iScsiHbaDevice=self.vmhba_name,
                            authenticationProperties=self.authentication_config,
                            targetSet=None)
                        result['changed'] = True
                    except Exception as e:
                        self.module.fail_json(msg="Failed to update a CHAP authentication: %s" % to_native(e))

                # add port binds in an iSCSI configuration
                # (only VMkernels not already bound are bound here)
                if self.update_port_bind_flag:
                    for vnic in list(filter(lambda x: x not in self.existing_system_iscsi_config['port_bind'],
                                            self.port_bind)):
                        try:
                            self.host_obj.configManager.iscsiManager.BindVnic(iScsiHbaName=self.vmhba_name,
                                                                              vnicDevice=vnic)
                            result['changed'] = True
                        except Exception as e:
                            self.module.fail_json(msg="Failed to add a port bind: %s" % to_native(e))

        if self.state == 'absent':
            self.check_hba_name()
            self.diff_iscsi_config()

            if self.module.check_mode:
                self.module.exit_json(changed=self.change_flag)

            if self.change_flag:
                self.generate_iscsi_config()

                # remove a dynamic target to an iSCSI configuration
                if self.remove_send_interface_flag:
                    try:
                        self.host_obj.configManager.storageSystem.RemoveInternetScsiSendTargets(
                            iScsiHbaDevice=self.vmhba_name,
                            targets=self.send_target_configs)
                        result['changed'] = True
                    except Exception as e:
                        self.module.fail_json(msg="Failed to remove a dynamic target: %s" % to_native(e))

                # remove a static target to an iSCSI configuration
                if self.remove_static_interface_flag:
                    try:
                        self.host_obj.configManager.storageSystem.RemoveInternetScsiStaticTargets(
                            iScsiHbaDevice=self.vmhba_name,
                            targets=self.static_target_configs)
                        result['changed'] = True
                    except Exception as e:
                        self.module.fail_json(msg="Failed to remove a static target: %s" % to_native(e))

                # remove port binds from an iSCSI configuration
                # (set intersection: only unbind what is both requested and bound)
                if self.remove_port_bind_flag:
                    for vnic in list(set(self.existing_system_iscsi_config['port_bind']) & set(self.port_bind)):
                        try:
                            self.host_obj.configManager.iscsiManager.UnbindVnic(iScsiHbaName=self.vmhba_name,
                                                                                vnicDevice=vnic,
                                                                                force=self.force)
                            result['changed'] = True
                        except Exception as e:
                            self.module.fail_json(msg="Failed to remove a port bind: %s" % to_native(e))

        # Return the post-change configuration so callers can see the result.
        if result['changed'] is True:
            self.get_iscsi_config()
            result['iscsi_properties'] = self.existing_system_iscsi_config

        self.module.exit_json(**result)
+
+
def main():
    """Entry point: build the argument spec and run the iSCSI manager."""
    # Parent-level CHAP authentication sub-spec, shared by iscsi_config and
    # (in extended form) by the per-target sub-specs.
    base_auth_options = dict(
        chap_auth_enabled=dict(type='bool', default=False),
        chap_authentication_type=dict(type='str', default='chapProhibited',
                                      choices=['chapDiscouraged', 'chapPreferred', 'chapRequired',
                                               'chapProhibited']),
        chap_name=dict(type='str', default=''),
        chap_secret=dict(type='str', no_log=True),
        mutual_chap_authentication_type=dict(type='str', default='chapProhibited',
                                             choices=['chapProhibited', 'chapRequired']),
        mutual_chap_name=dict(type='str', default=''),
        mutual_chap_secret=dict(type='str', no_log=True),
    )
    authentication = dict(type='dict', apply_defaults=True, options=base_auth_options)

    # Per-target variant: identical plus the two inheritance switches.
    authentication_target = deepcopy(authentication)
    authentication_target['options']['chap_inherited'] = dict(type='bool', default=True)
    authentication_target['options']['mutual_chap_inherited'] = dict(type='bool', default=True)

    send_target_spec = dict(type='dict',
                            options=dict(
                                address=dict(type='str', required=True),
                                port=dict(type='int', default=3260),
                                authentication=authentication_target,
                            ))
    static_target_spec = dict(type='dict',
                              options=dict(
                                  iscsi_name=dict(type='str', required=True),
                                  address=dict(type='str', required=True),
                                  port=dict(type='int', default=3260),
                                  authentication=authentication_target,
                              ))

    argument_spec = vmware_argument_spec()
    argument_spec.update(
        esxi_hostname=dict(type='str', required=True),
        iscsi_config=dict(type='dict',
                          options=dict(
                              iscsi_name=dict(type='str', default=None, aliases=['initiator_iqn']),
                              alias=dict(type='str', default=''),
                              authentication=authentication,
                              port_bind=dict(type='list', elements='str', default=[]),
                              force=dict(type='bool', default=False),
                              vmhba_name=dict(type='str', required=True),
                              send_target=send_target_spec,
                              static_target=static_target_spec,
                          )),
        state=dict(type='str', choices=['present', 'absent', 'enabled', 'disabled'], default='present'),
    )

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    VMwareHostiScsiManager(module).execute()


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_iscsi_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_iscsi_info.py
new file mode 100644
index 000000000..4030b3ba6
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_iscsi_info.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, sky-joker
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: vmware_host_iscsi_info
+short_description: Gather iSCSI configuration information of ESXi host
+author:
+ - sky-joker (@sky-joker)
+description:
+ - This module can be used to gather information about the iSCSI configuration of the ESXi host.
+options:
+ esxi_hostname:
+ description:
+ - The ESXi hostname on which to gather iSCSI settings.
+ type: str
+ required: true
+extends_documentation_fragment:
+ - community.vmware.vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: Gather iSCSI configuration information of ESXi host
+ community.vmware.vmware_host_iscsi_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ esxi_hostname }}"
+ register: iscsi_info
+'''
+
+RETURN = r'''
+iscsi_properties:
+ description: dictionary of current iSCSI information
+ returned: always
+ type: dict
+ sample: >-
+ {
+ "iscsi_alias": "",
+ "iscsi_authentication_properties": {
+ "_vimtype": "vim.host.InternetScsiHba.AuthenticationProperties",
+ "chapAuthEnabled": false,
+ "chapAuthenticationType": "chapProhibited",
+ "chapInherited": null,
+ "chapName": "",
+ "chapSecret": "XXXXXXXXX",
+ "mutualChapAuthenticationType": "chapProhibited",
+ "mutualChapInherited": null,
+ "mutualChapName": "",
+ "mutualChapSecret": "XXXXXXXXX"
+ },
+ "iscsi_enabled": true,
+ "iscsi_name": "iqn.1998-01.com.vmware:esxi-033f58ee",
+ "iscsi_send_targets": [
+ {
+ "address": "192.168.0.1",
+ "authenticationProperties": {
+ "_vimtype": "vim.host.InternetScsiHba.AuthenticationProperties",
+ "chapAuthEnabled": false,
+ "chapAuthenticationType": "chapProhibited",
+ "chapInherited": true,
+ "chapName": "",
+ "chapSecret": "XXXXXXXXX",
+ "mutualChapAuthenticationType": "chapProhibited",
+ "mutualChapInherited": true,
+ "mutualChapName": "",
+ "mutualChapSecret": "XXXXXXXXX"
+ },
+ "port": 3260
+ }
+ ],
+ "iscsi_static_targets": [
+ {
+ "address": "192.168.0.1",
+ "authenticationProperties": {
+ "_vimtype": "vim.host.InternetScsiHba.AuthenticationProperties",
+ "chapAuthEnabled": false,
+ "chapAuthenticationType": "chapProhibited",
+ "chapInherited": true,
+ "chapName": "",
+ "chapSecret": "XXXXXXXXX",
+ "mutualChapAuthenticationType": "chapProhibited",
+ "mutualChapInherited": true,
+ "mutualChapName": "",
+ "mutualChapSecret": "XXXXXXXXX"
+ },
+ "iscsi_name": "iqn.2004-04.com.qnap:tvs-673:iscsi.vm3.2c580e",
+ "port": 3260
+ }
+ ],
+ "port_bind": [],
+ "vmhba_name": "vmhba65"
+ }
+detected_iscsi_drives:
+ description:
+  - list of detected iSCSI drives
+  - added in version 1.9.0
+ returned: always
+ type: list
+ sample: >-
+ [
+ {
+ "address": [
+ "192.168.0.57:3260"
+ ],
+ "canonical_name": "naa.60014055f198fb3d0cb4bd7ae1f802e1",
+ "iscsi_name": "iqn.2021-03.local.iscsi-target:iscsi-storage.target0"
+ }
+ ]
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+import re
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+
+class VMwareHostiScsiInfo(PyVmomi):
+ def __init__(self, module):
+ super(VMwareHostiScsiInfo, self).__init__(module)
+ self.esxi_hostname = self.params['esxi_hostname']
+
+ def get_iscsi_config(self):
+ iscsi_enabled = self.host_obj.config.storageDevice.softwareInternetScsiEnabled
+ self.existing_system_iscsi_config = {
+ 'iscsi_enabled': iscsi_enabled
+ }
+ self.detected_iscsi_drives = []
+ if iscsi_enabled is True:
+ for hba in self.host_obj.config.storageDevice.hostBusAdapter:
+ if isinstance(hba, vim.host.InternetScsiHba):
+ self.existing_system_iscsi_config.update(
+ {
+ 'vmhba_name': hba.device,
+ 'iscsi_name': hba.iScsiName,
+ 'iscsi_alias': hba.iScsiAlias,
+ 'iscsi_authentication_properties': self.to_json(hba.authenticationProperties)
+ }
+ )
+
+ iscsi_send_targets = []
+ for iscsi_send_target in self.to_json(hba.configuredSendTarget):
+ iscsi_send_targets.append({
+ 'address': iscsi_send_target['address'],
+ 'authenticationProperties': iscsi_send_target['authenticationProperties'],
+ 'port': iscsi_send_target['port']
+ })
+ self.existing_system_iscsi_config['iscsi_send_targets'] = iscsi_send_targets
+
+ iscsi_static_targets = []
+ for iscsi_static_target in self.to_json(hba.configuredStaticTarget):
+ iscsi_static_targets.append({
+ 'iscsi_name': iscsi_static_target['iScsiName'],
+ 'address': iscsi_static_target['address'],
+ 'authenticationProperties': iscsi_static_target['authenticationProperties'],
+ 'port': iscsi_static_target['port']
+ })
+ self.existing_system_iscsi_config['iscsi_static_targets'] = iscsi_static_targets
+
+ detected_iscsi_drives_information = []
+ for lun in self.host_obj.config.storageDevice.scsiLun:
+ if isinstance(lun, vim.host.ScsiDisk):
+ detected_iscsi_drives_information.append({
+ 'key': lun.key,
+ 'canonical_name': lun.canonicalName
+ })
+
+ for scsi_adapter in self.host_obj.config.storageDevice.scsiTopology.adapter:
+ if isinstance(scsi_adapter, vim.host.ScsiTopology.Interface):
+ if re.search(self.existing_system_iscsi_config['vmhba_name'], scsi_adapter.key):
+ for target in scsi_adapter.target:
+ scsi_lun = target.lun[0].scsiLun
+ for scsi_info in detected_iscsi_drives_information:
+ if scsi_info['key'] == scsi_lun:
+ self.detected_iscsi_drives.append({
+ 'iscsi_name': target.transport.iScsiName,
+ 'canonical_name': scsi_info['canonical_name'],
+ 'address': target.transport.address
+ })
+
+ vnic_devices = []
+ for vnic in self.host_obj.configManager.iscsiManager.QueryBoundVnics(iScsiHbaName=self.existing_system_iscsi_config['vmhba_name']):
+ vnic_devices.append(vnic.vnicDevice)
+ self.existing_system_iscsi_config['port_bind'] = vnic_devices
+
+ def execute(self):
+ self.host_obj = self.find_hostsystem_by_name(self.esxi_hostname)
+ if not self.host_obj:
+ self.module.fail_json(msg="Cannot find the specified ESXi host: %s" % self.esxi_hostname)
+
+ self.get_iscsi_config()
+ self.module.exit_json(changed=False, iscsi_properties=self.existing_system_iscsi_config, detected_iscsi_drives=self.detected_iscsi_drives)
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ esxi_hostname=dict(type='str', required=True)
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ vmware_host_iscsi_info = VMwareHostiScsiInfo(module)
+ vmware_host_iscsi_info.execute()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_kernel_manager.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_kernel_manager.py
new file mode 100644
index 000000000..834d994c0
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_kernel_manager.py
@@ -0,0 +1,209 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2019, Aaron Longchamps, <a.j.longchamps@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_kernel_manager
+short_description: Manage kernel module options on ESXi hosts
+description:
+- This module can be used to manage kernel module options on ESXi hosts.
+- All connected ESXi hosts in scope will be configured when specified.
+- If a host is not connected at time of configuration, it will be marked as such in the output.
+- Kernel module options may require a reboot to take effect which is not covered here.
+- You can use M(ansible.builtin.reboot) or M(community.vmware.vmware_host_powerstate) module to reboot all ESXi host systems.
+author:
+- Aaron Longchamps (@alongchamps)
+options:
+ esxi_hostname:
+ description:
+ - Name of the ESXi host to work on.
+ - This parameter is required if C(cluster_name) is not specified.
+ type: str
+ cluster_name:
+ description:
+ - Name of the VMware cluster to work on.
+ - All ESXi hosts in this cluster will be configured.
+ - This parameter is required if C(esxi_hostname) is not specified.
+ type: str
+ kernel_module_name:
+ description:
+ - Name of the kernel module to be configured.
+ required: true
+ type: str
+ kernel_module_option:
+ description:
+ - Specified configurations will be applied to the given module.
+ - These values are specified in key=value pairs and separated by a space when there are multiple options.
+ required: true
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Configure IPv6 to be off via tcpip4 kernel module
+ community.vmware.vmware_host_kernel_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ kernel_module_name: "tcpip4"
+ kernel_module_option: "ipv6=0"
+
+- name: Using cluster_name, configure vmw_psp_rr options
+ community.vmware.vmware_host_kernel_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: '{{ virtual_cluster_name }}'
+ kernel_module_name: "vmw_psp_rr"
+ kernel_module_option: "maxPathsPerDevice=2"
+'''
+
+RETURN = r'''
+host_kernel_status:
+ description:
+ - dict with information on what was changed, by ESXi host in scope.
+ returned: success
+ type: dict
+ sample: {
+ "results": {
+ "myhost01.example.com": {
+ "changed": true,
+ "configured_options": "ipv6=0",
+ "msg": "Options have been changed on the kernel module",
+ "original_options": "ipv6=1"
+ }
+ }
+}
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+from ansible.module_utils._text import to_native
+
+
+class VmwareKernelManager(PyVmomi):
+ def __init__(self, module):
+ self.module = module
+ super(VmwareKernelManager, self).__init__(module)
+ cluster_name = self.params.get('cluster_name', None)
+ esxi_host_name = self.params.get('esxi_hostname', None)
+ self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
+ self.kernel_module_name = self.params.get('kernel_module_name')
+ self.kernel_module_option = self.params.get('kernel_module_option')
+ self.results = {}
+
+ if not self.hosts:
+ self.module.fail_json(msg="Failed to find a host system that matches the specified criteria")
+
+ # find kernel module options for a given kmod_name. If the name is not right, this will throw an exception
+ def get_kernel_module_option(self, host, kmod_name):
+ host_kernel_manager = host.configManager.kernelModuleSystem
+
+ try:
+ return host_kernel_manager.QueryConfiguredModuleOptionString(self.kernel_module_name)
+ except vim.fault.NotFound as kernel_fault:
+ self.module.fail_json(msg="Failed to find kernel module on host '%s'. More information: %s" % (host.name, to_native(kernel_fault.msg)))
+
+ # configure the provided kernel module with the specified options
+ def apply_kernel_module_option(self, host, kmod_name, kmod_option):
+ host_kernel_manager = host.configManager.kernelModuleSystem
+
+ if host_kernel_manager:
+ try:
+ if not self.module.check_mode:
+ host_kernel_manager.UpdateModuleOptionString(kmod_name, kmod_option)
+ except vim.fault.NotFound as kernel_fault:
+ self.module.fail_json(msg="Failed to find kernel module on host '%s'. More information: %s" % (host.name, to_native(kernel_fault)))
+ except Exception as kernel_fault:
+ self.module.fail_json(msg="Failed to configure kernel module for host '%s' due to: %s" % (host.name, to_native(kernel_fault)))
+
+ # evaluate our current configuration against desired options and save results
+ def check_host_configuration_state(self):
+ change_list = []
+
+ for host in self.hosts:
+ changed = False
+ msg = ""
+ self.results[host.name] = dict()
+
+ if host.runtime.connectionState == "connected":
+ host_kernel_manager = host.configManager.kernelModuleSystem
+
+ if host_kernel_manager:
+ # keep track of original options on the kernel module
+ original_options = self.get_kernel_module_option(host, self.kernel_module_name)
+ desired_options = self.kernel_module_option
+
+ # apply as needed, also depending on check mode
+ if original_options != desired_options:
+ changed = True
+ if self.module.check_mode:
+ msg = "Options would be changed on the kernel module"
+ else:
+ self.apply_kernel_module_option(host, self.kernel_module_name, desired_options)
+ msg = "Options have been changed on the kernel module"
+ self.results[host.name]['configured_options'] = desired_options
+ else:
+ msg = "Options are already the same"
+
+ change_list.append(changed)
+ self.results[host.name]['changed'] = changed
+ self.results[host.name]['msg'] = msg
+ self.results[host.name]['original_options'] = original_options
+
+ else:
+ msg = "No kernel module manager found on host %s - impossible to configure." % host.name
+ self.results[host.name]['changed'] = changed
+ self.results[host.name]['msg'] = msg
+ else:
+ msg = "Host %s is disconnected and cannot be changed." % host.name
+ self.results[host.name]['changed'] = changed
+ self.results[host.name]['msg'] = msg
+
+ self.module.exit_json(changed=any(change_list), host_kernel_status=self.results)
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ # add the arguments we're going to use for this module
+ argument_spec.update(
+ cluster_name=dict(type='str', required=False),
+ esxi_hostname=dict(type='str', required=False),
+ kernel_module_name=dict(type='str', required=True),
+ kernel_module_option=dict(type='str', required=True),
+ )
+
+ # make sure we have a valid target cluster_name or esxi_hostname (not both)
+ # and also enable check mode
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['cluster_name', 'esxi_hostname'],
+ ],
+ mutually_exclusive=[
+ ['cluster_name', 'esxi_hostname'],
+ ],
+ )
+
+ vmware_host_config = VmwareKernelManager(module)
+ vmware_host_config.check_host_configuration_state()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_lockdown.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_lockdown.py
new file mode 100644
index 000000000..2686d2f13
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_lockdown.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_lockdown
+short_description: Manage administrator permission for the local administrative account for the ESXi host
+description:
+- This module can be used to manage administrator permission for the local administrative account for the host when ESXi hostname is given.
+- All parameters and VMware objects values are case sensitive.
+- This module is destructive as administrator permissions are managed using APIs used, please read options carefully and proceed.
+- Please specify C(hostname) as vCenter IP or hostname only, as lockdown operations are not possible from standalone ESXi server.
+author:
+- Abhijeet Kasurde (@Akasurde)
+options:
+ cluster_name:
+ description:
+ - Name of cluster.
+    - All host systems from the given cluster will be used to manage lockdown.
+ - Required parameter, if C(esxi_hostname) is not set.
+ type: str
+ esxi_hostname:
+ description:
+    - List of ESXi hostnames to manage lockdown.
+ - Required parameter, if C(cluster_name) is not set.
+ - See examples for specifications.
+ type: list
+ elements: str
+ state:
+ description:
+    - State of the host system
+ - If set to C(disabled), all host systems will be removed from lockdown mode.
+ - If host system is already out of lockdown mode and set to C(disabled), no action will be taken.
+ - If set to C(normal), all host systems will be set in lockdown mode.
+ - If host system is already in lockdown mode and set to C(normal), no action will be taken.
+ - If set to C(strict), all host systems will be set in strict lockdown mode.
+ - If host system is already in strict lockdown mode and set to C(strict), no action will be taken.
+ default: normal
+ choices: [ disabled, normal, strict, present, absent ]
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Enter host system into lockdown mode
+ community.vmware.vmware_host_lockdown:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ state: normal
+ delegate_to: localhost
+
+- name: Exit host systems from lockdown mode
+ community.vmware.vmware_host_lockdown:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ state: disabled
+ delegate_to: localhost
+
+- name: Enter host systems into lockdown mode
+ community.vmware.vmware_host_lockdown:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname:
+ - '{{ esxi_hostname_1 }}'
+ - '{{ esxi_hostname_2 }}'
+ state: normal
+ delegate_to: localhost
+
+- name: Exit host systems from lockdown mode
+ community.vmware.vmware_host_lockdown:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname:
+ - '{{ esxi_hostname_1 }}'
+ - '{{ esxi_hostname_2 }}'
+ state: disabled
+ delegate_to: localhost
+
+- name: Enter all host system from cluster into lockdown mode
+ community.vmware.vmware_host_lockdown:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: '{{ cluster_name }}'
+ state: normal
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+results:
+ description: metadata about state of Host system lock down
+ returned: always
+ type: dict
+ sample: {
+ "host_lockdown_state": {
+ "DC0_C0": {
+ "current_state": "normal",
+ "previous_state": "disabled",
+ "desired_state": "normal",
+ },
+ }
+ }
+'''
+
+try:
+ from pyVmomi import vim
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+from ansible.module_utils._text import to_native
+
+
+class VmwareLockdownManager(PyVmomi):
+ def __init__(self, module):
+ super(VmwareLockdownManager, self).__init__(module)
+ if not self.is_vcenter():
+ self.module.fail_json(msg="Lockdown operations are performed from vCenter only. "
+ "hostname %s is an ESXi server. Please specify hostname "
+ "as vCenter server." % self.module.params['hostname'])
+ cluster_name = self.params.get('cluster_name', None)
+ esxi_host_name = self.params.get('esxi_hostname', None)
+ self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
+
+ def ensure(self):
+ """
+ Function to manage internal state management
+ """
+ results = dict(changed=False, host_lockdown_state=dict())
+ change_list = []
+ desired_state = self.params.get('state')
+
+ if desired_state == 'present':
+ self.module.warn("'present' will be removed in a future version. Please use 'normal' instead.")
+ desired_state = 'normal'
+ elif desired_state == 'absent':
+ self.module.warn("'absent' will be removed in a future version. Please use 'disabled' instead.")
+ desired_state = 'disabled'
+
+ for host in self.hosts:
+ current_state_api = host.configManager.hostAccessManager.lockdownMode
+ current_state = current_state_api[8:].lower()
+ results['host_lockdown_state'][host.name] = dict(current_state=desired_state,
+ desired_state=desired_state,
+ previous_state=current_state
+ )
+ changed = False
+ if current_state != desired_state:
+ changed = True
+ if not self.module.check_mode:
+ try:
+ desired_state_api = 'lockdown' + desired_state.capitalize()
+ host.configManager.hostAccessManager.ChangeLockdownMode(desired_state_api)
+ except vim.fault.HostConfigFault as host_config_fault:
+ self.module.fail_json(msg="Failed to manage lockdown mode for esxi"
+ " hostname %s : %s" % (host.name, to_native(host_config_fault.msg)))
+ except vim.fault.AdminDisabled as admin_disabled:
+ self.module.fail_json(msg="Failed to manage lockdown mode as administrator "
+ "permission has been disabled for "
+ "esxi hostname %s : %s" % (host.name, to_native(admin_disabled.msg)))
+ except Exception as generic_exception:
+ self.module.fail_json(msg="Failed to manage lockdown mode due to generic exception for esxi "
+ "hostname %s : %s" % (host.name, to_native(generic_exception)))
+
+ change_list.append(changed)
+
+ if any(change_list):
+ results['changed'] = True
+
+ self.module.exit_json(**results)
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ cluster_name=dict(type='str', required=False),
+ esxi_hostname=dict(type='list', required=False, elements='str'),
+ state=dict(type='str', default='normal', choices=['disabled', 'normal', 'strict', 'present', 'absent'], required=False),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['cluster_name', 'esxi_hostname'],
+ ]
+ )
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi required for this module')
+
+ vmware_lockdown_mgr = VmwareLockdownManager(module)
+ vmware_lockdown_mgr.ensure()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_lockdown_exceptions.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_lockdown_exceptions.py
new file mode 100644
index 000000000..00c9ec793
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_lockdown_exceptions.py
@@ -0,0 +1,189 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2022, Mario Lenz <m@riolenz.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_lockdown_exceptions
+version_added: '3.1.0'
+short_description: Manage Lockdown Mode Exception Users
+description:
+- This module can be used to manage Lockdown Mode Exception Users.
+- All parameters and VMware objects values are case sensitive.
+- Please specify C(hostname) as vCenter IP or hostname only, as lockdown operations are not possible from standalone ESXi server.
+author:
+- Mario Lenz (@mariolenz)
+options:
+ cluster_name:
+ description:
+ - Name of cluster.
+ - All host systems from given cluster used to manage exception users.
+ - Required parameter, if C(esxi_hostname) is not set.
+ type: str
+ esxi_hostname:
+ description:
+    - List of ESXi hostnames to manage exception users.
+ - Required parameter, if C(cluster_name) is not set.
+ type: list
+ elements: str
+ state:
+ description:
+ - If C(present), make sure the given users are defined as Lockdown Mode Exception Users.
+    - If C(absent), make sure the given users are not Lockdown Mode Exception Users.
+    - If C(set), the given list of users will replace the current Lockdown Mode Exception Users.
+ default: present
+    choices: [ present, absent, set ]
+ type: str
+ exception_users:
+ description:
+ - List of Lockdown Mode Exception Users.
+    - To remove all Exception Users, use I(state=set) with an empty list.
+ type: list
+ elements: str
+ required: true
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Remove all Lockdown Mode Exception Users on a host
+  community.vmware.vmware_host_lockdown_exceptions:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ exception_users: []
+ state: set
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+results:
+ description: metadata about exception users of Host systems
+ returned: always
+ type: dict
+ sample: {
+ "host_lockdown_exceptions": {
+ "DC0_C0": {
+ "current_exception_users": [],
+ "desired_exception_users": [],
+ "previous_exception_users": [
+ "root"
+ ]
+ },
+ }
+ }
+'''
+
+try:
+ from pyVmomi import vim
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+from ansible.module_utils._text import to_native
+
+
+class VmwareLockdownManager(PyVmomi):
+ def __init__(self, module):
+ super(VmwareLockdownManager, self).__init__(module)
+ if not self.is_vcenter():
+ self.module.fail_json(msg="Lockdown operations are performed from vCenter only. "
+ "hostname %s is an ESXi server. Please specify hostname "
+ "as vCenter server." % self.module.params['hostname'])
+ cluster_name = self.params.get('cluster_name', None)
+ esxi_host_name = self.params.get('esxi_hostname', None)
+ self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
+
+ def ensure(self):
+ """
+ Function to manage internal state management
+ """
+ results = dict(changed=False, host_lockdown_exceptions=dict())
+ change_list = []
+ desired_state = self.params.get('state')
+ exception_users = self.params.get('exception_users')
+ for host in self.hosts:
+ current_exception_users = host.configManager.hostAccessManager.QueryLockdownExceptions()
+ current_exception_users.sort()
+ new_exception_users = current_exception_users.copy()
+ results['host_lockdown_exceptions'][host.name] = dict(previous_exception_users=current_exception_users)
+ changed = False
+
+ if desired_state == 'present':
+ for user in exception_users:
+ if user not in current_exception_users:
+ new_exception_users.append(user)
+ changed = True
+ elif desired_state == 'absent':
+ for user in exception_users:
+ if user in current_exception_users:
+ new_exception_users.remove(user)
+ changed = True
+ elif desired_state == 'set':
+ if set(current_exception_users) != set(exception_users):
+ new_exception_users = exception_users
+ changed = True
+
+ new_exception_users.sort()
+ results['host_lockdown_exceptions'][host.name]['desired_exception_users'] = new_exception_users
+ results['host_lockdown_exceptions'][host.name]['current_exception_users'] = new_exception_users
+
+ if changed and not self.module.check_mode:
+ try:
+ host.configManager.hostAccessManager.UpdateLockdownExceptions(new_exception_users)
+
+ except vim.fault.HostConfigFault as host_config_fault:
+ self.module.fail_json(msg="Failed to manage lockdown mode for esxi"
+ " hostname %s : %s" % (host.name, to_native(host_config_fault.msg)))
+ except vim.fault.AdminDisabled as admin_disabled:
+ self.module.fail_json(msg="Failed to manage lockdown mode as administrator "
+ "permission has been disabled for "
+ "esxi hostname %s : %s" % (host.name, to_native(admin_disabled.msg)))
+ except Exception as generic_exception:
+ self.module.fail_json(msg="Failed to manage lockdown mode due to generic exception for esxi "
+ "hostname %s : %s" % (host.name, to_native(generic_exception)))
+ change_list.append(changed)
+
+ if any(change_list):
+ results['changed'] = True
+
+ self.module.exit_json(**results)
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ cluster_name=dict(type='str', required=False),
+ esxi_hostname=dict(type='list', required=False, elements='str'),
+ exception_users=dict(type='list', required=True, elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'set'], required=False),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['cluster_name', 'esxi_hostname'],
+ ]
+ )
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi required for this module')
+
+ vmware_lockdown_mgr = VmwareLockdownManager(module)
+ vmware_lockdown_mgr.ensure()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_logbundle.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_logbundle.py
new file mode 100644
index 000000000..433314595
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_logbundle.py
@@ -0,0 +1,269 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, sky-joker
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_logbundle
+short_description: Fetch logbundle file from ESXi
+description:
+ - This module can be used to fetch logbundle file from ESXi.
+author:
+ - sky-joker (@sky-joker)
+options:
+ esxi_hostname:
+ description:
+ - Name of the host system to fetch the logbundle.
+ type: str
+ required: true
+ dest:
+ description:
      - File destination on localhost. The path must exist.
+ type: str
+ required: true
+ manifests:
+ description:
+ - Logs to include in the logbundle file.
+ - Refer to the id key of the M(community.vmware.vmware_host_logbundle_info) module for values that can be specified in the manifest.
+ default:
+ - System:Base
+ - System:CoreDumps
+ - System:EsxImage
+ - System:IOFilter
+ - System:LoadESX
+ - System:Modules
+ - System:RDMA
+ - System:ResourceGroups
+ - System:TPM
+ - System:VFlash
+ - System:VMTools
+ - System:VmiofPlugins
+ - System:ntp
+ - System:uwstats
+ - Fcd:Catalog
+ - VirtualMachines:CoreDumps
+ - VirtualMachines:VirtualMachineStats
      - VirtualMachines:base
+ - VirtualMachines:diskinfo
+ - VirtualMachines:logs
+ - Storage:FCoE
+ - Storage:Multipathing
+ - Storage:NAS
+ - Storage:VSAN
+ - Storage:VSANHealth
+ - Storage:VSANIscsiTarget
+ - Storage:VSANPerfStats
+ - Storage:VSANPerfSvc
+ - Storage:VSANTraces
+ - Storage:VVOL
+ - Storage:base
+ - Storage:iodm
+ - Storage:iscsi
+ - FeatureStateSwitch:FeatureStateSwitch
+ - Userworld:HostAgent
+ - Userworld:ProcessInformation
+ - Configuration:System
+ - Logs:System
+ - hostProfiles:SystemImageCacheHostProfile
+ - hostProfiles:hostProfiles
+ - FileSystem:VMFSDiskDump
+ - FileSystem:base
+ - ActiveDirectory:base
+ - CIM:base
+ - Hardware:base
+ - Hardware:usb
+ - Installer:base
+ - Network:base
+ - Network:dvs
+ - Network:lacp
+ - Network:nscd
+ - Network:tcpip
+ - IntegrityChecks:md5sums
+ type: list
+ elements: str
+ required: false
+ performance_data:
+ description:
+ - Gather performance data for ESXi.
+ type: dict
+ required: false
+ suboptions:
+ duration:
+ description:
+ - Duration for which performance data is gathered.
+ type: int
+ default: 300
+ interval:
+ description:
+ - Interval for which performance data is gathered.
+ type: int
+ default: 5
+extends_documentation_fragment:
+ - community.vmware.vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: fetch logbundle file from ESXi
+ community.vmware.vmware_host_logbundle:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ esxi_hostname }}"
+ dest: ./esxi-log.tgz
+
+- name: fetch logbundle file from ESXi with manifests
+ community.vmware.vmware_host_logbundle:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ esxi_hostname }}"
+ dest: ./esxi-log.tgz
+ manifests:
+ - System:Base
+ - VirtualMachines:VirtualMachineStats
+'''
+
+RETURN = r'''
+dest:
+ description: saved path of a logbundle file for ESXi
+ returned: on success
+ type: str
+ sample:
+ {
+ "changed": true,
+ "dest": "./esxi-log.tgz",
+ "failed": false,
+ "gid": 0,
+ "group": "root",
+ "mode": "0644",
+ "owner": "root",
+ "size": 25783140,
+ "state": "file",
+ "uid": 0
+ }
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+import xml.etree.ElementTree as ET
+
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
class VMwareHostLogbundle(PyVmomi):
    """Fetch a vm-support logbundle from an ESXi host and save it to a local file."""

    def __init__(self, module):
        super(VMwareHostLogbundle, self).__init__(module)
        self.esxi_hostname = self.params['esxi_hostname']
        self.dest = self.params['dest']
        self.manifests = self.params['manifests']
        self.performance_data = self.params['performance_data']

        # vm-support delivers a gzipped tarball, so normalize the destination suffix.
        if not self.dest.endswith('.tgz'):
            self.dest = self.dest + '.tgz'

    def generate_req_headers(self, url):
        """Acquire a one-time service ticket for ``url`` and return HTTP headers carrying it.

        The ticket lets us call the host's CGI endpoint without re-authenticating.
        """
        # get ticket
        req = vim.SessionManager.HttpServiceRequestSpec(method='httpGet', url=url)
        ticket = self.content.sessionManager.AcquireGenericServiceTicket(req)

        headers = {
            'Content-Type': 'application/octet-stream',
            'Cookie': 'vmware_cgi_ticket=%s' % ticket.id
        }

        return headers

    def validate_manifests(self):
        """Fail the module if any requested manifest id is not offered by the host."""
        url = 'https://' + self.esxi_hostname + '/cgi-bin/vm-support.cgi?listmanifests=1'
        headers = self.generate_req_headers(url)

        available_manifests = set()
        try:
            resp, info = fetch_url(self.module, method='GET', headers=headers, url=url)
            if info['status'] != 200:
                self.module.fail_json(msg="failed to fetch manifests from %s: %s" % (url, info['msg']))
            manifest_list = ET.fromstring(resp.read())
            for manifest in manifest_list[0]:
                available_manifests.add(manifest.attrib['id'])

        except Exception as e:
            self.module.fail_json(msg="Failed to fetch manifests from %s: %s" % (url, e))

        for manifest in self.manifests:
            # Set membership instead of the original per-manifest linear scan.
            if manifest not in available_manifests:
                self.module.fail_json(msg="%s is a manifest that cannot be specified." % manifest)

    def get_logbundle(self):
        """Download the logbundle for the validated manifests, write it to ``dest``, and exit."""
        self.validate_manifests()
        url = 'https://' + self.esxi_hostname + '/cgi-bin/vm-support.cgi?manifests=' + '&'.join(self.manifests)

        if self.performance_data:
            duration = self.performance_data.get('duration')
            interval = self.performance_data.get('interval')
            url = url + '&performance=true&duration=%s&interval=%s' % (duration, interval)

        headers = self.generate_req_headers(url)

        try:
            resp, info = fetch_url(self.module, method='GET', headers=headers, url=url)
            if info['status'] != 200:
                self.module.fail_json(msg="failed to fetch logbundle from %s: %s" % (url, info['msg']))
            with open(self.dest, 'wb') as local_file:
                local_file.write(resp.read())

        except Exception as e:
            self.module.fail_json(msg="Failed to fetch logbundle from %s: %s" % (url, e))

        self.module.exit_json(changed=True, dest=self.dest)
+
+
def main():
    """Entry point: declare module arguments and fetch the logbundle."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        esxi_hostname=dict(type='str', required=True),
        dest=dict(type='str', required=True),
        # NOTE: the default list previously contained 'VirtualMachines:base' twice;
        # the duplicate is removed so the manifest is requested only once.
        manifests=dict(type='list', elements='str',
                       default=['System:Base', 'System:CoreDumps', 'System:EsxImage', 'System:IOFilter',
                                'System:LoadESX', 'System:Modules', 'System:RDMA', 'System:ResourceGroups',
                                'System:TPM', 'System:VFlash', 'System:VMTools', 'System:VmiofPlugins', 'System:ntp',
                                'System:uwstats', 'Fcd:Catalog', 'VirtualMachines:CoreDumps',
                                'VirtualMachines:VirtualMachineStats', 'VirtualMachines:base',
                                'VirtualMachines:diskinfo', 'VirtualMachines:logs', 'Storage:FCoE',
                                'Storage:Multipathing', 'Storage:NAS', 'Storage:VSAN', 'Storage:VSANHealth',
                                'Storage:VSANIscsiTarget', 'Storage:VSANPerfStats', 'Storage:VSANPerfSvc',
                                'Storage:VSANTraces', 'Storage:VVOL', 'Storage:base', 'Storage:iodm', 'Storage:iscsi',
                                'FeatureStateSwitch:FeatureStateSwitch', 'Userworld:HostAgent',
                                'Userworld:ProcessInformation', 'Configuration:System', 'Logs:System',
                                'hostProfiles:SystemImageCacheHostProfile', 'hostProfiles:hostProfiles',
                                'FileSystem:VMFSDiskDump', 'FileSystem:base', 'ActiveDirectory:base', 'CIM:base',
                                'Hardware:base', 'Hardware:usb', 'Installer:base', 'Network:base', 'Network:dvs',
                                'Network:lacp', 'Network:nscd', 'Network:tcpip', 'IntegrityChecks:md5sums']),
        performance_data=dict(type='dict', required=False,
                              options=dict(
                                  duration=dict(type='int', default=300),
                                  interval=dict(type='int', default=5)
                              ))
    )

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    vmware_host_logbundle_mgr = VMwareHostLogbundle(module)
    vmware_host_logbundle_mgr.get_logbundle()


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_logbundle_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_logbundle_info.py
new file mode 100644
index 000000000..e066430ad
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_logbundle_info.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, sky-joker
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_logbundle_info
+short_description: Gathers manifest info for logbundle
+description:
+ - This module can be used to gather manifest information for logbundle from ESXi.
+author:
+ - sky-joker (@sky-joker)
+options:
+ esxi_hostname:
+ description:
+ - Name of the host system to fetch the manifests for logbundle.
+ type: str
+ required: true
+extends_documentation_fragment:
+ - community.vmware.vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: fetch the manifests for logbundle from ESXi
+ community.vmware.vmware_host_logbundle_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ esxi_hostname }}"
+ register: fetch_manifests_result
+'''
+
+RETURN = r'''
+manifests:
+ description: list of dictionary of manifest information for logbundle
+ returned: always
+ type: list
+ sample:
+ [
+ {
+ "enabled": "true",
+ "group": "System",
+ "id": "System:Base",
+ "name": "Base",
+ "vmOnly": "false"
+ },
+ {
+ "enabled": "false",
+ "group": "System",
+ "id": "System:BaseMinmal",
+ "name": "BaseMinmal",
+ "vmOnly": "false"
+ },
+ {
+ "enabled": "true",
+ "group": "Fcd",
+ "id": "Fcd:Catalog",
+ "name": "Catalog",
+ "vmOnly": "false"
+ },
+ {
+ "enabled": "false",
+ "group": "VirtualMachines",
+ "id": "VirtualMachines:CoreDumpHung",
+ "name": "CoreDumpHung",
+ "vmOnly": "true"
+ },
+ {
+ "enabled": "true",
+ "group": "System",
+ "id": "System:CoreDumps",
+ "name": "CoreDumps",
+ "vmOnly": "false"
+ }
+ ]
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+import xml.etree.ElementTree as ET
+
+
class VMwareHostLogbundleInfo(PyVmomi):
    """Gather the list of vm-support manifests available on an ESXi host."""

    def __init__(self, module):
        super(VMwareHostLogbundleInfo, self).__init__(module)
        self.esxi_hostname = self.params['esxi_hostname']

    def generate_req_headers(self, url):
        """Acquire a one-time service ticket for ``url`` and return HTTP headers carrying it."""
        # get ticket
        req = vim.SessionManager.HttpServiceRequestSpec(method='httpGet', url=url)
        ticket = self.content.sessionManager.AcquireGenericServiceTicket(req)

        headers = {
            'Content-Type': 'application/octet-stream',
            'Cookie': 'vmware_cgi_ticket=%s' % ticket.id
        }

        return headers

    def get_listmanifests(self):
        """Fetch and parse the manifest listing, then exit the module with the result."""
        url = 'https://' + self.esxi_hostname + '/cgi-bin/vm-support.cgi?listmanifests=1'
        headers = self.generate_req_headers(url)

        try:
            resp, info = fetch_url(self.module, method='GET', headers=headers, url=url)
            # fetch_url returns resp=None on failure; check the HTTP status explicitly so the
            # user sees the real failure instead of an AttributeError on None. This mirrors
            # the status handling in vmware_host_logbundle.py.
            if info['status'] != 200:
                self.module.fail_json(msg="Failed to fetch manifests from %s: %s" % (url, info['msg']))
            manifest_list = ET.fromstring(resp.read())
            manifests = []
            for manifest in manifest_list[0]:
                manifests.append(manifest.attrib)

            self.module.exit_json(changed=False, manifests=manifests)
        except Exception as e:
            self.module.fail_json(msg="Failed to fetch manifests from %s: %s" % (url, e))
+
+
def main():
    """Entry point: declare module arguments and gather the manifest listing."""
    spec = vmware_argument_spec()
    spec.update(esxi_hostname=dict(type='str', required=True))

    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)

    gatherer = VMwareHostLogbundleInfo(module)
    gatherer.get_listmanifests()


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_ntp.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_ntp.py
new file mode 100644
index 000000000..3d6b0612e
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_ntp.py
@@ -0,0 +1,387 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_ntp
+short_description: Manage NTP server configuration of an ESXi host
+description:
+- This module can be used to configure, add or remove NTP servers from an ESXi host.
+- If C(state) is not given, the NTP servers will be configured in the exact sequence.
+- User can specify an ESXi hostname or Cluster name. In case of cluster name, all ESXi hosts are updated.
+author:
+- Abhijeet Kasurde (@Akasurde)
+- Christian Kotte (@ckotte)
+options:
+ esxi_hostname:
+ description:
+ - Name of the host system to work with.
+ - This parameter is required if C(cluster_name) is not specified.
+ type: str
+ cluster_name:
+ description:
+ - Name of the cluster from which all host systems will be used.
+ - This parameter is required if C(esxi_hostname) is not specified.
+ type: str
+ ntp_servers:
+ description:
+ - "IP or FQDN of NTP server(s)."
+ - This accepts a list of NTP servers. For multiple servers, please look at the examples.
+ type: list
+ required: true
+ elements: str
+ state:
+ description:
+ - "present: Add NTP server(s), if specified server(s) are absent else do nothing."
+ - "absent: Remove NTP server(s), if specified server(s) are present else do nothing."
+ - Specified NTP server(s) will be configured if C(state) isn't specified.
+ choices: [ present, absent ]
+ type: str
+ verbose:
+ description:
+ - Verbose output of the configuration change.
+ - Explains if an NTP server was added, removed, or if the NTP server sequence was changed.
+ type: bool
+ required: false
+ default: false
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Configure NTP servers for an ESXi Host
+ community.vmware.vmware_host_ntp:
+ hostname: vcenter01.example.local
+ username: administrator@vsphere.local
+ password: SuperSecretPassword
+ esxi_hostname: esx01.example.local
+ ntp_servers:
+ - 0.pool.ntp.org
+ - 1.pool.ntp.org
+ delegate_to: localhost
+
+- name: Set NTP servers for all ESXi Host in given Cluster
+ community.vmware.vmware_host_ntp:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: '{{ cluster_name }}'
+ state: present
+ ntp_servers:
+ - 0.pool.ntp.org
+ - 1.pool.ntp.org
+ delegate_to: localhost
+
+- name: Set NTP servers for an ESXi Host
+ community.vmware.vmware_host_ntp:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ state: present
+ ntp_servers:
+ - 0.pool.ntp.org
+ - 1.pool.ntp.org
+ delegate_to: localhost
+
+- name: Remove NTP servers for an ESXi Host
+ community.vmware.vmware_host_ntp:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ state: absent
+ ntp_servers:
+ - bad.server.ntp.org
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+host_ntp_status:
+ description: metadata about host system's NTP configuration
+ returned: always
+ type: dict
+ sample: {
+ "esx01.example.local": {
+ "ntp_servers_changed": ["time1.example.local", "time2.example.local", "time3.example.local", "time4.example.local"],
+ "ntp_servers": ["time3.example.local", "time4.example.local"],
+ "ntp_servers_previous": ["time1.example.local", "time2.example.local"],
+ },
+ "esx02.example.local": {
+ "ntp_servers_changed": ["time3.example.local"],
+ "ntp_servers_current": ["time1.example.local", "time2.example.local", "time3.example.local"],
+ "state": "present",
+ "ntp_servers_previous": ["time1.example.local", "time2.example.local"],
+ },
+ }
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+from ansible.module_utils._text import to_native
+
+
class VmwareNtpConfigManager(PyVmomi):
    """Class to manage configured NTP servers"""

    def __init__(self, module):
        super(VmwareNtpConfigManager, self).__init__(module)
        cluster_name = self.params.get('cluster_name', None)
        esxi_host_name = self.params.get('esxi_hostname', None)
        self.ntp_servers = self.params.get('ntp_servers', list())
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        if not self.hosts:
            self.module.fail_json(msg="Failed to find host system.")
        self.results = {}
        # desired_state is None when state was not given: that means "overwrite".
        self.desired_state = self.params.get('state', None)
        self.verbose = module.params.get('verbose', False)

    def update_ntp_servers(self, host, ntp_servers_configured, ntp_servers_to_change, operation='overwrite'):
        """Update NTP server configuration.

        ``operation`` is one of 'overwrite', 'add', or 'delete'.
        Returns the resulting NTP server list.
        """
        host_date_time_manager = host.configManager.dateTimeSystem
        if host_date_time_manager:
            # Prepare new NTP server list
            if operation == 'overwrite':
                new_ntp_servers = list(ntp_servers_to_change)
            else:
                new_ntp_servers = list(ntp_servers_configured)
                if operation == 'add':
                    new_ntp_servers = new_ntp_servers + ntp_servers_to_change
                elif operation == 'delete':
                    for server in ntp_servers_to_change:
                        if server in new_ntp_servers:
                            new_ntp_servers.remove(server)

            # build verbose message
            if self.verbose:
                message = self.build_changed_message(
                    ntp_servers_configured,
                    new_ntp_servers,
                    ntp_servers_to_change,
                    operation
                )

            ntp_config_spec = vim.host.NtpConfig()
            ntp_config_spec.server = new_ntp_servers
            date_config_spec = vim.host.DateTimeConfig()
            date_config_spec.ntpConfig = ntp_config_spec
            try:
                if not self.module.check_mode:
                    host_date_time_manager.UpdateDateTimeConfig(date_config_spec)
                if self.verbose:
                    self.results[host.name]['msg'] = message
            except vim.fault.HostConfigFault as config_fault:
                self.module.fail_json(
                    msg="Failed to configure NTP for host '%s' due to : %s" %
                    (host.name, to_native(config_fault.msg))
                )

            return new_ntp_servers

    def check_host_state(self):
        """Check ESXi host configuration and apply pending NTP changes, then exit the module."""
        change_list = []
        changed = False
        for host in self.hosts:
            self.results[host.name] = dict()
            if host.runtime.connectionState == "connected":
                ntp_servers_configured, ntp_servers_to_change = self.check_ntp_servers(host=host)
                # add/remove NTP servers
                if self.desired_state:
                    self.results[host.name]['state'] = self.desired_state
                    if ntp_servers_to_change:
                        self.results[host.name]['ntp_servers_changed'] = ntp_servers_to_change
                        operation = 'add' if self.desired_state == 'present' else 'delete'
                        new_ntp_servers = self.update_ntp_servers(
                            host=host,
                            ntp_servers_configured=ntp_servers_configured,
                            ntp_servers_to_change=ntp_servers_to_change,
                            operation=operation
                        )
                        self.results[host.name]['ntp_servers_current'] = new_ntp_servers
                        self.results[host.name]['changed'] = True
                        change_list.append(True)
                    else:
                        self.results[host.name]['ntp_servers_current'] = ntp_servers_configured
                        if self.verbose:
                            self.results[host.name]['msg'] = (
                                "NTP servers already added" if self.desired_state == 'present'
                                else "NTP servers already removed"
                            )
                        self.results[host.name]['changed'] = False
                        change_list.append(False)
                # overwrite NTP servers
                else:
                    self.results[host.name]['ntp_servers'] = self.ntp_servers
                    if ntp_servers_to_change:
                        self.results[host.name]['ntp_servers_changed'] = self.get_differt_entries(
                            ntp_servers_configured,
                            ntp_servers_to_change
                        )
                        self.update_ntp_servers(
                            host=host,
                            ntp_servers_configured=ntp_servers_configured,
                            ntp_servers_to_change=ntp_servers_to_change,
                            operation='overwrite'
                        )
                        self.results[host.name]['changed'] = True
                        change_list.append(True)
                    else:
                        if self.verbose:
                            self.results[host.name]['msg'] = "NTP servers already configured"
                        self.results[host.name]['changed'] = False
                        change_list.append(False)
            else:
                self.results[host.name]['changed'] = False
                self.results[host.name]['msg'] = "Host %s is disconnected and cannot be changed." % host.name

        if any(change_list):
            changed = True
        self.module.exit_json(changed=changed, host_ntp_status=self.results)

    def check_ntp_servers(self, host):
        """Check configured NTP servers.

        Returns a tuple of (currently configured servers, servers that need changing).
        """
        update_ntp_list = []
        host_datetime_system = host.configManager.dateTimeSystem
        if host_datetime_system:
            ntp_servers_configured = host_datetime_system.dateTimeInfo.ntpConfig.server
            # add/remove NTP servers
            if self.desired_state:
                for ntp_server in self.ntp_servers:
                    if self.desired_state == 'present' and ntp_server not in ntp_servers_configured:
                        update_ntp_list.append(ntp_server)
                    if self.desired_state == 'absent' and ntp_server in ntp_servers_configured:
                        update_ntp_list.append(ntp_server)
            # overwrite NTP servers
            else:
                # Any difference (including order) triggers a full overwrite.
                if ntp_servers_configured != self.ntp_servers:
                    for ntp_server in self.ntp_servers:
                        update_ntp_list.append(ntp_server)
            if update_ntp_list:
                self.results[host.name]['ntp_servers_previous'] = ntp_servers_configured

        return ntp_servers_configured, update_ntp_list

    def build_changed_message(self, ntp_servers_configured, new_ntp_servers, ntp_servers_to_change, operation):
        """Build changed message"""
        check_mode = 'would be ' if self.module.check_mode else ''
        if operation == 'overwrite':
            # get differences
            add = self.get_not_in_list_one(new_ntp_servers, ntp_servers_configured)
            remove = self.get_not_in_list_one(ntp_servers_configured, new_ntp_servers)
            diff_servers = list(ntp_servers_configured)
            if add and remove:
                for server in add:
                    diff_servers.append(server)
                for server in remove:
                    diff_servers.remove(server)
                if new_ntp_servers != diff_servers:
                    message = (
                        "NTP server %s %sadded and %s %sremoved and the server sequence %schanged as well" %
                        (self.array_to_string(add), check_mode, self.array_to_string(remove), check_mode, check_mode)
                    )
                else:
                    if new_ntp_servers != ntp_servers_configured:
                        message = (
                            "NTP server %s %sreplaced with %s" %
                            (self.array_to_string(remove), check_mode, self.array_to_string(add))
                        )
                    else:
                        message = (
                            "NTP server %s %sremoved and %s %sadded" %
                            (self.array_to_string(remove), check_mode, self.array_to_string(add), check_mode)
                        )
            elif add:
                for server in add:
                    diff_servers.append(server)
                if new_ntp_servers != diff_servers:
                    message = (
                        "NTP server %s %sadded and the server sequence %schanged as well" %
                        (self.array_to_string(add), check_mode, check_mode)
                    )
                else:
                    message = "NTP server %s %sadded" % (self.array_to_string(add), check_mode)
            elif remove:
                for server in remove:
                    diff_servers.remove(server)
                if new_ntp_servers != diff_servers:
                    message = (
                        "NTP server %s %sremoved and the server sequence %schanged as well" %
                        (self.array_to_string(remove), check_mode, check_mode)
                    )
                else:
                    message = "NTP server %s %sremoved" % (self.array_to_string(remove), check_mode)
            else:
                message = "NTP server sequence %schanged" % check_mode
        elif operation == 'add':
            message = "NTP server %s %sadded" % (self.array_to_string(ntp_servers_to_change), check_mode)
        elif operation == 'delete':
            message = "NTP server %s %sremoved" % (self.array_to_string(ntp_servers_to_change), check_mode)

        return message

    @staticmethod
    def get_not_in_list_one(list1, list2):
        """Return entries of list1 that are not in list2"""
        return [x for x in list1 if x not in set(list2)]

    @staticmethod
    def array_to_string(array):
        """Return a human readable string built from an array of server names"""
        # Guard the empty case explicitly; previously no branch assigned 'string'
        # for an empty array, which raised UnboundLocalError.
        if not array:
            return ''
        if len(array) > 2:
            string = (
                ', '.join("'{0}'".format(element) for element in array[:-1]) + ', and '
                + "'{0}'".format(str(array[-1]))
            )
        elif len(array) == 2:
            string = ' and '.join("'{0}'".format(element) for element in array)
        elif len(array) == 1:
            string = "'{0}'".format(array[0])
        return string

    @staticmethod
    def get_differt_entries(list1, list2):
        """Return different entries of two lists"""
        return [a for a in list1 + list2 if (a not in list1) or (a not in list2)]
+
+
def main():
    """Module entry point: parse arguments and apply the NTP configuration."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
        ntp_servers=dict(type='list', required=True, elements='str'),
        state=dict(type='str', choices=['absent', 'present']),
        verbose=dict(type='bool', default=False, required=False)
    )

    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[['cluster_name', 'esxi_hostname']],
        supports_check_mode=True
    )

    VmwareNtpConfigManager(module).check_host_state()


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_ntp_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_ntp_info.py
new file mode 100644
index 000000000..1515328a8
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_ntp_info.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_ntp_info
+short_description: Gathers info about NTP configuration on an ESXi host
+description:
+- This module can be used to gather information about NTP configurations on an ESXi host.
+author:
+- Abhijeet Kasurde (@Akasurde)
+options:
+ cluster_name:
+ description:
+ - Name of the cluster.
+ - NTP config information about each ESXi server will be returned for the given cluster.
+ - If C(esxi_hostname) is not given, this parameter is required.
+ type: str
+ esxi_hostname:
+ description:
+ - ESXi hostname.
+ - NTP config information about this ESXi server will be returned.
+ - If C(cluster_name) is not given, this parameter is required.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather NTP info about all ESXi Host in the given Cluster
+ community.vmware.vmware_host_ntp_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: cluster_name
+ delegate_to: localhost
+ register: cluster_host_ntp
+
+- name: Gather NTP info about ESXi Host
+ community.vmware.vmware_host_ntp_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ delegate_to: localhost
+ register: host_ntp
+'''
+
+RETURN = r'''
+hosts_ntp_info:
+ description:
+ - dict with hostname as key and dict with NTP infos as value
+ returned: hosts_ntp_info
+ type: dict
+ sample: {
+ "10.76.33.226": [
+ {
+ "ntp_servers": [],
+ "time_zone_description": "UTC",
+ "time_zone_gmt_offset": 0,
+ "time_zone_identifier": "UTC",
+ "time_zone_name": "UTC"
+ }
+ ]
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+
+
class VmwareNtpInfoManager(PyVmomi):
    """Collect NTP and time-zone configuration from the selected ESXi hosts."""

    def __init__(self, module):
        super(VmwareNtpInfoManager, self).__init__(module)
        cluster = self.params.get('cluster_name', None)
        esxi_host = self.params.get('esxi_hostname', None)
        self.hosts = self.get_all_host_objs(cluster_name=cluster, esxi_host_name=esxi_host)

    def gather_ntp_info(self):
        """Return a dict mapping host name to a list with its NTP/time-zone settings."""
        info_by_host = {}
        for host in self.hosts:
            entries = []
            date_time_system = host.configManager.dateTimeSystem
            if date_time_system:
                time_zone = date_time_system.dateTimeInfo.timeZone
                entries.append(
                    dict(
                        time_zone_identifier=time_zone.key,
                        time_zone_name=time_zone.name,
                        time_zone_description=time_zone.description,
                        time_zone_gmt_offset=time_zone.gmtOffset,
                        ntp_servers=list(date_time_system.dateTimeInfo.ntpConfig.server)
                    )
                )
            info_by_host[host.name] = entries
        return info_by_host
+
+
def main():
    """Module entry point: parse arguments and return the gathered NTP facts."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
    )

    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[['cluster_name', 'esxi_hostname']],
        supports_check_mode=True,
    )

    gatherer = VmwareNtpInfoManager(module)
    module.exit_json(changed=False, hosts_ntp_info=gatherer.gather_ntp_info())


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_package_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_package_info.py
new file mode 100644
index 000000000..9e53f69da
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_package_info.py
@@ -0,0 +1,120 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_package_info
+short_description: Gathers info about available packages on an ESXi host
+description:
+- This module can be used to gather information about available packages and their status on an ESXi host.
+author:
+- Abhijeet Kasurde (@Akasurde)
+options:
+ cluster_name:
+ description:
+ - Name of the cluster.
+ - Package information about each ESXi server will be returned for given cluster.
+ - If C(esxi_hostname) is not given, this parameter is required.
+ type: str
+ esxi_hostname:
+ description:
+ - ESXi hostname.
+ - Package information about this ESXi server will be returned.
+ - If C(cluster_name) is not given, this parameter is required.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather info about all ESXi Host in given Cluster
+ community.vmware.vmware_host_package_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: cluster_name
+ delegate_to: localhost
+ register: cluster_host_packages
+
+- name: Gather info about ESXi Host
+ community.vmware.vmware_host_package_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ delegate_to: localhost
+ register: host_packages
+'''
+
+RETURN = r'''
+hosts_package_info:
+ description:
+ - dict with hostname as key and dict with package information as value
+ returned: hosts_package_info
+ type: dict
+ sample: { "hosts_package_info": { "localhost.localdomain": []}}
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+
+
class VmwarePackageManager(PyVmomi):
    """Collect the software packages available on the selected ESXi hosts."""

    def __init__(self, module):
        super(VmwarePackageManager, self).__init__(module)
        cluster = self.params.get('cluster_name', None)
        esxi_host = self.params.get('esxi_hostname', None)
        self.hosts = self.get_all_host_objs(cluster_name=cluster, esxi_host_name=esxi_host)

    def gather_package_info(self):
        """Return a dict mapping host name to a list of package description dicts."""
        info_by_host = {}
        for host in self.hosts:
            packages = []
            image_config_mgr = host.configManager.imageConfigManager
            if image_config_mgr:
                for pkg in image_config_mgr.FetchSoftwarePackages():
                    packages.append(dict(
                        name=pkg.name,
                        version=pkg.version,
                        vendor=pkg.vendor,
                        summary=pkg.summary,
                        description=pkg.description,
                        acceptance_level=pkg.acceptanceLevel,
                        maintenance_mode_required=pkg.maintenanceModeRequired,
                        creation_date=pkg.creationDate,
                    ))
            info_by_host[host.name] = packages
        return info_by_host
+
+
def main():
    """Module entry point: parse arguments and return the gathered package facts."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
    )

    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[['cluster_name', 'esxi_hostname']],
        supports_check_mode=True,
    )

    gatherer = VmwarePackageManager(module)
    module.exit_json(changed=False, hosts_package_info=gatherer.gather_package_info())


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_passthrough.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_passthrough.py
new file mode 100644
index 000000000..9693a9d01
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_passthrough.py
@@ -0,0 +1,359 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2021, sky-joker
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: vmware_host_passthrough
+short_description: Manage PCI device passthrough settings on host
+author:
+ - sky-joker (@sky-joker)
+description:
+  - This module can be used to manage PCI device passthrough settings on host systems.
+notes:
+ - Supports C(check_mode).
+options:
+ cluster:
+ description:
+ - Name of the cluster from which all host systems will be used.
+ - This parameter is required if C(esxi_hostname) is not specified.
+ aliases:
+ - cluster_name
+ type: str
+ esxi_hostname:
+ description:
+ - Name of the host system to work with.
+ - This parameter is required if C(cluster_name) is not specified.
+ - User can specify specific host from the cluster.
+ type: str
+ devices:
+ description:
+ - List of PCI device name or id.
+ suboptions:
+ device:
+ description:
+ - Name of PCI device to enable passthrough.
+ aliases:
+ - name
+ - device_name
+ type: str
+ elements: dict
+ required: true
+ type: list
+ state:
+ description:
+ - If I(state=present), passthrough of PCI device will be enabled.
+ - If I(state=absent), passthrough of PCI device will be disabled.
+ choices:
+ - present
+ - absent
+ default: present
+ type: str
+extends_documentation_fragment:
+ - community.vmware.vmware.documentation
+"""
+
+EXAMPLES = r"""
+- name: Enable PCI device passthrough against the whole ESXi in a cluster
+ community.vmware.vmware_host_passthrough:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ cluster: "{{ ccr1 }}"
+ devices:
+ - device_name: "Dual Band Wireless AC 3165"
+ state: present
+
+- name: Enable PCI device passthrough against one ESXi
+ community.vmware.vmware_host_passthrough:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ esxi_hostname: "{{ esxi1 }}"
+ devices:
+ - device_name: "Dual Band Wireless AC 3165"
+ state: present
+
+- name: Enable PCI device passthrough with PCI ids
+ community.vmware.vmware_host_passthrough:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ esxi_hostname: "{{ esxi1 }}"
+ devices:
+ - device: '0000:03:00.0'
+ - device: '0000:00:02.0'
+ state: present
+
+- name: Disable PCI device passthrough against the whole ESXi in a cluster
+ community.vmware.vmware_host_passthrough:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ cluster: "{{ ccr1 }}"
+ devices:
+ - device_name: "Dual Band Wireless AC 3165"
+ state: absent
+"""
+
+RETURN = r"""
+passthrough_configs:
+ description:
+    - List of PCI devices that have passthrough enabled, for each host system.
+ returned: changed
+ type: list
+ elements: dict
+ sample: >-
+ [
+ {
+ "esxi-01.example.com": [
+ {
+ "device_id": "0000:03:00.0",
+ "device_name": "Dual Band Wireless AC 3165",
+ "passthruEnabled": true
+ }
+ ]
+ },
+ {
+ "esxi-02.example.com": [
+ {
+ "device_id": "0000:03:00.0",
+ "device_name": "Dual Band Wireless AC 3165",
+ "passthruEnabled": true
+ }
+ ]
+ }
+ ]
+"""
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+import copy
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+
+class VMwareHostPassthrough(PyVmomi):
+ def __init__(self, module):
+ super(VMwareHostPassthrough, self).__init__(module)
+ self.cluster = self.params['cluster']
+ self.esxi_hostname = self.params['esxi_hostname']
+ self.devices = self.params['devices']
+ self.state = self.params['state']
+
+ self.hosts = self.get_all_host_objs(cluster_name=self.cluster, esxi_host_name=self.esxi_hostname)
+ # Looks for a specified ESXi host from a cluster if specified cluster and ESXi host.
+ if self.cluster and self.esxi_hostname:
+ self.hosts = [host_obj for host_obj in self.hosts if host_obj.name == self.esxi_hostname]
+ if not self.hosts:
+ self.module.fail_json(msg="Failed to find host system: %s" % self.esxi_hostname)
+
+ self.result = dict(changed=False, passthrough_configs=[], diff={})
+
+ def collect_pci_device_ids_for_supported_passthrough(self):
+ """
+ Collect device ids for supported passthrough from pciPassthruInfo.
+ The condition for whether support passthrough is that passthruCapable property has True.
+ """
+ self.hosts_passthrough_pci_device_id = {}
+ for host_obj in self.hosts:
+ esxi_hostname = host_obj.name
+ self.hosts_passthrough_pci_device_id[esxi_hostname] = {
+ 'host_obj': host_obj,
+ 'pci_device_ids': []
+ }
+ for pci_device in host_obj.config.pciPassthruInfo:
+ if pci_device.passthruCapable:
+ self.hosts_passthrough_pci_device_id[esxi_hostname]['pci_device_ids'].append(pci_device)
+
+ def collect_pci_devices_able_to_enable_passthrough(self):
+ """
+ Collect devices able to enable passthrough based on device id.
+ """
+ self.hosts_passthrough_pci_devices = []
+ for esxi_hostname, value in self.hosts_passthrough_pci_device_id.items():
+ pci_devices = []
+ for device_id in value['pci_device_ids']:
+ for device in value['host_obj'].hardware.pciDevice:
+ if device.id == device_id.id:
+ pci_devices.append({
+ 'device_name': device.deviceName,
+ 'device_id': device.id,
+ 'passthruEnabled': device_id.passthruEnabled,
+ })
+ self.hosts_passthrough_pci_devices.append({
+ esxi_hostname: {
+ 'host_obj': value['host_obj'],
+ 'pci_devices': pci_devices
+ }
+ })
+
+ def check_whether_devices_exist(self):
+ """
+ Check specified pci devices are exists.
+ """
+ self.existent_devices = []
+ self.non_existent_devices = []
+
+ # The keys use in checking pci devices existing.
+ keys = ['device_name', 'device_id']
+
+ for host_pci_device in self.hosts_passthrough_pci_devices:
+ pci_devices = []
+ for esxi_hostname, value in host_pci_device.items():
+ for target_device in self.devices:
+ device = target_device['device']
+ if device in [pci_device.get(key) for key in keys for pci_device in value['pci_devices']]:
+ pci_devices.append(
+ [
+ pci_device for pci_device in value['pci_devices']
+ if device == pci_device['device_name'] or device == pci_device['device_id']
+ ]
+ )
+ else:
+ self.non_existent_devices.append(device)
+ self.existent_devices.append({
+ esxi_hostname: {
+ 'host_obj': value['host_obj'],
+ 'checked_pci_devices': self.de_duplication(sum(pci_devices, []))
+ }
+ })
+
+ def diff_passthrough_config(self):
+ """
+ Check there are differences between a new and existing config each ESXi host.
+ """
+ # Make the diff_config variable to check the difference between a new and existing config.
+ self.diff_config = dict(before={}, after={})
+
+ self.change_flag = False
+ self.host_target_device_to_change_configuration = {}
+ state = True if self.state == "present" else False
+ for host_has_checked_pci_devices in self.existent_devices:
+ for esxi_hostname, value in host_has_checked_pci_devices.items():
+ for key in 'before', 'after':
+ self.diff_config[key][esxi_hostname] = []
+ self.host_target_device_to_change_configuration[esxi_hostname] = {
+ 'host_obj': None,
+ 'new_configs': []
+ }
+ for target_device in self.devices:
+ device = target_device['device']
+ for checked_pci_device in value['checked_pci_devices']:
+ if device == checked_pci_device['device_name'] or device == checked_pci_device['device_id']:
+ before = dict(checked_pci_device)
+ after = dict(copy.deepcopy(checked_pci_device))
+
+ if state != checked_pci_device['passthruEnabled']:
+ self.change_flag = True
+ after['passthruEnabled'] = state
+
+ self.host_target_device_to_change_configuration[esxi_hostname]['new_configs'].append(after)
+ self.host_target_device_to_change_configuration[esxi_hostname]['host_obj'] = value['host_obj']
+ self.diff_config['before'][esxi_hostname].append(before)
+ self.diff_config['after'][esxi_hostname].append(after)
+
+ # De-duplicate pci device data and sort.
+ self.diff_config['before'][esxi_hostname] = sorted(
+ self.de_duplication(self.diff_config['before'][esxi_hostname]),
+ key=lambda d: d['device_name']
+ )
+ self.diff_config['after'][esxi_hostname] = sorted(
+ self.de_duplication(self.diff_config['after'][esxi_hostname]),
+ key=lambda d: d['device_name']
+ )
+
+ def generate_passthrough_configurations_to_be_applied(self):
+ """
+ Generate configs to enable or disable PCI device passthrough.
+ The configs are generated against only ESXi host has PCI device to be changed.
+ """
+ self.host_passthrough_configs = {}
+ for esxi_hostname, value in self.host_target_device_to_change_configuration.items():
+ self.host_passthrough_configs[esxi_hostname] = {
+ 'host_obj': value['host_obj'],
+ 'generated_new_configs': []
+ }
+ if value['new_configs']:
+ state = True if self.state == "present" else False
+ for new_config in value['new_configs']:
+ config = vim.host.PciPassthruConfig()
+ config.passthruEnabled = state
+ config.id = new_config['device_id']
+ self.host_passthrough_configs[esxi_hostname]['generated_new_configs'].append(config)
+
+ def de_duplication(self, data):
+ """
+ De-duplicate dictionaries in a list.
+ """
+ return [
+ dict(s) for s in set(frozenset(d.items()) for d in data)
+ ]
+
+ def execute(self):
+ self.collect_pci_device_ids_for_supported_passthrough()
+ self.collect_pci_devices_able_to_enable_passthrough()
+
+ self.check_whether_devices_exist()
+ if self.non_existent_devices:
+ self.module.fail_json(msg="Failed to fined device: %s" % list(set(self.non_existent_devices)))
+
+ self.diff_passthrough_config()
+ if self.change_flag and self.module.check_mode is False:
+ self.generate_passthrough_configurations_to_be_applied()
+ for value in self.host_passthrough_configs.values():
+ try:
+ host_obj = value['host_obj']
+ config = value['generated_new_configs']
+ host_obj.configManager.pciPassthruSystem.UpdatePassthruConfig(config)
+ except Exception as e:
+ self.module.fail_json(msg="Failed to operate PCI device passthrough: %s" % e)
+
+ # ESXi host configuration will be included in the result if it will be changed.
+ self.result['passthrough_configs'] = [
+ {
+ esxi_hostname: value['new_configs']
+ } for esxi_hostname, value in self.host_target_device_to_change_configuration.items() if value['new_configs']
+ ]
+ self.result['changed'] = self.change_flag
+ self.result['diff'] = self.diff_config
+ self.module.exit_json(**self.result)
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ cluster=dict(type='str', aliases=['cluster_name']),
+ esxi_hostname=dict(type='str'),
+ devices=dict(type='list', elements='dict', required=True,
+ options=dict(
+ device=dict(type='str', aliases=['name', 'device_name'])
+ )),
+ state=dict(type='str', default='present', choices=['present', 'absent'])
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ required_one_of=[
+ ['cluster', 'esxi_hostname']
+ ],
+ supports_check_mode=True)
+
+ vmware_host_passthrough = VMwareHostPassthrough(module)
+ vmware_host_passthrough.execute()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_powermgmt_policy.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_powermgmt_policy.py
new file mode 100644
index 000000000..6573e591c
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_powermgmt_policy.py
@@ -0,0 +1,224 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_powermgmt_policy
+short_description: Manages the Power Management Policy of an ESXi host system
+description:
+- This module can be used to manage the Power Management Policy of ESXi host systems in given vCenter infrastructure.
+author:
+- Christian Kotte (@ckotte) <christian.kotte@gmx.de>
+options:
+ policy:
+ description:
+ - Set the Power Management Policy of the host system.
+ choices: [ 'high-performance', 'balanced', 'low-power', 'custom' ]
+ default: 'balanced'
+ type: str
+ esxi_hostname:
+ description:
+ - Name of the host system to work with.
+ - This is required parameter if C(cluster_name) is not specified.
+ type: str
+ cluster_name:
+ description:
+ - Name of the cluster from which all host systems will be used.
+ - This is required parameter if C(esxi_hostname) is not specified.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Set the Power Management Policy of a host system to high-performance
+ community.vmware.vmware_host_powermgmt_policy:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_host }}'
+ policy: high-performance
+ delegate_to: localhost
+
+- name: Set the Power Management Policy of all host systems from cluster to high-performance
+ community.vmware.vmware_host_powermgmt_policy:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: '{{ cluster_name }}'
+ policy: high-performance
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+result:
+ description: metadata about host system's Power Management Policy
+ returned: always
+ type: dict
+ sample: {
+ "changed": true,
+ "result": {
+ "esxi01": {
+ "changed": true,
+ "current_state": "high-performance",
+ "desired_state": "high-performance",
+ "msg": "Power policy changed",
+ "previous_state": "balanced"
+ }
+ }
+ }
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class VmwareHostPowerManagement(PyVmomi):
+ """
+ Class to manage power management policy of an ESXi host system
+ """
+
+ def __init__(self, module):
+ super(VmwareHostPowerManagement, self).__init__(module)
+ cluster_name = self.params.get('cluster_name')
+ esxi_host_name = self.params.get('esxi_hostname')
+ self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
+ if not self.hosts:
+ self.module.fail_json(msg="Failed to find host system with given configuration.")
+
+ def ensure(self):
+ """
+ Manage power management policy of an ESXi host system
+ """
+ results = dict(changed=False, result=dict())
+ policy = self.params.get('policy')
+ host_change_list = []
+ power_policies = {
+ 'high-performance': {
+ 'key': 1,
+ 'short_name': 'static'
+ },
+ 'balanced': {
+ 'key': 2,
+ 'short_name': 'dynamic'
+ },
+ 'low-power': {
+ 'key': 3,
+ 'short_name': 'low'
+ },
+ 'custom': {
+ 'key': 4,
+ 'short_name': 'custom'
+ }
+ }
+
+ for host in self.hosts:
+ changed = False
+ results['result'][host.name] = dict(msg='')
+
+ power_system = host.configManager.powerSystem
+
+ # get current power policy
+ power_system_info = power_system.info
+ current_host_power_policy = power_system_info.currentPolicy
+
+ # the "name" and "description" parameters are pretty useless
+ # they store only strings containing "PowerPolicy.<shortName>.name" and "PowerPolicy.<shortName>.description"
+ if current_host_power_policy.shortName == "static":
+ current_policy = 'high-performance'
+ elif current_host_power_policy.shortName == "dynamic":
+ current_policy = 'balanced'
+ elif current_host_power_policy.shortName == "low":
+ current_policy = 'low-power'
+ elif current_host_power_policy.shortName == "custom":
+ current_policy = 'custom'
+
+ results['result'][host.name]['desired_state'] = policy
+
+ # Don't do anything if the power policy is already configured
+ if current_host_power_policy.key == power_policies[policy]['key']:
+ results['result'][host.name]['changed'] = changed
+ results['result'][host.name]['previous_state'] = current_policy
+ results['result'][host.name]['current_state'] = policy
+ results['result'][host.name]['msg'] = "Power policy is already configured"
+ else:
+ # get available power policies and check if policy is included
+ supported_policy = False
+ power_system_capability = power_system.capability
+ available_host_power_policies = power_system_capability.availablePolicy
+ for available_policy in available_host_power_policies:
+ if available_policy.shortName == power_policies[policy]['short_name']:
+ supported_policy = True
+ if supported_policy:
+ if not self.module.check_mode:
+ try:
+ power_system.ConfigurePowerPolicy(key=power_policies[policy]['key'])
+ changed = True
+ results['result'][host.name]['changed'] = True
+ results['result'][host.name]['msg'] = "Power policy changed"
+ except vmodl.fault.InvalidArgument:
+ self.module.fail_json(msg="Invalid power policy key provided for host '%s'" % host.name)
+ except vim.fault.HostConfigFault as host_config_fault:
+ self.module.fail_json(msg="Failed to configure power policy for host '%s': %s" %
+ (host.name, to_native(host_config_fault.msg)))
+ else:
+ changed = True
+ results['result'][host.name]['changed'] = True
+ results['result'][host.name]['msg'] = "Power policy will be changed"
+ results['result'][host.name]['previous_state'] = current_policy
+ results['result'][host.name]['current_state'] = policy
+ else:
+ changed = False
+ results['result'][host.name]['changed'] = changed
+ results['result'][host.name]['previous_state'] = current_policy
+ results['result'][host.name]['current_state'] = current_policy
+ self.module.fail_json(msg="Power policy '%s' isn't supported for host '%s'" %
+ (policy, host.name))
+
+ host_change_list.append(changed)
+
+ if any(host_change_list):
+ results['changed'] = True
+ self.module.exit_json(**results)
+
+
+def main():
+ """
+ Main
+ """
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ policy=dict(type='str', default='balanced',
+ choices=['high-performance', 'balanced', 'low-power', 'custom']),
+ esxi_hostname=dict(type='str', required=False),
+ cluster_name=dict(type='str', required=False),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ required_one_of=[
+ ['cluster_name', 'esxi_hostname'],
+ ],
+ supports_check_mode=True
+ )
+
+ host_power_management = VmwareHostPowerManagement(module)
+ host_power_management.ensure()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_powerstate.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_powerstate.py
new file mode 100644
index 000000000..a68cac141
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_powerstate.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_powerstate
+short_description: Manages power states of host systems in vCenter
+description:
+- This module can be used to manage power states of host systems in given vCenter infrastructure.
+- User can set power state to 'power-down-to-standby', 'power-up-from-standby', 'shutdown-host' and 'reboot-host'.
+- State 'reboot-host', 'shutdown-host' and 'power-down-to-standby' are not supported by all the host systems.
+author:
+- Abhijeet Kasurde (@Akasurde)
+options:
+ state:
+ description:
+ - Set the state of the host system.
+ choices: [ 'power-down-to-standby', 'power-up-from-standby', 'shutdown-host', 'reboot-host' ]
+ default: 'shutdown-host'
+ type: str
+ esxi_hostname:
+ description:
+ - Name of the host system to work with.
+ - This is required parameter if C(cluster_name) is not specified.
+ type: str
+ cluster_name:
+ description:
+ - Name of the cluster from which all host systems will be used.
+ - This is required parameter if C(esxi_hostname) is not specified.
+ type: str
+ force:
+ description:
+ - 'This parameter specify if the host should be proceeding with user defined powerstate
+ regardless of whether it is in maintenance mode.'
+ - 'If C(state) set to C(reboot-host) and C(force) as C(true), then host system is rebooted regardless of whether it is in maintenance mode.'
+ - 'If C(state) set to C(shutdown-host) and C(force) as C(true), then host system is shutdown regardless of whether it is in maintenance mode.'
+  - 'If C(state) set to C(power-down-to-standby) and C(force) to C(true), then all powered off VMs will be evacuated.'
+ - 'Not applicable if C(state) set to C(power-up-from-standby).'
+ type: bool
+ default: false
+ timeout:
+ description:
+ - 'This parameter defines timeout for C(state) set to C(power-down-to-standby) or C(power-up-from-standby).'
+ - 'Ignored if C(state) set to C(reboot-host) or C(shutdown-host).'
+ - 'This parameter is defined in seconds.'
+ default: 600
+ type: int
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Set the state of a host system to reboot
+ community.vmware.vmware_host_powerstate:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ state: reboot-host
+ delegate_to: localhost
+ register: reboot_host
+
+- name: Set the state of a host system to power down to standby
+ community.vmware.vmware_host_powerstate:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ state: power-down-to-standby
+ delegate_to: localhost
+ register: power_down
+
+- name: Set the state of all host systems from cluster to reboot
+ community.vmware.vmware_host_powerstate:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: '{{ cluster_name }}'
+ state: reboot-host
+ delegate_to: localhost
+ register: reboot_host
+'''
+
+RETURN = r'''
+result:
+ description: metadata about host system's state
+ returned: always
+ type: dict
+ sample: {
+ "esxi01": {
+ "msg": "power down 'esxi01' to standby",
+ "error": "",
+ },
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task, TaskError
+from ansible.module_utils._text import to_native
+
+
+class VmwareHostPowerManager(PyVmomi):
+ def __init__(self, module):
+ super(VmwareHostPowerManager, self).__init__(module)
+ cluster_name = self.params.get('cluster_name')
+ esxi_host_name = self.params.get('esxi_hostname')
+ self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
+ if not self.hosts:
+ self.module.fail_json(msg="Failed to find host system with given configuration.")
+
+ def ensure(self):
+ """
+ Function to manage internal state of host system
+
+ """
+ results = dict(changed=False, result=dict())
+ state = self.params.get('state')
+ force = self.params.get('force')
+ timeout = self.params.get('timeout')
+ host_change_list = []
+ for host in self.hosts:
+ changed = False
+ if not host.runtime.inMaintenanceMode and not force:
+ self.module.fail_json(msg="Current host system '%s' is not in maintenance mode,"
+ " please specify 'force' as True to proceed." % host.name)
+ if host.runtime.connectionState == 'notResponding':
+ self.module.fail_json(msg="Current host system '%s' can not be set in '%s'"
+ " mode as the host system is not responding." % (host.name, state))
+
+ results['result'][host.name] = dict(msg='', error='')
+ if state == 'reboot-host' and not host.capability.rebootSupported:
+ self.module.fail_json(msg="Current host '%s' can not be rebooted as the host system"
+ " does not have capability to reboot." % host.name)
+ elif state == 'shutdown-host' and not host.capability.shutdownSupported:
+ self.module.fail_json(msg="Current host '%s' can not be shut down as the host system"
+ " does not have capability to shut down." % host.name)
+ elif state in ['power-down-to-standby', 'power-up-from-standby'] and not host.capability.standbySupported:
+ self.module.fail_json(msg="Current host '%s' can not be '%s' as the host system"
+ " does not have capability to standby supported." % (host.name, state))
+
+ if state == 'reboot-host':
+ if not self.module.check_mode:
+ task = host.RebootHost_Task(force)
+ verb = "reboot '%s'" % host.name
+ elif state == 'shutdown-host':
+ if not self.module.check_mode:
+ task = host.ShutdownHost_Task(force)
+ verb = "shutdown '%s'" % host.name
+ elif state == 'power-down-to-standby':
+ if not self.module.check_mode:
+ task = host.PowerDownHostToStandBy_Task(timeout, force)
+ verb = "power down '%s' to standby" % host.name
+ elif state == 'power-up-from-standby':
+ if not self.module.check_mode:
+ task = host.PowerUpHostFromStandBy_Task(timeout)
+ verb = "power up '%s' from standby" % host.name
+
+ if not self.module.check_mode:
+ try:
+ success, result = wait_for_task(task)
+ if success:
+ changed = True
+ results['result'][host.name]['msg'] = verb
+ else:
+ results['result'][host.name]['error'] = result
+ except TaskError as task_error:
+ self.module.fail_json(msg="Failed to %s as host system due to : %s" % (verb,
+ str(task_error)))
+ except Exception as generic_exc:
+ self.module.fail_json(msg="Failed to %s due to generic exception : %s" % (host.name,
+ to_native(generic_exc)))
+ else:
+ # Check mode
+ changed = True
+ results['result'][host.name]['msg'] = verb
+
+ host_change_list.append(changed)
+
+ if any(host_change_list):
+ results['changed'] = True
+ self.module.exit_json(**results)
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ state=dict(type='str', default='shutdown-host',
+ choices=['power-down-to-standby', 'power-up-from-standby', 'shutdown-host', 'reboot-host']),
+ esxi_hostname=dict(type='str', required=False),
+ cluster_name=dict(type='str', required=False),
+ force=dict(type='bool', default=False),
+ timeout=dict(type='int', default=600),
+
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['cluster_name', 'esxi_hostname'],
+ ]
+ )
+
+ host_power_manager = VmwareHostPowerManager(module)
+ host_power_manager.ensure()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_scanhba.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_scanhba.py
new file mode 100644
index 000000000..0dfa1d707
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_scanhba.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_scanhba
+short_description: Rescan host HBAs and optionally refresh the storage system
+description:
+- This module can force a rescan of the hosts HBA subsystem which is needed when wanting to mount a new datastore.
+- You could use this before using M(community.vmware.vmware_host_datastore) to mount a new datastore to ensure your device/volume is ready.
+- You can also optionally force a Refresh of the Storage System in vCenter/ESXi Web Client.
+- All parameters and VMware object names are case sensitive.
+- You can supply an esxi_hostname or a cluster_name
+author:
+- Michael Eaton (@michaeldeaton)
+options:
+ esxi_hostname:
+ description:
+ - ESXi hostname to Rescan the storage subsystem on.
+ required: false
+ type: str
+ cluster_name:
+ description:
+ - Cluster name to Rescan the storage subsystem on (this will run the rescan task on each host in the cluster).
+ required: false
+ type: str
+ rescan_hba:
+ description:
+ - Rescan all host bus adapters for new storage devices. Rescanning all adapters can be slow.
+ required: false
+ default: true
+ type: bool
+ refresh_storage:
+ description:
+ - Refresh the storage system in vCenter/ESXi Web Client for each host found
+ required: false
+ default: false
+ type: bool
+ rescan_vmfs:
+ description:
+ - Rescan all known storage devices for new VMFS volumes.
+ required: false
+ default: false
+ type: bool
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Rescan HBAs for a given ESXi host and refresh storage system objects
+ community.vmware.vmware_host_scanhba:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ inventory_hostname }}'
+ refresh_storage: true
+ delegate_to: localhost
+
+- name: Rescan HBA's for a given cluster - all found hosts will be scanned
+ community.vmware.vmware_host_scanhba:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ inventory_hostname }}'
+ refresh_storage: true
+ delegate_to: localhost
+
+- name: Rescan for new VMFS Volumes in a given cluster, but do not scan for new Devices - all found hosts will be scanned
+ community.vmware.vmware_host_scanhba:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ inventory_hostname }}'
+ rescan_vmfs: true
+ rescan_hba: false
+ delegate_to: localhost
+
+- name: Rescan HBAs for a given ESXi host and don't refresh storage system objects
+ community.vmware.vmware_host_scanhba:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ inventory_hostname }}'
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+result:
+ description: return confirmation of requested host and updated / refreshed storage system
+ returned: always
+ type: dict
+ sample: {
+ "esxi01.example.com": {
+ "rescaned_hba": "true",
+ "refreshed_storage": "true",
+ "rescaned_vmfs": "true"
+ }
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+
+
+class VmwareHbaScan(PyVmomi):
+ def __init__(self, module):
+ super(VmwareHbaScan, self).__init__(module)
+
+ def scan(self):
+ esxi_host_name = self.params.get('esxi_hostname', None)
+ cluster_name = self.params.get('cluster_name', None)
+ rescan_hba = self.params.get('rescan_hba', bool)
+ refresh_storage = self.params.get('refresh_storage', bool)
+ rescan_vmfs = self.params.get('rescan_vmfs', bool)
+ hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
+ results = dict(changed=True, result=dict())
+
+ if not hosts:
+ self.module.fail_json(msg="Failed to find any hosts.")
+
+ for host in hosts:
+ results['result'][host.name] = dict()
+ if rescan_hba is True:
+ host.configManager.storageSystem.RescanAllHba()
+
+ if refresh_storage is True:
+ host.configManager.storageSystem.RefreshStorageSystem()
+
+ if rescan_vmfs is True:
+ host.configManager.storageSystem.RescanVmfs()
+
+ results['result'][host.name]['rescaned_hba'] = rescan_hba
+ results['result'][host.name]['refreshed_storage'] = refresh_storage
+ results['result'][host.name]['rescaned_vmfs'] = rescan_vmfs
+
+ self.module.exit_json(**results)
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ esxi_hostname=dict(type='str', required=False),
+ cluster_name=dict(type='str', required=False),
+ rescan_hba=dict(type='bool', default=True, required=False),
+ refresh_storage=dict(type='bool', default=False, required=False),
+ rescan_vmfs=dict(type='bool', default=False, required=False)
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=[
+ ['cluster_name', 'esxi_hostname'],
+ ],
+ supports_check_mode=False
+ )
+
+ hbascan = VmwareHbaScan(module)
+ hbascan.scan()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_scsidisk_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_scsidisk_info.py
new file mode 100644
index 000000000..00cfb027a
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_scsidisk_info.py
@@ -0,0 +1,173 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2020, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_scsidisk_info
+short_description: Gather information about SCSI disk attached to the given ESXi
+description:
+- This module can be used to gather information about SCSI disk attached to the given ESXi.
+author:
+- Abhijeet Kasurde (@Akasurde)
+options:
+ esxi_hostname:
+ description:
+ - Name of the host system to work with.
+ - SCSI disk information about this ESXi server will be returned.
+ - This parameter is required if I(cluster_name) is not specified.
+ type: str
+ cluster_name:
+ description:
+ - Name of the cluster from which all host systems will be used.
+ - SCSI disk information about each ESXi server will be returned for the given cluster.
+ - This parameter is required if I(esxi_hostname) is not specified.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: Gather information about SCSI disks attached to the given ESXi
+ community.vmware.vmware_host_scsidisk_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ delegate_to: localhost
+
+- name: Gather information of all host systems from the given cluster
+ community.vmware.vmware_host_scsidisk_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: '{{ cluster_name }}'
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+hosts_scsidisk_info:
+ description: metadata about host system SCSI disk information
+ returned: always
+ type: dict
+ sample: {
+ "10.65.201.106": [
+ {
+ "block": 41943040,
+ "block_size": 512,
+ "canonical_name": "t10.ATA_QEMU_HARDDISK_QM00001_",
+ "device_name": "/vmfs/devices/disks/t10.ATA_QEMU_HARDDISK_QM00001_",
+ "device_path": "/vmfs/devices/disks/t10.ATA_QEMU_HARDDISK_QM00001_",
+ "device_type": "disk",
+ "display_name": "Local ATA Disk (t10.ATA_QEMU_HARDDISK_QM00001_)",
+ "key": "key-vim.host.ScsiDisk-0100000000514d30303030312020202020202020202020202051454d552048",
+ "local_disk": true,
+ "lun_type": "disk",
+ "model": "QEMU HARDDISK ",
+ "perenniallyReserved": null,
+ "protocol_endpoint": false,
+ "revision": "1.5.",
+ "scsi_disk_type": "native512",
+ "scsi_level": 5,
+ "serial_number": "unavailable",
+ "ssd": false,
+ "uuid": "0100000000514d30303030312020202020202020202020202051454d552048",
+ "vStorageSupport": "vStorageUnsupported",
+ "vendor": "ATA "
+ }
+ ]
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+
+
class VmwareHostDiskManager(PyVmomi):
    """Gather SCSI disk facts from ESXi hosts selected by cluster or hostname."""

    def __init__(self, module):
        """Resolve the target host systems; fail when none match."""
        super(VmwareHostDiskManager, self).__init__(module)
        self.hosts = self.get_all_host_objs(
            cluster_name=self.params.get('cluster_name'),
            esxi_host_name=self.params.get('esxi_hostname'),
        )
        if not self.hosts:
            self.module.fail_json(msg="Failed to find host system with given configuration.")

    def gather_disk_info(self):
        """Collect SCSI LUN details for every host and exit the module with them."""
        results = dict(changed=False, hosts_scsidisk_info=dict())
        for host_obj in self.hosts:
            luns = host_obj.configManager.storageSystem.storageDeviceInfo.scsiLun
            results['hosts_scsidisk_info'][host_obj.name] = [self._lun_facts(lun) for lun in luns]
        self.module.exit_json(**results)

    @staticmethod
    def _lun_facts(disk):
        """Map a single SCSI LUN object to a plain fact dictionary."""
        facts = {
            'device_name': disk.deviceName,
            'device_type': disk.deviceType,
            'key': disk.key,
            'uuid': disk.uuid,
            'canonical_name': disk.canonicalName,
            'display_name': disk.displayName,
            'lun_type': disk.lunType,
            'vendor': disk.vendor,
            'model': disk.model,
            'revision': disk.revision,
            'scsi_level': disk.scsiLevel,
            'serial_number': disk.serialNumber,
            'vStorageSupport': disk.vStorageSupport,
            'protocol_endpoint': disk.protocolEndpoint,
            'perenniallyReserved': disk.perenniallyReserved,
            # Optional attributes: fall back to fixed defaults when absent.
            'device_path': getattr(disk, 'devicePath', ''),
            'ssd': getattr(disk, 'ssd', False),
            'local_disk': getattr(disk, 'localDisk', False),
            'scsi_disk_type': getattr(disk, 'scsiDiskType', None),
            'block_size': None,
            'block': None,
        }
        if hasattr(disk, 'capacity'):
            facts['block_size'] = disk.capacity.blockSize
            facts['block'] = disk.capacity.block
        return facts
+
+
def main():
    """Module entry point: parse arguments and gather SCSI disk facts."""
    spec = vmware_argument_spec()
    spec.update(
        esxi_hostname=dict(type='str', required=False),
        cluster_name=dict(type='str', required=False),
    )

    module = AnsibleModule(
        argument_spec=spec,
        supports_check_mode=True,
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ]
    )

    VmwareHostDiskManager(module).gather_disk_info()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_service_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_service_info.py
new file mode 100644
index 000000000..fe3b83573
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_service_info.py
@@ -0,0 +1,147 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_service_info
+short_description: Gathers info about an ESXi host's services
+description:
+- This module can be used to gather information about an ESXi host's services.
+author:
+- Abhijeet Kasurde (@Akasurde)
+notes:
+- If source package name is not available then fact is populated as null.
+options:
+ cluster_name:
+ description:
+ - Name of the cluster.
+ - Service information about each ESXi server will be returned for given cluster.
+ - If C(esxi_hostname) is not given, this parameter is required.
+ type: str
+ esxi_hostname:
+ description:
+ - ESXi hostname.
+ - Service information about this ESXi server will be returned.
+ - If C(cluster_name) is not given, this parameter is required.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather info about all ESXi Host in given Cluster
+ community.vmware.vmware_host_service_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: cluster_name
+ delegate_to: localhost
+ register: cluster_host_services
+
+- name: Gather info about ESXi Host
+ community.vmware.vmware_host_service_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ delegate_to: localhost
+ register: host_services
+'''
+
+RETURN = r'''
+host_service_info:
+ description:
+ - dict with hostname as key and dict with host service config information
+ returned: always
+ type: dict
+ sample: {
+ "10.76.33.226": [
+ {
+ "key": "DCUI",
+ "label": "Direct Console UI",
+ "policy": "on",
+ "required": false,
+ "running": true,
+ "uninstallable": false,
+ "source_package_name": "esx-base",
+ "source_package_desc": "This VIB contains all of the base functionality of vSphere ESXi."
+ },
+ {
+ "key": "TSM",
+ "label": "ESXi Shell",
+ "policy": "off",
+ "required": false,
+ "running": false,
+ "uninstallable": false,
+ "source_package_name": "esx-base",
+ "source_package_desc": "This VIB contains all of the base functionality of vSphere ESXi."
+        }
+ ]
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+
+
class VmwareServiceManager(PyVmomi):
    """Collect service configuration facts from the selected ESXi hosts."""

    def __init__(self, module):
        """Resolve target hosts from ``cluster_name`` / ``esxi_hostname``."""
        super(VmwareServiceManager, self).__init__(module)
        self.hosts = self.get_all_host_objs(
            cluster_name=self.params.get('cluster_name', None),
            esxi_host_name=self.params.get('esxi_hostname', None),
        )

    def gather_host_info(self):
        """Return a dict keyed by host name, each value a list of service facts."""
        hosts_info = {}
        for host in self.hosts:
            service_facts = []
            service_system = host.configManager.serviceSystem
            if service_system and service_system.serviceInfo:
                for svc in service_system.serviceInfo.service:
                    pkg = svc.sourcePackage
                    service_facts.append(
                        dict(
                            key=svc.key,
                            label=svc.label,
                            required=svc.required,
                            uninstallable=svc.uninstallable,
                            running=svc.running,
                            policy=svc.policy,
                            # Source package may be absent -> report null facts.
                            source_package_name=pkg.sourcePackageName if pkg else None,
                            source_package_desc=pkg.description if pkg else None,
                        )
                    )
            hosts_info[host.name] = service_facts
        return hosts_info
+
+
def main():
    """Module entry point: parse arguments and report host service facts."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
    )

    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ],
        supports_check_mode=True,
    )

    manager = VmwareServiceManager(module)
    module.exit_json(changed=False, host_service_info=manager.gather_host_info())


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_service_manager.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_service_manager.py
new file mode 100644
index 000000000..543ffcf31
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_service_manager.py
@@ -0,0 +1,219 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_service_manager
+short_description: Manage services on a given ESXi host
+description:
+- This module can be used to manage (start, stop, restart) services on a given ESXi host.
+- If cluster_name is provided, specified service will be managed on all ESXi hosts belonging to that cluster.
+- If specific esxi_hostname is provided, then specified service will be managed on given ESXi host only.
+author:
+- Abhijeet Kasurde (@Akasurde)
+options:
+ cluster_name:
+ description:
+ - Name of the cluster.
+ - Service settings are applied to every ESXi host system/s in given cluster.
+ - If C(esxi_hostname) is not given, this parameter is required.
+ type: str
+ esxi_hostname:
+ description:
+ - ESXi hostname.
+ - Service settings are applied to this ESXi host system.
+ - If C(cluster_name) is not given, this parameter is required.
+ type: str
+ state:
+ description:
+ - Desired state of service.
+ - "State value 'start' and 'present' has same effect."
+ - "State value 'stop' and 'absent' has same effect."
+ - State value C(unchanged) is added in version 1.14.0 to allow defining startup policy without defining or changing service state.
+ choices: [ absent, present, restart, start, stop, unchanged ]
+ type: str
+ default: 'start'
+ service_policy:
+ description:
+ - Set of valid service policy strings.
+ - If set C(on), then service should be started when the host starts up.
+ - If set C(automatic), then service should run if and only if it has open firewall ports.
+ - If set C(off), then Service should not be started when the host starts up.
+ choices: [ 'automatic', 'off', 'on' ]
+ type: str
+ service_name:
+ description:
+ - Name of Service to be managed. This is a brief identifier for the service, for example, ntpd, vxsyslogd etc.
+ - This value should be a valid ESXi service name.
+ required: true
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Start ntpd service setting for all ESXi Host in given Cluster
+ community.vmware.vmware_host_service_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: '{{ cluster_name }}'
+ service_name: ntpd
+ state: present
+ delegate_to: localhost
+
+- name: Start ntpd setting for an ESXi Host
+ community.vmware.vmware_host_service_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ service_name: ntpd
+ state: present
+ delegate_to: localhost
+
+- name: Start ntpd setting for an ESXi Host with Service policy
+ community.vmware.vmware_host_service_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ service_name: ntpd
+ service_policy: 'on'
+ state: present
+ delegate_to: localhost
+
+- name: Stop ntpd setting for an ESXi Host
+ community.vmware.vmware_host_service_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ service_name: ntpd
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = r'''#
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+from ansible.module_utils._text import to_native
+
+
class VmwareServiceManager(PyVmomi):
    """Start/stop/restart an ESXi service and manage its startup policy."""

    def __init__(self, module):
        """Resolve target hosts and cache the requested service, state and policy.

        Hosts are selected by ``cluster_name`` or ``esxi_hostname``.
        """
        super(VmwareServiceManager, self).__init__(module)
        cluster_name = self.params.get('cluster_name', None)
        esxi_host_name = self.params.get('esxi_hostname', None)
        # Removed dead code: the old `self.options = self.params.get('options', ...)`
        # read a parameter that is not declared in the argument spec and was
        # never used anywhere in this module.
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        self.desired_state = self.params.get('state')
        self.desired_policy = self.params.get('service_policy', None)
        self.service_name = self.params.get('service_name')
        # Per-host result details, keyed by host name.
        self.results = {}

    def service_ctrl(self):
        """Apply the desired service state/policy on every host and exit the module.

        Honours check mode; per-host vim faults are captured into the results
        instead of failing the whole run. ``state=unchanged`` only applies the
        startup policy (if given) without touching the service state.
        """
        changed = False
        host_service_state = []
        for host in self.hosts:
            # fail_jsons when the service name is unknown on this host.
            actual_service_state, actual_service_policy = self.check_service_state(host=host, service_name=self.service_name)
            host_service_system = host.configManager.serviceSystem
            if host_service_system:
                changed_state = False
                self.results[host.name] = dict(service_name=self.service_name,
                                               actual_service_state='running' if actual_service_state else 'stopped',
                                               actual_service_policy=actual_service_policy,
                                               desired_service_policy=self.desired_policy,
                                               desired_service_state=self.desired_state,
                                               error='',
                                               )
                try:
                    if self.desired_state in ['start', 'present']:
                        if not actual_service_state:
                            if not self.module.check_mode:
                                host_service_system.StartService(id=self.service_name)
                            changed_state = True
                    elif self.desired_state in ['stop', 'absent']:
                        if actual_service_state:
                            if not self.module.check_mode:
                                host_service_system.StopService(id=self.service_name)
                            changed_state = True
                    elif self.desired_state == 'restart':
                        # Restart is unconditional: it always reports a change.
                        if not self.module.check_mode:
                            host_service_system.RestartService(id=self.service_name)
                        changed_state = True

                    if self.desired_policy:
                        if actual_service_policy != self.desired_policy:
                            if not self.module.check_mode:
                                host_service_system.UpdateServicePolicy(id=self.service_name,
                                                                        policy=self.desired_policy)
                            changed_state = True

                    host_service_state.append(changed_state)
                    self.results[host.name].update(changed=changed_state)
                except (vim.fault.InvalidState, vim.fault.NotFound,
                        vim.fault.HostConfigFault, vmodl.fault.InvalidArgument) as e:
                    self.results[host.name].update(changed=False,
                                                   error=to_native(e.msg))

        if any(host_service_state):
            changed = True
        self.module.exit_json(changed=changed, host_service_status=self.results)

    def check_service_state(self, host, service_name):
        """Return ``(running, policy)`` for *service_name* on *host*.

        Fails the module when the service does not exist on the host.
        """
        host_service_system = host.configManager.serviceSystem
        if host_service_system:
            services = host_service_system.serviceInfo.service
            for service in services:
                if service.key == service_name:
                    return service.running, service.policy

        msg = "Failed to find '%s' service on host system '%s'" % (service_name, host.name)
        cluster_name = self.params.get('cluster_name', None)
        if cluster_name:
            msg += " located on cluster '%s'" % cluster_name
        msg += ", please check if you have specified a valid ESXi service name."
        self.module.fail_json(msg=msg)
+
+
def main():
    """Module entry point: parse arguments and drive the service manager."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
        state=dict(type='str', default='start', choices=['absent', 'present', 'restart', 'start', 'stop', 'unchanged']),
        service_name=dict(type='str', required=True),
        service_policy=dict(type='str', choices=['automatic', 'off', 'on']),
    )

    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ],
        supports_check_mode=True
    )

    VmwareServiceManager(module).service_ctrl()


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_snmp.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_snmp.py
new file mode 100644
index 000000000..e0bfc404f
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_snmp.py
@@ -0,0 +1,535 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_snmp
+short_description: Configures SNMP on an ESXi host system
+description:
+- This module can be used to configure the embedded SNMP agent on an ESXi host.
+author:
+- Christian Kotte (@ckotte)
+notes:
+- You need to reset the agent (to factory defaults) if you want to clear all community strings, trap targets, or filters
+- SNMP v3 configuration isn't implemented yet
+options:
+ state:
+ description:
+ - Enable, disable, or reset the SNMP agent.
+ type: str
+ choices: [ disabled, enabled, reset ]
+ default: disabled
+ community:
+ description:
+ - List of SNMP community strings.
+ type: list
+ default: []
+ elements: str
+ snmp_port:
+ description:
+ - Port used by the SNMP agent.
+ type: int
+ default: 161
+ trap_targets:
+ description:
+ - A list of trap targets.
+ - You need to use C(hostname), C(port), and C(community) for each trap target.
+ default: []
+ type: list
+ elements: dict
+ trap_filter:
+ description:
+ - A list of trap oids for traps not to be sent by agent,
+ e.g. [ 1.3.6.1.4.1.6876.4.1.1.0, 1.3.6.1.4.1.6876.4.1.1.1 ]
+ - Use value C(reset) to clear settings.
+ type: list
+ elements: str
+ send_trap:
+ description:
+ - Send a test trap to validate the configuration.
+ type: bool
+ default: false
+ hw_source:
+ description:
+ - Source hardware events from IPMI sensors or CIM Indications.
+ - The embedded SNMP agent receives hardware events either from IPMI sensors C(sensors) or CIM indications C(indications).
+ type: str
+ choices: [ indications, sensors ]
+ default: indications
+ log_level:
+ description:
+ - Syslog logging level.
+ type: str
+ choices: [ debug, info, warning, error ]
+ default: info
+ sys_contact:
+ description:
+ - System contact who manages the system.
+ type: str
+ sys_location:
+ description:
+ - System location.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Enable and configure SNMP community
+ community.vmware.vmware_host_snmp:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ community: [ test ]
+ state: enabled
+ delegate_to: localhost
+
+- name: Configure SNMP traps and filters
+ community.vmware.vmware_host_snmp:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ community: [ test ]
+ trap_targets:
+ - hostname: 192.168.1.100
+ port: 162
+ community: test123
+ - hostname: 192.168.1.101
+ port: 162
+ community: test1234
+ trap_filter:
+ - 1.3.6.1.4.1.6876.4.1.1.0
+ - 1.3.6.1.4.1.6876.4.1.1.1
+ state: enabled
+ delegate_to: localhost
+
+- name: Enable and configure SNMP system contact and location
+ community.vmware.vmware_host_snmp:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ sys_contact: "admin@testemail.com"
+ sys_location: "Austin, USA"
+ state: enabled
+ delegate_to: localhost
+
+- name: Disable SNMP
+ community.vmware.vmware_host_snmp:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ state: disabled
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+results:
+ description: metadata about host system's SNMP configuration
+ returned: always
+ type: dict
+ sample: {
+ "esxi01": {
+ "changed": false,
+ "community": ["test"],
+ "hw_source": "indications",
+ "msg": "SNMP already configured properly",
+ "port": 161,
+ "state": "enabled",
+ "trap_targets": []
+ },
+ }
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, find_obj
+from ansible.module_utils._text import to_native
+
+
+class VmwareHostSnmp(PyVmomi):
+ """Manage SNMP configuration for an ESXi host system"""
+
+ def __init__(self, module):
+ super(VmwareHostSnmp, self).__init__(module)
+
+ if self.is_vcenter():
+ self.module.fail_json(
+ msg="You have to connect directly to the ESXi host. "
+ "It's not possible to configure SNMP through a vCenter connection."
+ )
+ else:
+ self.host = find_obj(self.content, [vim.HostSystem], None)
+ if self.host is None:
+ self.module.fail_json(msg="Failed to find host system.")
+
+ def ensure(self):
+ """Manage SNMP configuration for an ESXi host system"""
+ results = dict(changed=False, result=dict())
+ snmp_state = self.params.get('state')
+ snmp_port = self.params.get('snmp_port')
+ community = self.params.get('community')
+ desired_trap_targets = self.params.get("trap_targets")
+ hw_source = self.params.get("hw_source")
+ log_level = self.params.get("log_level")
+ send_trap = self.params.get("send_trap")
+ trap_filter = self.params.get("trap_filter")
+ sys_contact = self.params.get("sys_contact")
+ sys_location = self.params.get("sys_location")
+ event_filter = None
+ if trap_filter:
+ event_filter = ';'.join(trap_filter)
+ changed = False
+ reset_hint = None
+ changed_list = []
+ results = dict(msg='')
+
+ snmp_system = self.host.configManager.snmpSystem
+ if snmp_system:
+ if snmp_system.configuration:
+ snmp_config_spec = snmp_system.configuration
+ else:
+ self.module.fail_json(msg="SNMP agent configuration isn't supported on the ESXi host")
+ else:
+ self.module.fail_json(msg="SNMP system isn't available on the ESXi host")
+
+ # Check state
+ results['state'] = snmp_state
+ if snmp_state == 'reset':
+ changed = True
+ # Get previous config
+ if snmp_config_spec.enabled:
+ results['state_previous'] = 'enabled'
+ else:
+ results['state_previous'] = 'disabled'
+ results['port_previous'] = snmp_config_spec.port
+ results['community_previous'] = snmp_config_spec.readOnlyCommunities
+ results['trap_targets_previous'] = self.get_previous_targets(snmp_config_spec.trapTargets)
+ for option in snmp_config_spec.option:
+ if option.key == 'EnvEventSource' and option.value != hw_source:
+ results['hw_source_previous'] = option.value
+ if option.key == 'loglevel' and option.value != hw_source:
+ results['log_level_previous'] = option.value
+ if option.key == 'EventFilter' and option.value != hw_source:
+ results['trap_filter_previous'] = option.value.split(';')
+ if option.key == 'syscontact' and option.value != hw_source:
+ results['syscontact_previous'] = option.value
+ if option.key == 'syslocation' and option.value != hw_source:
+ results['syslocation_previous'] = option.value
+ # Build factory default config
+ destination = vim.host.SnmpSystem.SnmpConfigSpec.Destination()
+ destination.hostName = ""
+ destination.port = 0
+ destination.community = ""
+ options = []
+ options.append(self.create_option('EnvEventSource', 'indications'))
+ options.append(self.create_option('EventFilter', 'reset'))
+ snmp_config_spec = vim.host.SnmpSystem.SnmpConfigSpec()
+ # Looks like this value is causing the reset
+ snmp_config_spec.readOnlyCommunities = [""]
+ snmp_config_spec.trapTargets = [destination]
+ snmp_config_spec.port = 161
+ snmp_config_spec.enabled = False
+ snmp_config_spec.option = options
+ else:
+ if snmp_state == 'enabled' and not snmp_config_spec.enabled:
+ changed = True
+ changed_list.append("state")
+ results['state_previous'] = 'disabled'
+ snmp_config_spec.enabled = True
+ elif snmp_state == 'disabled' and snmp_config_spec.enabled:
+ changed = True
+ changed_list.append("state")
+ results['state_previous'] = 'enabled'
+ snmp_config_spec.enabled = False
+
+ # Check port
+ results['port'] = snmp_port
+ if snmp_config_spec.port != snmp_port:
+ changed = True
+ changed_list.append("port")
+ results['port_previous'] = snmp_config_spec.port
+ snmp_config_spec.port = snmp_port
+
+ # Check read-only community strings
+ results['community'] = community
+ if snmp_config_spec.readOnlyCommunities != community:
+ changed = True
+ changed_list.append("community list")
+ results['community_previous'] = snmp_config_spec.readOnlyCommunities
+ if community:
+ snmp_config_spec.readOnlyCommunities = community
+ else:
+ # Doesn't work. Need to reset config instead
+ # snmp_config_spec.readOnlyCommunities = []
+ reset_hint = True
+
+ # Check trap targets
+ results['trap_targets'] = desired_trap_targets
+ if snmp_config_spec.trapTargets:
+ if desired_trap_targets:
+ temp_desired_targets = []
+ # Loop through desired targets
+ for target in desired_trap_targets:
+ dest_hostname, dest_port, dest_community = self.check_if_options_are_valid(target)
+ trap_target_found = False
+ for trap_target in snmp_config_spec.trapTargets:
+ if trap_target.hostName == dest_hostname:
+ if trap_target.port != dest_port or trap_target.community != dest_community:
+ changed = True
+ changed_list.append("trap target '%s'" % dest_hostname)
+ trap_target_found = True
+ break
+ if not trap_target_found:
+ changed = True
+ changed_list.append("trap target '%s'" % dest_hostname)
+ # Build destination and add to temp target list
+ destination = self.build_destination(dest_hostname, dest_port, dest_community)
+ temp_desired_targets.append(destination)
+ # Loop through existing targets to find targets that need to be deleted
+ for trap_target in snmp_config_spec.trapTargets:
+ target_found = False
+ for target in desired_trap_targets:
+ if trap_target.hostName == target.get('hostname'):
+ target_found = True
+ break
+ if not target_found:
+ changed = True
+ changed_list.append("trap target '%s'" % trap_target.hostName)
+ # Configure trap targets if something has changed
+ if changed:
+ results['trap_targets_previous'] = self.get_previous_targets(snmp_config_spec.trapTargets)
+ snmp_config_spec.trapTargets = temp_desired_targets
+ else:
+ changed = True
+ changed_list.append("trap targets")
+ results['trap_targets_previous'] = self.get_previous_targets(snmp_config_spec.trapTargets)
+ # Doesn't work. Need to reset config instead
+ # snmp_config_spec.trapTargets = []
+ reset_hint = True
+ else:
+ if desired_trap_targets:
+ changed = True
+ changed_list.append("trap targets")
+ results['trap_targets_previous'] = None
+ desired_targets = []
+ for target in desired_trap_targets:
+ dest_hostname, dest_port, dest_community = self.check_if_options_are_valid(target)
+ destination = self.build_destination(dest_hostname, dest_port, dest_community)
+ desired_targets.append(destination)
+ snmp_config_spec.trapTargets = desired_targets
+
+ # Check options
+ results['hw_source'] = hw_source
+ results['log_level'] = log_level
+ results['trap_filter'] = trap_filter
+ event_filter_found = False
+ sys_contact_found = False
+ sys_location_found = False
+ if snmp_config_spec.option:
+ for option in snmp_config_spec.option:
+ if option.key == 'EnvEventSource' and option.value != hw_source:
+ changed = True
+ changed_list.append("HW source")
+ results['hw_source_previous'] = option.value
+ option.value = hw_source
+ if option.key == 'loglevel' and option.value != log_level:
+ changed = True
+ changed_list.append("log level")
+ results['log_level_previous'] = option.value
+ option.value = log_level
+ if option.key == 'EventFilter':
+ event_filter_found = True
+ if event_filter and option.value != event_filter:
+ changed = True
+ changed_list.append("trap filter")
+ results['trap_filter_previous'] = option.value.split(';')
+ option.value = event_filter
+ if option.key == 'syscontact':
+ sys_contact_found = True
+ if sys_contact is not None and option.value != sys_contact:
+ changed = True
+ changed_list.append("sys contact")
+ results['sys_contact_previous'] = option.value
+ option.value = sys_contact
+ if option.key == 'syslocation':
+ sys_location_found = True
+ if sys_location is not None and option.value != sys_location:
+ changed = True
+ changed_list.append("sys location")
+ results['sys_location_previous'] = option.value
+ option.value = sys_location
+ if trap_filter and not event_filter_found:
+ changed = True
+ changed_list.append("trap filter")
+ results['trap_filter_previous'] = []
+ snmp_config_spec.option.append(self.create_option('EventFilter', event_filter))
+ elif not trap_filter and event_filter_found:
+ changed = True
+ changed_list.append("trap filter")
+ # options = []
+ for option in snmp_config_spec.option:
+ if option.key == 'EventFilter':
+ results['trap_filter_previous'] = option.value.split(';')
+ # else:
+ # options.append(option)
+ # Doesn't work. Need to reset config instead
+ # snmp_config_spec.option = options
+ reset_hint = True
+ if sys_contact and not sys_contact_found:
+ changed = True
+ changed_list.append("sys contact")
+ results['sys_contact_previous'] = ''
+ snmp_config_spec.option.append(self.create_option('syscontact', sys_contact))
+ if sys_location and not sys_location_found:
+ changed = True
+ changed_list.append("sys location")
+ results['sys_location_previous'] = ''
+ snmp_config_spec.option.append(self.create_option('syslocation', sys_location))
+ if changed:
+ if snmp_state == 'reset':
+ if self.module.check_mode:
+ message = "SNMP agent would be reset to factory defaults"
+ else:
+ message = "SNMP agent config reset to factory defaults"
+ else:
+ if self.module.check_mode:
+ changed_suffix = ' would be changed'
+ else:
+ changed_suffix = ' changed'
+ if len(changed_list) > 2:
+ message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1])
+ elif len(changed_list) == 2:
+ message = ' and '.join(changed_list)
+ elif len(changed_list) == 1:
+ message = changed_list[0]
+ message = "SNMP " + message + changed_suffix
+ if reset_hint:
+ message += ". Agent reset required!"
+ if not self.module.check_mode:
+ try:
+ snmp_system.ReconfigureSnmpAgent(snmp_config_spec)
+ except vim.fault.NotFound as not_found:
+ self.module.fail_json(
+ msg="Not found : %s" % to_native(not_found)
+ )
+ except vim.fault.InsufficientResourcesFault as insufficient_resources:
+ self.module.fail_json(
+ msg="Insufficient resources : %s" % to_native(insufficient_resources)
+ )
+ else:
+ message = "SNMP already configured properly"
+ if not snmp_state == 'reset' and send_trap and desired_trap_targets:
+ # Check if there was a change before
+ if changed:
+ message += " and "
+ else:
+ message += ", but "
+ changed = True
+ if self.module.check_mode:
+ message = message + "a test trap would be sent"
+ else:
+ try:
+ snmp_system.SendTestNotification()
+ message = message + "a test trap was sent"
+ except vim.fault.NotFound as not_found:
+ self.module.fail_json(
+ msg="Error during trap test : Not found : %s" % to_native(not_found)
+ )
+ except vim.fault.InsufficientResourcesFault as insufficient_resources:
+ self.module.fail_json(
+ msg="Error during trap test : Insufficient resources : %s" % to_native(insufficient_resources)
+ )
+ results['changed'] = changed
+ results['msg'] = message
+
+ self.module.exit_json(**results)
+
+ @staticmethod
+ def create_option(key, value):
+ """Create option"""
+ option = vim.KeyValue()
+ option.key = key
+ option.value = value
+ return option
+
+ @staticmethod
+ def get_previous_targets(trap_targets):
+ """Get target entries from trap targets object"""
+ previous_targets = []
+ for target in trap_targets:
+ temp = dict()
+ temp['hostname'] = target.hostName
+ temp['port'] = target.port
+ temp['community'] = target.community
+ previous_targets.append(temp)
+ return previous_targets
+
+ @staticmethod
+ def build_destination(dest_hostname, dest_port, dest_community):
+ """Build destination spec"""
+ destination = vim.host.SnmpSystem.SnmpConfigSpec.Destination()
+ destination.hostName = dest_hostname
+ destination.port = dest_port
+ destination.community = dest_community
+ return destination
+
+ def check_if_options_are_valid(self, target):
+ """Check if options are valid"""
+ dest_hostname = target.get('hostname', None)
+ if dest_hostname is None:
+ self.module.fail_json(
+ msg="Please specify hostname for the trap target as it's a required parameter"
+ )
+ dest_port = target.get('port', None)
+ if dest_port is None:
+ self.module.fail_json(
+ msg="Please specify port for the trap target as it's a required parameter"
+ )
+ dest_community = target.get('community', None)
+ if dest_community is None:
+ self.module.fail_json(
+ msg="Please specify community for the trap target as it's a required parameter"
+ )
+ return dest_hostname, dest_port, dest_community
+
+
def main():
    """Entry point: build the argument spec and run the SNMP manager."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        state=dict(type='str', default='disabled', choices=['enabled', 'disabled', 'reset']),
        snmp_port=dict(type='int', default=161),
        community=dict(type='list', default=[], elements='str'),
        trap_targets=dict(type='list', default=[], elements='dict'),
        trap_filter=dict(type='list', elements='str'),
        hw_source=dict(type='str', default='indications', choices=['indications', 'sensors']),
        log_level=dict(type='str', default='info', choices=['debug', 'info', 'warning', 'error']),
        send_trap=dict(type='bool', default=False),
        sys_contact=dict(type='str'),
        sys_location=dict(type='str'),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    VmwareHostSnmp(module).ensure()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_sriov.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_sriov.py
new file mode 100644
index 000000000..5277dc6ba
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_sriov.py
@@ -0,0 +1,367 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (C) 2020, Viktor Tsymbalyuk
+# Copyright: (C) 2020, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_sriov
+short_description: Manage SR-IOV settings on host
+description:
+- This module can be used to configure, enable or disable SR-IOV functions on ESXi host.
+- Module does not reboot the host after changes, but puts it in output "rebootRequired" state.
+- User can specify an ESXi hostname or Cluster name. In case of cluster name, all ESXi hosts are updated.
+author:
+- Viktor Tsymbalyuk (@victron)
+options:
+ esxi_hostname:
+ description:
+ - Name of the host system to work with.
+ - This parameter is required if C(cluster_name) is not specified.
+ - User can specify specific host from the cluster.
+ type: str
+ cluster_name:
+ description:
+ - Name of the cluster from which all host systems will be used.
+ - This parameter is required if C(esxi_hostname) is not specified.
+ type: str
+ vmnic:
+ description:
+ - Interface name, like vmnic0.
+ type: str
+ required: true
+ num_virt_func:
+ description:
+ - number of functions to activate on interface.
+ - 0 means SR-IOV disabled.
+ - number greater than 0 means SR-IOV enabled.
+ type: int
+ required: true
+ sriov_on:
+ description:
+ - optional parameter, related to C(num_virt_func).
+ - SR-IOV can be enabled only if C(num_virt_func) > 0.
+ type: bool
+ required: false
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: enable SR-IOV on vmnic0 with 8 functions
+ community.vmware.vmware_host_sriov:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ esxi1 }}"
+ vmnic: vmnic0
+ sriov_on: true
+ num_virt_func: 8
+
+- name: enable SR-IOV on already enabled interface vmnic0
+ community.vmware.vmware_host_sriov:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ esxi1 }}"
+ vmnic: vmnic0
+ sriov_on: true
+ num_virt_func: 8
+
+- name: enable SR-IOV on vmnic0 with big number of functions
+ community.vmware.vmware_host_sriov:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ esxi1 }}"
+ vmnic: vmnic0
+ sriov_on: true
+ num_virt_func: 100
+ ignore_errors: true
+
+- name: disable SR-IOV on vmnic0
+ community.vmware.vmware_host_sriov:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ esxi1 }}"
+ vmnic: vmnic0
+ sriov_on: false
+ num_virt_func: 0
+'''
+
+RETURN = r'''
+host_sriov_diff:
+ description:
+ - contains info about SR-IOV status on vmnic before, after and requested changes
  - sometimes vCenter updates this info slowly; as a result, "after" may contain the same info as "before".
    In that case, run the module again in check_mode or reboot the host, as requested by ESXi.
+ returned: always
+ type: dict
+ "sample": {
+ "changed": true,
+ "diff": {
+ "after": {
+ "host_test": {
+ "sriovActive": false,
+ "sriovEnabled": true,
+ "maxVirtualFunctionSupported": 63,
+ "numVirtualFunction": 0,
+ "numVirtualFunctionRequested": 8,
+ "rebootRequired": true,
+ "sriovCapable": true
+ }
+ },
+ "before": {
+ "host_test": {
+ "sriovActive": false,
+ "sriovEnabled": false,
+ "maxVirtualFunctionSupported": 63,
+ "numVirtualFunction": 0,
+ "numVirtualFunctionRequested": 0,
+ "rebootRequired": false,
+ "sriovCapable": true
+ }
+ },
+ "changes": {
+ "host_test": {
+ "numVirtualFunction": 8,
+ "rebootRequired": true,
+ "sriovEnabled": true
+ }
+ }
+ }
+ }
+'''
+
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+from time import sleep
+
+
class VmwareAdapterConfigManager(PyVmomi):
    """Class to configure SR-IOV settings on one or more ESXi host NICs."""

    def __init__(self, module):
        """Read module params and resolve the target host objects.

        Fails the module if no host matches cluster_name/esxi_hostname.
        """
        super(VmwareAdapterConfigManager, self).__init__(module)
        cluster_name = self.params.get("cluster_name", None)
        esxi_host_name = self.params.get("esxi_hostname", None)

        self.vmnic = self.params.get("vmnic", None)
        self.num_virt_func = self.params.get("num_virt_func", None)
        self.sriov_on = self.params.get("sriov_on", None)

        # prepare list of hosts to work with them
        self.hosts = self.get_all_host_objs(
            cluster_name=cluster_name, esxi_host_name=esxi_host_name
        )
        if not self.hosts:
            self.module.fail_json(msg="Failed to find host system.")
        # per-host state snapshots, filled by set_host_state()
        self.results = {"before": {}, "after": {}, "changes": {}}

    def sanitize_params(self):
        """Check user input, raise error if input is incompatible.

        Also fills in self.sriov_on when the user did not provide it
        (derived from num_virt_func).
        :return : None
        """

        if self.num_virt_func < 0:
            self.module.fail_json(msg="allowed value for num_virt_func >= 0")
        if self.num_virt_func == 0:
            if self.sriov_on is True:
                self.module.fail_json(
                    msg="with sriov_on == true, allowed value for num_virt_func > 0"
                )
            self.sriov_on = False  # fill value, if user not provided

        if self.num_virt_func > 0:
            if self.sriov_on is False:
                self.module.fail_json(
                    msg="with sriov_on == false, allowed value for num_virt_func is 0"
                )
            self.sriov_on = True  # fill value, if user not provided

    def check_compatibility(self, before, hostname):
        """
        Check hardware compatibility with user input; fail the module if incompatible.
        :before : dict, of params on target interface before changing
        :hostname : str, hostname (used in error messages only)
        :return : None
        """
        if self.num_virt_func > 0:
            if not before["sriovCapable"]:
                self.module.fail_json(
                    msg="sriov not supported on host= %s, nic= %s" % (hostname, self.vmnic)
                )

            if before["maxVirtualFunctionSupported"] < self.num_virt_func:
                self.module.fail_json(
                    msg="maxVirtualFunctionSupported= %d on %s" % (before["maxVirtualFunctionSupported"], self.vmnic)
                )

    def make_diff(self, before, hostname):
        """
        Prepare the diff - changes which will be applied.
        :before : dict, of params on target interface before changing
        :hostname : str, hostname (currently unused here)
        :return : dict, of changes which are going to be applied,
                  plus 'msg' (str) and 'change' (bool) bookkeeping keys
        """
        diff = {}
        change = False
        change_msg = ""

        if before["sriovEnabled"] != self.sriov_on:
            diff["sriovEnabled"] = self.sriov_on
            change = True

        if before["numVirtualFunction"] != self.num_virt_func:
            if before["numVirtualFunctionRequested"] != self.num_virt_func:
                diff["numVirtualFunction"] = self.num_virt_func
                change = True
            else:
                # requested value already matches: the change is pending a host reboot
                change_msg = "Not active (looks like not rebooted) "

        if not change:
            change_msg += "No any changes, already configured "
        diff["msg"] = change_msg
        diff["change"] = change

        return diff

    def set_host_state(self):
        """Checking and applying ESXi host configuration one by one,
        from prepared list of hosts in `self.hosts`.
        For every host applied:
        - user input checking done via calling `sanitize_params` method
        - checks hardware compatibility with user input `check_compatibility`
        - conf changes created via `make_diff`
        - changes applied via calling `_update_sriov` method
        - host state before and after via calling `_check_sriov`
        Exits the module with changed status and the before/after/changes diff.
        """
        self.sanitize_params()
        change_list = []
        changed = False
        for host in self.hosts:
            self.results["before"][host.name] = {}
            self.results["after"][host.name] = {}
            self.results["changes"][host.name] = {}
            self.results["before"][host.name] = self._check_sriov(host)

            self.check_compatibility(self.results["before"][host.name], host.name)
            diff = self.make_diff(self.results["before"][host.name], host.name)
            self.results["changes"][host.name] = diff

            if not diff["change"]:
                # nothing to apply; still refresh "after" and note reboot-flag changes
                change_list.append(False)
                self.results["after"][host.name] = self._check_sriov(host)
                if (self.results["before"][host.name]["rebootRequired"] != self.results["after"][host.name]["rebootRequired"]):
                    self.results["changes"][host.name]["rebootRequired"] = self.results["after"][host.name]["rebootRequired"]
                continue

            # NOTE(review): in check mode _update_sriov returns False, so the module
            # reports changed=False even when a change would be applied — confirm intended.
            success = self._update_sriov(host, self.sriov_on, self.num_virt_func)
            if success:
                change_list.append(True)
            else:
                change_list.append(False)

            self.results["after"][host.name] = self._check_sriov(host)
            self.results["changes"][host.name].update(
                {
                    "rebootRequired": self.results["after"][host.name]["rebootRequired"]
                }
            )

        if any(change_list):
            changed = True
        self.module.exit_json(changed=changed, diff=self.results)

    def _check_sriov(self, host):
        """Read the current SR-IOV state of self.vmnic on the given host.

        :return : dict with 'rebootRequired', 'sriovCapable' and — when the
                  device is SR-IOV capable — the enabled/active/function counters.
        """
        pnic_info = {}
        pnic_info["rebootRequired"] = host.summary.rebootRequired
        for pci_device in host.configManager.pciPassthruSystem.pciPassthruInfo:
            if pci_device.id == self._getPciId(host):
                try:
                    if pci_device.sriovCapable:
                        pnic_info["sriovCapable"] = True
                        pnic_info["sriovEnabled"] = pci_device.sriovEnabled
                        pnic_info["sriovActive"] = pci_device.sriovActive
                        pnic_info["numVirtualFunction"] = pci_device.numVirtualFunction
                        pnic_info[
                            "numVirtualFunctionRequested"
                        ] = pci_device.numVirtualFunctionRequested
                        pnic_info[
                            "maxVirtualFunctionSupported"
                        ] = pci_device.maxVirtualFunctionSupported
                    else:
                        pnic_info["sriovCapable"] = False
                except AttributeError:
                    # device object has no sriovCapable attribute at all
                    pnic_info["sriovCapable"] = False
                break
        return pnic_info

    def _getPciId(self, host):
        """Return the PCI id of self.vmnic on the host; fail the module if absent."""
        for pnic in host.config.network.pnic:
            if pnic.device == self.vmnic:
                return pnic.pci
        self.module.fail_json(msg="No nic= %s on host= %s" % (self.vmnic, host.name))

    def _update_sriov(self, host, sriovEnabled, numVirtualFunction):
        """Push the SR-IOV config to the host (no-op in check mode).

        :return : True when the config was sent, False in check mode;
                  fails the module on vim.fault.HostConfigFault.
        """
        nic_sriov = vim.host.SriovConfig()
        nic_sriov.id = self._getPciId(host)
        nic_sriov.sriovEnabled = sriovEnabled
        nic_sriov.numVirtualFunction = numVirtualFunction

        try:
            if not self.module.check_mode:
                host.configManager.pciPassthruSystem.UpdatePassthruConfig([nic_sriov])
                # looks only for refresh info
                host.configManager.pciPassthruSystem.Refresh()
                sleep(2)  # TODO: needed method to know that host updated info
                return True
            return False
        except vim.fault.HostConfigFault as config_fault:
            self.module.fail_json(
                msg="Failed to configure SR-IOV for host= %s due to : %s"
                % (host.name, to_native(config_fault.msg))
            )
            return False
+
+
def main():
    """Entry point: parse arguments and apply the SR-IOV configuration."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        cluster_name=dict(type="str", required=False),
        esxi_hostname=dict(type="str", required=False),
        vmnic=dict(type="str", required=True),
        num_virt_func=dict(type="int", required=True),
        sriov_on=dict(type="bool", required=False),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[["cluster_name", "esxi_hostname"]],
        supports_check_mode=True,
    )

    VmwareAdapterConfigManager(module).set_host_state()


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_ssl_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_ssl_info.py
new file mode 100644
index 000000000..9530e96dd
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_ssl_info.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_ssl_info
+short_description: Gather info of ESXi host system about SSL
+description:
+- This module can be used to gather information of the SSL thumbprint information for a host.
+author:
+- Abhijeet Kasurde (@Akasurde)
+options:
+ cluster_name:
+ description:
+ - Name of the cluster.
+ - SSL thumbprint information about all ESXi host system in the given cluster will be reported.
+ - If C(esxi_hostname) is not given, this parameter is required.
+ type: str
+ esxi_hostname:
+ description:
+ - ESXi hostname.
+ - SSL thumbprint information of this ESXi host system will be reported.
+ - If C(cluster_name) is not given, this parameter is required.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather SSL thumbprint information about all ESXi Hosts in given Cluster
+ community.vmware.vmware_host_ssl_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: '{{ cluster_name }}'
+ delegate_to: localhost
+ register: all_host_ssl_info
+
+- name: Get SSL Thumbprint info about "{{ esxi_hostname }}"
+ community.vmware.vmware_host_ssl_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: '{{ esxi_hostname }}'
+ register: ssl_info
+- set_fact:
+ ssl_thumbprint: "{{ ssl_info['host_ssl_info'][esxi_hostname]['ssl_thumbprints'][0] }}"
+- debug:
+ msg: "{{ ssl_thumbprint }}"
+- name: Add ESXi Host to vCenter
+ vmware_host:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter_name: '{{ datacenter_name }}'
+ cluster_name: '{{ cluster_name }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ esxi_username: '{{ esxi_username }}'
+ esxi_password: '{{ esxi_password }}'
+ esxi_ssl_thumbprint: '{{ ssl_thumbprint }}'
+ state: present
+'''
+
+RETURN = r'''
+host_ssl_info:
+ description:
+ - dict with hostname as key and dict with SSL thumbprint related info
+ returned: info
+ type: dict
+ sample:
+ {
+ "10.76.33.215": {
+ "owner_tag": "",
+ "principal": "vpxuser",
+ "ssl_thumbprints": [
+ "E3:E8:A9:20:8D:32:AE:59:C6:8D:A5:91:B0:20:EF:00:A2:7C:27:EE",
+ "F1:AC:DA:6E:D8:1E:37:36:4A:5C:07:E5:04:0B:87:C8:75:FB:42:01"
+ ]
+ }
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+
+
class VMwareHostSslManager(PyVmomi):
    """Collect SSL thumbprint information for a set of ESXi hosts."""

    def __init__(self, module):
        """Resolve the target hosts from cluster_name/esxi_hostname params."""
        super(VMwareHostSslManager, self).__init__(module)
        self.hosts = self.get_all_host_objs(
            cluster_name=self.params.get('cluster_name', None),
            esxi_host_name=self.params.get('esxi_hostname', None),
        )
        self.hosts_info = {}

    def gather_ssl_info(self):
        """Exit the module with per-host SSL thumbprint details."""
        for host in self.hosts:
            info = dict(
                principal='',
                owner_tag='',
                ssl_thumbprints=[],
            )
            ssl_info = host.config.sslThumbprintInfo
            if ssl_info:
                info['principal'] = ssl_info.principal
                info['owner_tag'] = ssl_info.ownerTag
                info['ssl_thumbprints'] = list(ssl_info.sslThumbprints)
            self.hosts_info[host.name] = info

        self.module.exit_json(changed=False, host_ssl_info=self.hosts_info)
+
+
def main():
    """Entry point: parse arguments and gather SSL thumbprint info."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        cluster_name=dict(type='str'),
        esxi_hostname=dict(type='str'),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[['cluster_name', 'esxi_hostname']],
        supports_check_mode=True,
    )

    host_ssl_manager = VMwareHostSslManager(module)
    host_ssl_manager.gather_ssl_info()


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_tcpip_stacks.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_tcpip_stacks.py
new file mode 100644
index 000000000..c772846d3
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_tcpip_stacks.py
@@ -0,0 +1,623 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2021, sky-joker
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_tcpip_stacks
+short_description: Manage the TCP/IP Stacks configuration of ESXi host
+author:
+ - sky-joker (@sky-joker)
+description:
+ - This module can be used to modify the TCP/IP stacks configuration.
+options:
+ esxi_hostname:
+ description:
+ - Name of the ESXi host.
+ type: str
+ required: true
+ default:
+ description:
+ - The TCP/IP stacks configuration of the I(default).
+ suboptions:
+ hostname:
+ description:
+ - The host name of the ESXi host.
+ type: str
+ required: true
+ domain:
+ description:
+ - The domain name portion of the DNS name.
+ type: str
+ required: true
+ preferred_dns:
+ description:
+ - The IP address of the preferred dns server.
+ type: str
+ alternate_dns:
+ description:
+ - The IP address of the alternate dns server.
+ type: str
+ search_domains:
+ description:
+ - The domain in which to search for hosts, placed in order of preference.
+ default: []
+ elements: str
+ type: list
+ gateway:
+ description:
+ - The ipv4 gateway address.
+ type: str
+ ipv6_gateway:
+ description:
+ - The ipv6 gateway address.
+ type: str
+ congestion_algorithm:
+ description:
+ - The TCP congest control algorithm.
+ choices:
+ - newreno
+ - cubic
+ default: newreno
+ type: str
+ max_num_connections:
+ description:
+ - The maximum number of socket connection that are requested.
+ default: 11000
+ type: int
+ type: dict
+ provisioning:
+ description:
+ - The TCP/IP stacks configuration of the I(provisioning).
+ suboptions:
+ gateway:
+ description:
+ - The ipv4 gateway address.
+ type: str
+ ipv6_gateway:
+ description:
+ - The ipv6 gateway address.
+ type: str
+ congestion_algorithm:
+ description:
+ - The TCP congest control algorithm.
+ choices:
+ - newreno
+ - cubic
+ default: newreno
+ type: str
+ max_num_connections:
+ description:
+ - The maximum number of socket connection that are requested.
+ default: 11000
+ type: int
+ type: dict
+ vmotion:
+ description:
+ - The TCP/IP stacks configuration of the I(vmotion).
+ suboptions:
+ gateway:
+ description:
+ - The ipv4 gateway address.
+ type: str
+ ipv6_gateway:
+ description:
+ - The ipv6 gateway address.
+ type: str
+ congestion_algorithm:
+ description:
+ - The TCP congest control algorithm.
+ choices:
+ - newreno
+ - cubic
+ default: newreno
+ type: str
+ max_num_connections:
+ description:
+ - The maximum number of socket connection that are requested.
+ default: 11000
+ type: int
+ type: dict
+ vxlan:
+ description:
+ - The TCP/IP stacks configuration of the I(vxlan).
+ suboptions:
+ gateway:
+ description:
+ - The ipv4 gateway address.
+ type: str
+ ipv6_gateway:
+ description:
+ - The ipv6 gateway address.
+ type: str
+ congestion_algorithm:
+ description:
+ - The TCP congest control algorithm.
+ choices:
+ - newreno
+ - cubic
+ default: newreno
+ type: str
+ max_num_connections:
+ description:
+ - The maximum number of socket connection that are requested.
+ default: 11000
+ type: int
+ type: dict
+ aliases:
+ - nsx_overlay
+extends_documentation_fragment:
+ - community.vmware.vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: Update the TCP/IP stack configuration of the default
+ community.vmware.vmware_host_tcpip_stacks:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ esxi_hostname: "{{ esxi_hostname }}"
+ default:
+ hostname: "{{ esxi_hostname }}"
+ domain: example.com
+ preferred_dns: 192.168.10.1
+ alternate_dns: 192.168.20.1
+ search_domains:
+ - hoge.com
+ - fuga.com
+ gateway: 192.168.10.1
+ congestion_algorithm: cubic
+ max_num_connections: 12000
+
+- name: Update the TCP/IP stack configuration of the provisioning
+ community.vmware.vmware_host_tcpip_stacks:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ esxi_hostname: "{{ esxi_hostname }}"
+ provisioning:
+ congestion_algorithm: newreno
+ max_num_connections: 12000
+ gateway: 10.10.10.254
+
+- name: Update the TCP/IP stack configuration of the default and provisioning
+ community.vmware.vmware_host_tcpip_stacks:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ esxi_hostname: "{{ esxi_hostname }}"
+ default:
+ hostname: "{{ esxi_hostname }}"
+ domain: example.com
+ preferred_dns: 192.168.10.1
+ alternate_dns: 192.168.20.1
+ search_domains:
+ - hoge.com
+ - fuga.com
+ gateway: 192.168.10.1
+ congestion_algorithm: cubic
+ max_num_connections: 12000
+ provisioning:
+ congestion_algorithm: newreno
+ max_num_connections: 12000
+ gateway: 10.10.10.254
+
+- name: Update the ipv6 gateway of the provisioning TCP/IP stack
+ community.vmware.vmware_host_tcpip_stacks:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ esxi_hostname: "{{ esxi_hostname }}"
+ provisioning:
+ ipv6_gateway: ::ffff:6440:301
+'''
+
+RETURN = r'''
+default:
+ description: dict of the TCP/IP stack configuration of the default.
+ returned: always
+ type: dict
+ sample: >-
+ {
+ "alternate_dns": "192.168.20.1",
+ "congestion_algorithm": "cubic",
+ "domain": "example.com",
+ "gateway": "192.168.10.1",
        "ipv6_gateway": null,
+ "hostname": "esxi-test03",
+ "max_num_connections": 12000,
+ "preferred_dns": "192.168.10.1",
+ "search_domains": [
+ "hoge.com",
+ "fuga.com"
+ ]
+ }
+provisioning:
+ description: dict of the TCP/IP stack configuration of the provisioning.
+ returned: always
+ type: dict
+ sample: >-
+ {
+ "congestion_algorithm": "newreno",
+ "gateway": "10.10.10.254",
+ "ipv6_gateway": null,
+ "max_num_connections": 12000
+ }
+vmotion:
+ description: dict of the TCP/IP stack configuration of the vmotion.
+ returned: always
+ type: dict
+ sample: >-
+ {
+ "congestion_algorithm": "newreno",
+ "gateway": null,
+ "ipv6_gateway": null,
+ "max_num_connections": 11000
+ }
+vxlan:
+ description: dict of the TCP/IP stack configuration of the vxlan.
+ returned: always
+ type: dict
+ sample: >-
+ {
+ "congestion_algorithm": "newreno",
+ "gateway": null,
+ "ipv6_gateway": null,
+ "max_num_connections": 11000
+ }
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ try:
+ from ordereddict import OrderedDict
+ except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+
+
+class VmwareHostTcpipStack(PyVmomi):
+ def __init__(self, module):
+ super(VmwareHostTcpipStack, self).__init__(module)
+ self.esxi_hostname = self.params['esxi_hostname']
+ self.default = self.params['default']
+ self.provisioning = self.params['provisioning']
+ self.vmotion = self.params['vmotion']
+ self.vxlan = self.params['vxlan']
+
+ self.net_stack_instance_keys = {
+ "default": "defaultTcpipStack",
+ "vmotion": "vmotion",
+ "provisioning": "vSphereProvisioning",
+ "vxlan": "vxlan"
+ }
+
+ def check_enabled_net_stack_instance(self):
+ """
+ Make sure if enabled each the tcpip stack item in ESXi host.
+ """
+ self.enabled_net_stack_instance = {
+ "default": False,
+ "vmotion": False,
+ "provisioning": False,
+ "vxlan": False
+ }
+ for net_stack_instance in self.host_obj.runtime.networkRuntimeInfo.netStackInstanceRuntimeInfo:
+ if net_stack_instance.netStackInstanceKey == self.net_stack_instance_keys['default']:
+ self.enabled_net_stack_instance['default'] = True
+
+ if net_stack_instance.netStackInstanceKey == self.net_stack_instance_keys['provisioning']:
+ self.enabled_net_stack_instance['provisioning'] = True
+
+ if net_stack_instance.netStackInstanceKey == self.net_stack_instance_keys['vmotion']:
+ self.enabled_net_stack_instance['vmotion'] = True
+
+ if net_stack_instance.netStackInstanceKey == self.net_stack_instance_keys['vxlan']:
+ self.enabled_net_stack_instance['vxlan'] = True
+
+ def get_net_stack_instance_config(self):
+ """
+ Get a configuration of tcpip stack item if it is enabled.
+ """
+ self.exist_net_stack_instance_config = {}
+ for key, value in self.enabled_net_stack_instance.items():
+ if value is True:
+ for net_stack_instance in self.host_obj.config.network.netStackInstance:
+ if net_stack_instance.key == self.net_stack_instance_keys[key]:
+ self.exist_net_stack_instance_config[key] = net_stack_instance
+
+ def diff_net_stack_instance_config(self):
+ """
+ Check the difference between a new and existing config.
+ """
+ self.change_flag = False
+
+ # Make the diff_config variable to check the difference between a new and existing config.
+ self.diff_config = dict(before={}, after={})
+ for key, value in self.enabled_net_stack_instance.items():
+ if value is True:
+ self.diff_config['before'][key] = {}
+ self.diff_config['after'][key] = {}
+
+ if self.enabled_net_stack_instance['default']:
+ exist_dns_servers = self.exist_net_stack_instance_config['default'].dnsConfig.address
+ for key in 'before', 'after':
+ self.diff_config[key]['default'] = dict(
+ hostname=self.exist_net_stack_instance_config['default'].dnsConfig.hostName,
+ domain=self.exist_net_stack_instance_config['default'].dnsConfig.domainName,
+ preferred_dns=exist_dns_servers[0] if [dns for dns in exist_dns_servers if exist_dns_servers.index(dns) == 0] else None,
+ alternate_dns=exist_dns_servers[1] if [dns for dns in exist_dns_servers if exist_dns_servers.index(dns) == 1] else None,
+ search_domains=self.exist_net_stack_instance_config['default'].dnsConfig.searchDomain,
+ gateway=self.exist_net_stack_instance_config['default'].ipRouteConfig.defaultGateway,
+ ipv6_gateway=self.exist_net_stack_instance_config['default'].ipRouteConfig.ipV6DefaultGateway,
+ congestion_algorithm=self.exist_net_stack_instance_config['default'].congestionControlAlgorithm,
+ max_num_connections=self.exist_net_stack_instance_config['default'].requestedMaxNumberOfConnections
+ )
+ if self.default:
+ if self.diff_config['before']['default']['hostname'] != self.default['hostname']:
+ self.change_flag = True
+ self.diff_config['after']['default']['hostname'] = self.default['hostname']
+ if self.diff_config['before']['default']['domain'] != self.default['domain']:
+ self.change_flag = True
+ self.diff_config['after']['default']['domain'] = self.default['domain']
+ if self.diff_config['before']['default']['preferred_dns'] != self.default['preferred_dns']:
+ self.change_flag = True
+ self.diff_config['after']['default']['preferred_dns'] = self.default['preferred_dns']
+ if self.diff_config['before']['default']['alternate_dns'] != self.default['alternate_dns']:
+ self.change_flag = True
+ self.diff_config['after']['default']['alternate_dns'] = self.default['alternate_dns']
+ if self.diff_config['before']['default']['search_domains'] != self.default['search_domains']:
+ self.change_flag = True
+ self.diff_config['after']['default']['search_domains'] = self.default['search_domains']
+ if self.diff_config['before']['default']['gateway'] != self.default['gateway']:
+ self.change_flag = True
+ self.diff_config['after']['default']['gateway'] = self.default['gateway']
+ if self.diff_config['before']['default']['ipv6_gateway'] != self.default['ipv6_gateway']:
+ self.change_flag = True
+ self.diff_config['after']['default']['ipv6_gateway'] = self.default['ipv6_gateway']
+ if self.diff_config['before']['default']['congestion_algorithm'] != self.default['congestion_algorithm']:
+ self.change_flag = True
+ self.diff_config['after']['default']['congestion_algorithm'] = self.default['congestion_algorithm']
+ if self.diff_config['before']['default']['max_num_connections'] != self.default['max_num_connections']:
+ self.change_flag = True
+ self.diff_config['after']['default']['max_num_connections'] = self.default['max_num_connections']
+
+ if self.enabled_net_stack_instance['provisioning']:
+ for key in 'before', 'after':
+ self.diff_config[key]['provisioning'] = dict(
+ gateway=self.exist_net_stack_instance_config['provisioning'].ipRouteConfig.defaultGateway,
+ ipv6_gateway=self.exist_net_stack_instance_config['provisioning'].ipRouteConfig.ipV6DefaultGateway,
+ congestion_algorithm=self.exist_net_stack_instance_config['provisioning'].congestionControlAlgorithm,
+ max_num_connections=self.exist_net_stack_instance_config['provisioning'].requestedMaxNumberOfConnections
+ )
+ if self.provisioning:
+ if self.diff_config['before']['provisioning']['gateway'] != self.provisioning['gateway']:
+ self.change_flag = True
+ self.diff_config['after']['provisioning']['gateway'] = self.provisioning['gateway']
+ if self.diff_config['before']['provisioning']['ipv6_gateway'] != self.provisioning['ipv6_gateway']:
+ self.change_flag = True
+ self.diff_config['after']['provisioning']['ipv6_gateway'] = self.provisioning['ipv6_gateway']
+ if self.diff_config['before']['provisioning']['max_num_connections'] != self.provisioning['max_num_connections']:
+ self.change_flag = True
+ self.diff_config['after']['provisioning']['max_num_connections'] = self.provisioning['max_num_connections']
+ if self.diff_config['before']['provisioning']['congestion_algorithm'] != self.provisioning['congestion_algorithm']:
+ self.change_flag = True
+ self.diff_config['after']['provisioning']['congestion_algorithm'] = self.provisioning['congestion_algorithm']
+
+ if self.enabled_net_stack_instance['vmotion']:
+ for key in 'before', 'after':
+ self.diff_config[key]['vmotion'] = dict(
+ gateway=self.exist_net_stack_instance_config['vmotion'].ipRouteConfig.defaultGateway,
+ ipv6_gateway=self.exist_net_stack_instance_config['vmotion'].ipRouteConfig.ipV6DefaultGateway,
+ congestion_algorithm=self.exist_net_stack_instance_config['vmotion'].congestionControlAlgorithm,
+ max_num_connections=self.exist_net_stack_instance_config['vmotion'].requestedMaxNumberOfConnections
+ )
+ if self.vmotion:
+ if self.diff_config['before']['vmotion']['gateway'] != self.vmotion['gateway']:
+ self.change_flag = True
+ self.diff_config['after']['vmotion']['gateway'] = self.vmotion['gateway']
+ if self.diff_config['before']['vmotion']['ipv6_gateway'] != self.vmotion['ipv6_gateway']:
+ self.change_flag = True
+ self.diff_config['after']['vmotion']['ipv6_gateway'] = self.vmotion['ipv6_gateway']
+ if self.diff_config['before']['vmotion']['max_num_connections'] != self.vmotion['max_num_connections']:
+ self.change_flag = True
+ self.diff_config['after']['vmotion']['max_num_connections'] = self.vmotion['max_num_connections']
+ if self.diff_config['before']['vmotion']['congestion_algorithm'] != self.vmotion['congestion_algorithm']:
+ self.change_flag = True
+ self.diff_config['after']['vmotion']['congestion_algorithm'] = self.vmotion['congestion_algorithm']
+
+ if self.enabled_net_stack_instance['vxlan']:
+ for key in 'before', 'after':
+ self.diff_config[key]['vxlan'] = dict(
+ gateway=self.exist_net_stack_instance_config['vxlan'].ipRouteConfig.defaultGateway,
+ ipv6_gateway=self.exist_net_stack_instance_config['vxlan'].ipRouteConfig.ipV6DefaultGateway,
+ congestion_algorithm=self.exist_net_stack_instance_config['vxlan'].congestionControlAlgorithm,
+ max_num_connections=self.exist_net_stack_instance_config['vxlan'].requestedMaxNumberOfConnections
+ )
+ if self.vxlan:
+ if self.diff_config['before']['vxlan']['gateway'] != self.vxlan['gateway']:
+ self.change_flag = True
+ self.diff_config['after']['vxlan']['gateway'] = self.vxlan['gateway']
+ if self.diff_config['before']['vxlan']['ipv6_gateway'] != self.vxlan['ipv6_gateway']:
+ self.change_flag = True
+ self.diff_config['after']['vxlan']['ipv6_gateway'] = self.vxlan['ipv6_gateway']
+ if self.diff_config['before']['vxlan']['max_num_connections'] != self.vxlan['max_num_connections']:
+ self.change_flag = True
+ self.diff_config['after']['vxlan']['max_num_connections'] = self.vxlan['max_num_connections']
+ if self.diff_config['before']['vxlan']['congestion_algorithm'] != self.vxlan['congestion_algorithm']:
+ self.change_flag = True
+ self.diff_config['after']['vxlan']['congestion_algorithm'] = self.vxlan['congestion_algorithm']
+
+ def generate_net_stack_instance_config(self):
+ """
+ Generate a new configuration for tcpip stack to modify the configuration.
+ """
+ self.new_net_stack_instance_configs = vim.host.NetworkConfig()
+ self.new_net_stack_instance_configs.netStackSpec = []
+
+ if self.default and self.enabled_net_stack_instance['default']:
+ default_config = vim.host.NetworkConfig.NetStackSpec()
+ default_config.operation = 'edit'
+ default_config.netStackInstance = vim.host.NetStackInstance()
+ default_config.netStackInstance.key = self.net_stack_instance_keys['default']
+ default_config.netStackInstance.ipRouteConfig = vim.host.IpRouteConfig()
+ default_config.netStackInstance.ipRouteConfig.defaultGateway = self.default['gateway']
+ default_config.netStackInstance.ipRouteConfig.ipV6DefaultGateway = self.default['ipv6_gateway']
+ default_config.netStackInstance.dnsConfig = vim.host.DnsConfig()
+ default_config.netStackInstance.dnsConfig.hostName = self.default['hostname']
+ default_config.netStackInstance.dnsConfig.domainName = self.default['domain']
+ dns_servers = []
+ if self.default['preferred_dns']:
+ dns_servers.append(self.default['preferred_dns'])
+ if self.default['alternate_dns']:
+ dns_servers.append(self.default['alternate_dns'])
+ default_config.netStackInstance.dnsConfig.address = dns_servers
+ default_config.netStackInstance.dnsConfig.searchDomain = self.default['search_domains']
+ default_config.netStackInstance.congestionControlAlgorithm = self.default['congestion_algorithm']
+ default_config.netStackInstance.requestedMaxNumberOfConnections = self.default['max_num_connections']
+ self.new_net_stack_instance_configs.netStackSpec.append(default_config)
+
+ if self.provisioning and self.enabled_net_stack_instance['provisioning']:
+ provisioning_config = vim.host.NetworkConfig.NetStackSpec()
+ provisioning_config.operation = 'edit'
+ provisioning_config.netStackInstance = vim.host.NetStackInstance()
+ provisioning_config.netStackInstance.key = self.net_stack_instance_keys['provisioning']
+ provisioning_config.netStackInstance.ipRouteConfig = vim.host.IpRouteConfig()
+ provisioning_config.netStackInstance.ipRouteConfig.defaultGateway = self.provisioning['gateway']
+ provisioning_config.netStackInstance.ipRouteConfig.ipV6DefaultGateway = self.provisioning['ipv6_gateway']
+ provisioning_config.netStackInstance.congestionControlAlgorithm = self.provisioning['congestion_algorithm']
+ provisioning_config.netStackInstance.requestedMaxNumberOfConnections = self.provisioning['max_num_connections']
+ self.new_net_stack_instance_configs.netStackSpec.append(provisioning_config)
+
+ if self.vmotion and self.enabled_net_stack_instance['vmotion']:
+ vmotion_config = vim.host.NetworkConfig.NetStackSpec()
+ vmotion_config.operation = 'edit'
+ vmotion_config.netStackInstance = vim.host.NetStackInstance()
+ vmotion_config.netStackInstance.key = self.net_stack_instance_keys['vmotion']
+ vmotion_config.netStackInstance.ipRouteConfig = vim.host.IpRouteConfig()
+ vmotion_config.netStackInstance.ipRouteConfig.defaultGateway = self.vmotion['gateway']
+ vmotion_config.netStackInstance.ipRouteConfig.ipV6DefaultGateway = self.vmotion['ipv6_gateway']
+ vmotion_config.netStackInstance.congestionControlAlgorithm = self.vmotion['congestion_algorithm']
+ vmotion_config.netStackInstance.requestedMaxNumberOfConnections = self.vmotion['max_num_connections']
+ self.new_net_stack_instance_configs.netStackSpec.append(vmotion_config)
+
+ if self.vxlan and self.enabled_net_stack_instance['vxlan']:
+ vxlan_config = vim.host.NetworkConfig.NetStackSpec()
+ vxlan_config.operation = 'edit'
+ vxlan_config.netStackInstance = vim.host.NetStackInstance()
+ vxlan_config.netStackInstance.key = self.net_stack_instance_keys['vxlan']
+ vxlan_config.netStackInstance.ipRouteConfig = vim.host.IpRouteConfig()
+ vxlan_config.netStackInstance.ipRouteConfig.defaultGateway = self.vxlan['gateway']
+ vxlan_config.netStackInstance.ipRouteConfig.ipV6DefaultGateway = self.vxlan['ipv6_gateway']
+ vxlan_config.netStackInstance.congestionControlAlgorithm = self.vxlan['congestion_algorithm']
+ vxlan_config.netStackInstance.requestedMaxNumberOfConnections = self.vxlan['max_num_connections']
+ self.new_net_stack_instance_configs.netStackSpec.append(vxlan_config)
+
+ def execute(self):
+ # The host name is unique in vCenter, so find the host from the whole.
+ self.host_obj = self.find_hostsystem_by_name(self.esxi_hostname)
+ if self.host_obj is None:
+ self.module.fail_json(msg="Cannot find the specified ESXi host: %s" % self.params['esxi_hostname'])
+
+ self.check_enabled_net_stack_instance()
+ self.get_net_stack_instance_config()
+ self.diff_net_stack_instance_config()
+ if self.change_flag:
+ if self.module.check_mode is False:
+ self.generate_net_stack_instance_config()
+ try:
+ self.host_obj.configManager.networkSystem.UpdateNetworkConfig(self.new_net_stack_instance_configs, 'modify')
+ except vim.fault.PlatformConfigFault as e:
+ self.module.fail_json(msg="cannot modify tcpip stack config: %s" % to_text(e.faultMessage[0].message))
+ except Exception as e:
+ self.module.fail_json(msg="cannot modify tcpip stack config: %s" % to_text(e.msg))
+
+ # Make a warning for the item if it isn't supported by ESXi when specified item.
+ for key, value in self.enabled_net_stack_instance.items():
+ if self.params[key] and value is False:
+ self.module.warn("%s isn't supported in %s" % (key, self.params['esxi_hostname']))
+
+ # Make the return value for the result.
+ result = dict(
+ changed=self.change_flag,
+ diff=dict(
+ before=OrderedDict(sorted(self.diff_config['before'].items())),
+ after=OrderedDict(sorted(self.diff_config['after'].items()))
+ )
+ )
+ for key, value in self.enabled_net_stack_instance.items():
+ if value:
+ result[key] = self.diff_config['after'][key]
+ else:
+ result[key] = {}
+ self.module.exit_json(**result)
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ esxi_hostname=dict(type='str', required=True),
+ default=dict(type='dict',
+ options=dict(
+ hostname=dict(type='str', required=True),
+ domain=dict(type='str', required=True),
+ preferred_dns=dict(type='str'),
+ alternate_dns=dict(type='str'),
+ search_domains=dict(type='list', elements='str', default=[]),
+ gateway=dict(type='str'),
+ ipv6_gateway=dict(type='str'),
+ congestion_algorithm=dict(type='str', choices=['newreno', 'cubic'], default='newreno'),
+ max_num_connections=dict(type='int', default=11000)
+
+ )),
+ provisioning=dict(type='dict',
+ options=dict(
+ gateway=dict(type='str'),
+ ipv6_gateway=dict(type='str'),
+ congestion_algorithm=dict(type='str', choices=['newreno', 'cubic'], default='newreno'),
+ max_num_connections=dict(type='int', default=11000)
+ )),
+ vmotion=dict(type='dict',
+ options=dict(
+ gateway=dict(type='str'),
+ ipv6_gateway=dict(type='str'),
+ congestion_algorithm=dict(type='str', choices=['newreno', 'cubic'], default='newreno'),
+ max_num_connections=dict(type='int', default=11000)
+ )),
+ vxlan=dict(type='dict',
+ aliases=['nsx_overlay'],
+ options=dict(
+ gateway=dict(type='str'),
+ ipv6_gateway=dict(type='str'),
+ congestion_algorithm=dict(type='str', choices=['newreno', 'cubic'], default='newreno'),
+ max_num_connections=dict(type='int', default=11000)
+ ))
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ vmware_host_tcpip_stack = VmwareHostTcpipStack(module)
+ vmware_host_tcpip_stack.execute()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_user_manager.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_user_manager.py
new file mode 100644
index 000000000..7a81f5f0a
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_user_manager.py
@@ -0,0 +1,262 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2022, sky-joker
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: vmware_host_user_manager
+short_description: Manage users of ESXi
+author:
+ - sky-joker (@sky-joker)
+description:
+ - This module can add, update or delete local users on ESXi host.
+version_added: '2.1.0'
+options:
+ esxi_hostname:
+ description:
+ - Name of the ESXi host that is managing the local user.
+ type: str
+ required: true
+ user_name:
+ description:
+ - Name of the local user.
+ aliases:
+ - local_user_name
+ type: str
+ required: true
+ user_password:
+ description:
+ - The local user password.
+      - To update the password, I(override_user_password) must be set to C(true).
+ aliases:
+ - local_user_password
+ type: str
+ user_description:
+ description:
+ - The local user description.
+ aliases:
+ - local_user_description
+ type: str
+ override_user_password:
+ description:
+      - If the local user exists, set this parameter to C(true) to update the password.
+ default: false
+ type: bool
+ state:
+ description:
+ - If set to C(present), add a new local user or update information.
+ - If set to C(absent), delete the local user.
+ default: present
+ type: str
+ choices:
+ - present
+ - absent
+extends_documentation_fragment:
+ - community.vmware.vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: Add new local user to ESXi host
+ community.vmware.vmware_host_user_manager:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ esxi_hostname: "{{ esxi1 }}"
+ user_name: example
+ user_description: "example user"
+ user_password: "{{ local_user_password }}"
+ state: present
+
+- name: Update the local user password in ESXi host
+ community.vmware.vmware_host_user_manager:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ esxi_hostname: "{{ esxi1 }}"
+ user_name: example
+ user_description: "example user"
+ user_password: "{{ local_user_password }}"
+ override_user_password: true
+ state: present
+
+- name: Delete the local user in ESXi host
+ community.vmware.vmware_host_user_manager:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ esxi_hostname: "{{ esxi1 }}"
+ user_name: example
+ state: absent
+'''
+
+RETURN = r'''
+msg:
+ description: The executed result for the module.
+ returned: always
+ type: str
+ sample: >-
+    {
+        "msg": "Added the new user."
+    }
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+
+
+class VmwareHostUserManager(PyVmomi):
+ def __init__(self, module):
+ super(VmwareHostUserManager, self).__init__(module)
+ self.esxi_hostname = module.params["esxi_hostname"]
+ self.user_name = module.params["user_name"]
+ self.user_password = module.params["user_password"]
+ self.user_description = module.params["user_description"]
+ self.override_user_password = module.params["override_user_password"]
+ self.state = module.params["state"]
+
+ def search_user(self):
+ """
+ Search the specified user from ESXi
+
+ Returns: searched user
+ """
+ searchStr = self.user_name
+ exactMatch = True
+ findUsers = True
+ findGroups = False
+ user_account = self.host_obj.configManager.userDirectory.RetrieveUserGroups(None, searchStr, None, None, exactMatch, findUsers, findGroups)
+ return user_account
+
+ def ensure_user_info_diff(self, user_account):
+ """
+ Ensure a user information difference.
+ The method can check a user description difference only.
+ Also, it can't get the set password via vSphere API.
+
+ Returns: bool
+ """
+ if user_account.fullName != self.user_description and self.user_description is not None:
+ return True
+
+ return False
+
+ def add_user(self):
+ """
+ Add a new user
+ """
+ user_spec = vim.host.LocalAccountManager.AccountSpecification(
+ id=self.user_name,
+ description=self.user_description,
+ password=self.user_password
+ )
+ try:
+ self.host_obj.configManager.accountManager.CreateUser(user_spec)
+ except Exception as e:
+ self.module.fail_json(msg="Failed to add a new user: %s" % to_text(e.msg))
+
+ def update_user(self):
+ """
+ Update a user information
+ """
+ user_spec = vim.host.LocalAccountManager.AccountSpecification(
+ id=self.user_name,
+ description=self.user_description
+ )
+
+ if self.user_password and self.override_user_password:
+ user_spec.password = self.user_password
+
+ try:
+ self.host_obj.configManager.accountManager.UpdateUser(user_spec)
+ except Exception as e:
+ self.module.fail_json(msg="Failed to update a new password: %s" % to_text(e))
+
+ def remove_user(self):
+ """
+ Remove a user
+ """
+ try:
+ self.host_obj.configManager.accountManager.RemoveUser(self.user_name)
+ except Exception as e:
+ self.module.fail_json(msg="Failed to remove a user: %s" % to_text(e.msg))
+
+ def execute(self):
+ # The host name is unique in vCenter, so find the host from the whole.
+ self.host_obj = self.find_hostsystem_by_name(self.esxi_hostname)
+ if self.host_obj is None:
+ self.module.fail_json(msg="Cannot find the specified ESXi host: %s" % self.params['esxi_hostname'])
+
+ # Search the specified user
+ user_account = self.search_user()
+
+ changed = False
+ msg = "The change will not occur for the user information."
+ if self.state == "present":
+ if user_account:
+ user_diff = self.ensure_user_info_diff(user_account[0])
+ # If you want to update a user password, require the override_user_passwd is true.
+ if user_diff or self.override_user_password:
+ changed = True
+ if self.module.check_mode:
+ msg = "The user information will be updated."
+ else:
+ msg = "Updated the user information."
+ self.update_user()
+ else:
+ changed = True
+ if self.module.check_mode:
+ msg = "The new user will be added."
+ else:
+ msg = "Added the new user."
+ self.add_user()
+
+ if self.state == "absent":
+ if user_account:
+ changed = True
+ if self.module.check_mode:
+ msg = "The user will be removed."
+ else:
+ msg = "Removed the user."
+ self.remove_user()
+
+ self.module.exit_json(changed=changed, msg=msg)
+
+
+def main():
+    """Module entry point: parse arguments and run VmwareHostUserManager."""
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(
+        esxi_hostname=dict(type="str", required=True),
+        user_name=dict(type="str", required=True, aliases=["local_user_name"]),
+        user_password=dict(type="str", aliases=["local_user_password"], no_log=True),
+        user_description=dict(type="str", aliases=["local_user_description"]),
+        # no_log=False silences Ansible's false-positive warning for
+        # parameter names containing 'password'; this flag itself is a bool.
+        override_user_password=dict(type="bool", default=False, no_log=False),
+        state=dict(type="str", default="present", choices=["present", "absent"])
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        # A password must be supplied when an override is requested.
+        required_if=[
+            ('override_user_password', True, ['user_password']),
+        ]
+    )
+    vmware_host_user_manager = VmwareHostUserManager(module)
+    vmware_host_user_manager.execute()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_vmhba_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_vmhba_info.py
new file mode 100644
index 000000000..5d625b774
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_vmhba_info.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_vmhba_info
+short_description: Gathers info about vmhbas available on the given ESXi host
+description:
+- This module can be used to gather information about vmhbas available on the given ESXi host.
+- If C(cluster_name) is provided, then vmhba information about all hosts from given cluster will be returned.
+- If C(esxi_hostname) is provided, then vmhba information about given host system will be returned.
+author:
+- Christian Kotte (@ckotte)
+options:
+ esxi_hostname:
+ description:
+ - Name of the host system to work with.
+ - Vmhba information about this ESXi server will be returned.
+ - This parameter is required if C(cluster_name) is not specified.
+ type: str
+ cluster_name:
+ description:
+ - Name of the cluster from which all host systems will be used.
+ - Vmhba information about each ESXi server will be returned for the given cluster.
+ - This parameter is required if C(esxi_hostname) is not specified.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather info about vmhbas of all ESXi Host in the given Cluster
+ community.vmware.vmware_host_vmhba_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: '{{ cluster_name }}'
+ delegate_to: localhost
+ register: cluster_host_vmhbas
+
+- name: Gather info about vmhbas of an ESXi Host
+ community.vmware.vmware_host_vmhba_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ delegate_to: localhost
+ register: host_vmhbas
+'''
+
+RETURN = r'''
+hosts_vmhbas_info:
+ description:
+ - dict with hostname as key and dict with vmhbas information as value.
+ returned: hosts_vmhbas_info
+ type: dict
+ sample:
+ {
+ "10.76.33.204": {
+ "vmhba_details": [
+ {
+ "adapter": "HPE Smart Array P440ar",
+ "bus": 3,
+ "device": "vmhba0",
+ "driver": "nhpsa",
+ "location": "0000:03:00.0",
+ "model": "Smart Array P440ar",
+ "node_wwn": "50:01:43:80:37:18:9e:a0",
+ "status": "unknown",
+ "type": "SAS"
+ },
+ {
+ "adapter": "QLogic Corp ISP2532-based 8Gb Fibre Channel to PCI Express HBA",
+ "bus": 5,
+ "device": "vmhba1",
+ "driver": "qlnativefc",
+ "location": "0000:05:00.0",
+ "model": "ISP2532-based 8Gb Fibre Channel to PCI Express HBA",
+ "node_wwn": "57:64:96:32:15:90:23:95:82",
+ "port_type": "unknown",
+ "port_wwn": "57:64:96:32:15:90:23:95:82",
+ "speed": 8,
+ "status": "online",
+ "type": "Fibre Channel"
+ },
+ {
+ "adapter": "QLogic Corp ISP2532-based 8Gb Fibre Channel to PCI Express HBA",
+ "bus": 8,
+ "device": "vmhba2",
+ "driver": "qlnativefc",
+ "location": "0000:08:00.0",
+ "model": "ISP2532-based 8Gb Fibre Channel to PCI Express HBA",
+ "node_wwn": "57:64:96:32:15:90:23:95:21",
+ "port_type": "unknown",
+ "port_wwn": "57:64:96:32:15:90:23:95:21",
+ "speed": 8,
+ "status": "online",
+ "type": "Fibre Channel"
+ }
+ ],
+ }
+ }
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+
+
+class HostVmhbaMgr(PyVmomi):
+ """Class to manage vmhba info"""
+
+ def __init__(self, module):
+ super(HostVmhbaMgr, self).__init__(module)
+ cluster_name = self.params.get('cluster_name', None)
+ esxi_host_name = self.params.get('esxi_hostname', None)
+ self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
+ if not self.hosts:
+ self.module.fail_json(msg="Failed to find host system.")
+
+ def gather_host_vmhba_info(self):
+ """Gather vmhba info"""
+ hosts_vmhba_info = {}
+ for host in self.hosts:
+ host_vmhba_info = dict()
+ host_st_system = host.configManager.storageSystem
+ if host_st_system:
+ device_info = host_st_system.storageDeviceInfo
+ host_vmhba_info['vmhba_details'] = []
+ for hba in device_info.hostBusAdapter:
+ hba_info = dict()
+ if hba.pci:
+ hba_info['location'] = hba.pci
+ for pci_device in host.hardware.pciDevice:
+ if pci_device.id == hba.pci:
+ hba_info['adapter'] = pci_device.vendorName + ' ' + pci_device.deviceName
+ break
+ else:
+ hba_info['location'] = 'PCI'
+ hba_info['device'] = hba.device
+ # contains type as string in format of 'key-vim.host.FibreChannelHba-vmhba1'
+ hba_type = hba.key.split(".")[-1].split("-")[0]
+ if hba_type == 'SerialAttachedHba':
+ hba_info['type'] = 'SAS'
+ elif hba_type == 'FibreChannelHba':
+ hba_info['type'] = 'Fibre Channel'
+ else:
+ hba_info['type'] = hba_type
+ hba_info['bus'] = hba.bus
+ hba_info['status'] = hba.status
+ hba_info['model'] = hba.model
+ hba_info['driver'] = hba.driver
+ try:
+ if isinstance(hba, (vim.host.FibreChannelHba, vim.host.FibreChannelOverEthernetHba)):
+ hba_info['node_wwn'] = self.format_number('%X' % hba.nodeWorldWideName)
+ else:
+ hba_info['node_wwn'] = self.format_number(hba.nodeWorldWideName)
+ except AttributeError:
+ pass
+ try:
+ if isinstance(hba, (vim.host.FibreChannelHba, vim.host.FibreChannelOverEthernetHba)):
+ hba_info['port_wwn'] = self.format_number('%X' % hba.portWorldWideName)
+ else:
+ hba_info['port_wwn'] = self.format_number(hba.portWorldWideName)
+ except AttributeError:
+ pass
+ try:
+ hba_info['port_type'] = hba.portType
+ except AttributeError:
+ pass
+ try:
+ hba_info['speed'] = hba.speed
+ except AttributeError:
+ pass
+ host_vmhba_info['vmhba_details'].append(hba_info)
+
+ hosts_vmhba_info[host.name] = host_vmhba_info
+ return hosts_vmhba_info
+
+ @staticmethod
+ def format_number(number):
+ """Format number"""
+ string = str(number)
+ return ':'.join(a + b for a, b in zip(string[::2], string[1::2]))
+
+
+def main():
+ """Main"""
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ cluster_name=dict(type='str', required=False),
+ esxi_hostname=dict(type='str', required=False),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=[
+ ['cluster_name', 'esxi_hostname'],
+ ],
+ supports_check_mode=True,
+ )
+
+ host_vmhba_mgr = HostVmhbaMgr(module)
+ module.exit_json(changed=False, hosts_vmhbas_info=host_vmhba_mgr.gather_host_vmhba_info())
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_host_vmnic_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_host_vmnic_info.py
new file mode 100644
index 000000000..02a5ac827
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_host_vmnic_info.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_vmnic_info
+short_description: Gathers info about vmnics available on the given ESXi host
+description:
+- This module can be used to gather information about vmnics available on the given ESXi host.
+- If C(cluster_name) is provided, then vmnic information about all hosts from given cluster will be returned.
+- If C(esxi_hostname) is provided, then vmnic information about given host system will be returned.
+- Additional details about vswitch and dvswitch with respective vmnic is also provided which is added in 2.7 version.
+- Additional details about lldp added in 1.11.0
+author:
+- Abhijeet Kasurde (@Akasurde)
+- Christian Kotte (@ckotte)
+options:
+ capabilities:
+ description:
+ - Gather information about general capabilities (Auto negotiation, Wake On LAN, and Network I/O Control).
+ type: bool
+ default: false
+ directpath_io:
+ description:
+ - Gather information about DirectPath I/O capabilities and configuration.
+ type: bool
+ default: false
+ sriov:
+ description:
+ - Gather information about SR-IOV capabilities and configuration.
+ type: bool
+ default: false
+ esxi_hostname:
+ description:
+ - Name of the host system to work with.
+ - Vmnic information about this ESXi server will be returned.
+ - This parameter is required if C(cluster_name) is not specified.
+ type: str
+ cluster_name:
+ description:
+ - Name of the cluster from which all host systems will be used.
+ - Vmnic information about each ESXi server will be returned for the given cluster.
+ - This parameter is required if C(esxi_hostname) is not specified.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather info about vmnics of all ESXi Host in the given Cluster
+ community.vmware.vmware_host_vmnic_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: '{{ cluster_name }}'
+ delegate_to: localhost
+ register: cluster_host_vmnics
+
+- name: Gather info about vmnics of an ESXi Host
+ community.vmware.vmware_host_vmnic_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ delegate_to: localhost
+ register: host_vmnics
+'''
+
+RETURN = r'''
+hosts_vmnics_info:
+ description:
+ - dict with hostname as key and dict with vmnics information as value.
+ - for C(num_vmnics), only NICs starting with vmnic are counted. NICs like vusb* are not counted.
+ - details about vswitch and dvswitch was added in version 2.7.
+ - details about vmnics was added in version 2.8.
+ - details about lldp was added in version 1.11.0
+ returned: hosts_vmnics_info
+ type: dict
+ sample:
+ {
+ "10.76.33.204": {
+ "all": [
+ "vmnic0",
+ "vmnic1"
+ ],
+ "available": [],
+ "dvswitch": {
+ "dvs_0002": [
+ "vmnic1"
+ ]
+ },
+ "num_vmnics": 2,
+ "used": [
+ "vmnic1",
+ "vmnic0"
+ ],
+ "vmnic_details": [
+ {
+ "actual_duplex": "Full Duplex",
+ "actual_speed": 10000,
+ "adapter": "Intel(R) 82599 10 Gigabit Dual Port Network Connection",
+ "configured_duplex": "Auto negotiate",
+ "configured_speed": "Auto negotiate",
+ "device": "vmnic0",
+ "driver": "ixgbe",
+ "lldp_info": {
+ "Aggregated Port ID": "0",
+ "Aggregation Status": "1",
+ "Enabled Capabilities": {
+ "_vimtype": "vim.host.PhysicalNic.CdpDeviceCapability",
+ "host": false,
+ "igmpEnabled": false,
+ "networkSwitch": false,
+ "repeater": false,
+ "router": true,
+ "sourceRouteBridge": false,
+ "transparentBridge": true
+ },
+ "MTU": "9216",
+ "Port Description": "switch port description",
+ "Samples": 18814,
+ "System Description": "omitted from output",
+ "System Name": "sw1",
+ "TimeOut": 30,
+ "Vlan ID": "1"
+ },
+ "location": "0000:01:00.0",
+ "mac": "aa:bb:cc:dd:ee:ff",
+ "status": "Connected",
+ },
+ {
+ "actual_duplex": "Full Duplex",
+ "actual_speed": 10000,
+ "adapter": "Intel(R) 82599 10 Gigabit Dual Port Network Connection",
+ "configured_duplex": "Auto negotiate",
+ "configured_speed": "Auto negotiate",
+ "device": "vmnic1",
+ "driver": "ixgbe",
+ "lldp_info": "N/A",
+ "location": "0000:01:00.1",
+ "mac": "ab:ba:cc:dd:ee:ff",
+ "status": "Connected",
+ },
+ ],
+ "vswitch": {
+ "vSwitch0": [
+ "vmnic0"
+ ]
+ }
+ }
+ }
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi, get_all_objs
+
+
class HostVmnicMgr(PyVmomi):
    """Collect physical NIC (vmnic) facts for the requested ESXi hosts.

    Depending on the module parameters, capability, DirectPath I/O and
    SR-IOV details are optionally added to each NIC entry.
    """

    def __init__(self, module):
        super(HostVmnicMgr, self).__init__(module)
        # Optional fact groups requested by the caller (module defaults: False).
        self.capabilities = self.params.get('capabilities')
        self.directpath_io = self.params.get('directpath_io')
        self.sriov = self.params.get('sriov')
        cluster_name = self.params.get('cluster_name', None)
        esxi_host_name = self.params.get('esxi_hostname', None)
        # Either a single named host or every host of the named cluster.
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        if not self.hosts:
            self.module.fail_json(msg="Failed to find host system.")

    def find_dvs_by_uuid(self, uuid=None):
        """Return the distributed virtual switch with the given UUID, or None."""
        dvs_obj = None
        if uuid is None:
            return dvs_obj

        dvswitches = get_all_objs(self.content, [vim.DistributedVirtualSwitch])
        for dvs in dvswitches:
            if dvs.uuid == uuid:
                dvs_obj = dvs
                break

        return dvs_obj

    def gather_host_vmnic_info(self):
        """Build the per-host vmnic facts returned by the module.

        Returns a dict keyed by host name; each value carries 'all',
        'available', 'used', 'vswitch', 'dvswitch', 'num_vmnics' and
        'vmnic_details'.
        """
        hosts_vmnic_info = {}
        for host in self.hosts:
            host_vmnic_info = dict(all=[], available=[], used=[], vswitch=dict(), dvswitch=dict())
            host_nw_system = host.configManager.networkSystem
            if host_nw_system:
                nw_config = host_nw_system.networkConfig
                # Only devices named vmnic* count towards num_vmnics; other
                # pnics (e.g. vusb*) still appear in 'all'.
                vmnics = [pnic.device for pnic in nw_config.pnic if pnic.device.startswith('vmnic')]
                host_vmnic_info['all'] = [pnic.device for pnic in nw_config.pnic]
                host_vmnic_info['num_vmnics'] = len(vmnics)
                host_vmnic_info['vmnic_details'] = []
                for pnic in host.config.network.pnic:
                    pnic_info = dict()
                    if pnic.device.startswith('vmnic'):
                        if pnic.pci:
                            pnic_info['location'] = pnic.pci
                            # Resolve the PCI address to a human-readable adapter name.
                            for pci_device in host.hardware.pciDevice:
                                if pci_device.id == pnic.pci:
                                    pnic_info['adapter'] = pci_device.vendorName + ' ' + pci_device.deviceName
                                    break
                        else:
                            pnic_info['location'] = 'PCI'
                        pnic_info['device'] = pnic.device
                        pnic_info['driver'] = pnic.driver
                        if pnic.linkSpeed:
                            # A populated linkSpeed object means the link is up.
                            pnic_info['status'] = 'Connected'
                            pnic_info['actual_speed'] = pnic.linkSpeed.speedMb
                            pnic_info['actual_duplex'] = 'Full Duplex' if pnic.linkSpeed.duplex else 'Half Duplex'
                            try:
                                # QueryNetworkHint returns observed LLDP/CDP data;
                                # self.to_json() presumably flattens the pyVmomi
                                # objects into plain dicts -- TODO confirm against
                                # module_utils.vmware.
                                network_hint = host_nw_system.QueryNetworkHint(pnic.device)
                                for hint in self.to_json(network_hint):
                                    if hint.get('lldpInfo'):
                                        pnic_info['lldp_info'] = {x['key']: x['value'] for x in hint['lldpInfo'].get('parameter')}
                                    else:
                                        pnic_info['lldp_info'] = 'N/A'
                                    if hint.get('connectedSwitchPort'):
                                        pnic_info['cdp_info'] = hint.get('connectedSwitchPort')
                                    else:
                                        pnic_info['cdp_info'] = 'N/A'
                            except (vmodl.fault.HostNotConnected, vmodl.fault.HostNotReachable):
                                # Host unreachable: report placeholders instead of failing.
                                pnic_info['lldp_info'] = 'N/A'
                                pnic_info['cdp_info'] = 'N/A'
                        else:
                            pnic_info['status'] = 'Disconnected'
                            pnic_info['actual_speed'] = 'N/A'
                            pnic_info['actual_duplex'] = 'N/A'
                            pnic_info['lldp_info'] = 'N/A'
                            pnic_info['cdp_info'] = 'N/A'
                        if pnic.spec.linkSpeed:
                            pnic_info['configured_speed'] = pnic.spec.linkSpeed.speedMb
                            pnic_info['configured_duplex'] = 'Full Duplex' if pnic.spec.linkSpeed.duplex else 'Half Duplex'
                        else:
                            # No fixed speed configured implies auto-negotiation.
                            pnic_info['configured_speed'] = 'Auto negotiate'
                            pnic_info['configured_duplex'] = 'Auto negotiate'
                        pnic_info['mac'] = pnic.mac
                        # General NIC capabilities
                        if self.capabilities:
                            pnic_info['nioc_status'] = 'Allowed' if pnic.resourcePoolSchedulerAllowed else 'Not allowed'
                            pnic_info['auto_negotiation_supported'] = pnic.autoNegotiateSupported
                            pnic_info['wake_on_lan_supported'] = pnic.wakeOnLanSupported
                        # DirectPath I/O and SR-IOV capabilities and configuration
                        if self.directpath_io:
                            pnic_info['directpath_io_supported'] = pnic.vmDirectPathGen2Supported
                        if self.directpath_io or self.sriov:
                            if pnic.pci:
                                for pci_device in host.configManager.pciPassthruSystem.pciPassthruInfo:
                                    if pci_device.id == pnic.pci:
                                        if self.directpath_io:
                                            pnic_info['passthru_enabled'] = pci_device.passthruEnabled
                                            pnic_info['passthru_capable'] = pci_device.passthruCapable
                                            pnic_info['passthru_active'] = pci_device.passthruActive
                                        if self.sriov:
                                            # Older hosts may not expose the SR-IOV
                                            # attributes at all, hence AttributeError.
                                            try:
                                                if pci_device.sriovCapable:
                                                    pnic_info['sriov_status'] = (
                                                        'Enabled' if pci_device.sriovEnabled else 'Disabled'
                                                    )
                                                    pnic_info['sriov_active'] = \
                                                        pci_device.sriovActive
                                                    pnic_info['sriov_virt_functions'] = \
                                                        pci_device.numVirtualFunction
                                                    pnic_info['sriov_virt_functions_requested'] = \
                                                        pci_device.numVirtualFunctionRequested
                                                    pnic_info['sriov_virt_functions_supported'] = \
                                                        pci_device.maxVirtualFunctionSupported
                                                else:
                                                    pnic_info['sriov_status'] = 'Not supported'
                                            except AttributeError:
                                                pnic_info['sriov_status'] = 'Not supported'
                        host_vmnic_info['vmnic_details'].append(pnic_info)

                vswitch_vmnics = []
                proxy_switch_vmnics = []
                if nw_config.vswitch:
                    for vswitch in nw_config.vswitch:
                        host_vmnic_info['vswitch'][vswitch.name] = []
                        # Workaround for "AttributeError: 'NoneType' object has no attribute 'nicDevice'"
                        # this issue doesn't happen every time; vswitch.spec.bridge.nicDevice exists!
                        try:
                            for vnic in vswitch.spec.bridge.nicDevice:
                                vswitch_vmnics.append(vnic)
                                host_vmnic_info['vswitch'][vswitch.name].append(vnic)
                        except AttributeError:
                            pass

                if nw_config.proxySwitch:
                    for proxy_config in nw_config.proxySwitch:
                        # A proxy switch is the host-side component of a dvswitch;
                        # map it back to the dvswitch by UUID for naming.
                        dvs_obj = self.find_dvs_by_uuid(uuid=proxy_config.uuid)
                        if dvs_obj:
                            host_vmnic_info['dvswitch'][dvs_obj.name] = []
                        for proxy_nic in proxy_config.spec.backing.pnicSpec:
                            proxy_switch_vmnics.append(proxy_nic.pnicDevice)
                            if dvs_obj:
                                host_vmnic_info['dvswitch'][dvs_obj.name].append(proxy_nic.pnicDevice)

                # NICs attached to neither a vswitch nor a dvswitch are 'available'.
                used_vmics = proxy_switch_vmnics + vswitch_vmnics
                host_vmnic_info['used'] = used_vmics
                host_vmnic_info['available'] = [pnic.device for pnic in nw_config.pnic if pnic.device not in used_vmics]

            hosts_vmnic_info[host.name] = host_vmnic_info
        return hosts_vmnic_info
+
+
def main():
    """Entry point: validate arguments and report vmnic facts for the targeted hosts."""
    spec = vmware_argument_spec()
    spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
        capabilities=dict(type='bool', required=False, default=False),
        directpath_io=dict(type='bool', required=False, default=False),
        sriov=dict(type='bool', required=False, default=False),
    )

    # Exactly one of cluster_name/esxi_hostname must be supplied; the module
    # only reads state, so check mode needs no special handling.
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[['cluster_name', 'esxi_hostname']],
        supports_check_mode=True,
    )

    manager = HostVmnicMgr(module)
    module.exit_json(changed=False, hosts_vmnics_info=manager.gather_host_vmnic_info())


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_local_role_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_local_role_info.py
new file mode 100644
index 000000000..cc9b15f2f
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_local_role_info.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_local_role_info
+short_description: Gather info about local roles on an ESXi host or vCenter
+description:
+ - This module can be used to gather information about local role info on an ESXi host or vCenter
+author:
+- Abhijeet Kasurde (@Akasurde)
+notes:
+ - Be sure that the user used for login, has the appropriate rights to view roles
+ - The module returns a list of dict in version 2.8 and above.
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather info about local role from an ESXi (or vCenter)
+ community.vmware.vmware_local_role_info:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ register: fact_details
+ delegate_to: localhost
+- name: Get Admin privileges
+ set_fact:
+ admin_priv: "{{ fact_details.local_role_info | selectattr('role_name', 'equalto', 'Admin') | map(attribute='privileges') | first }}"
+- debug:
+ msg: "{{ admin_priv }}"
+'''
+
+RETURN = r'''
+local_role_info:
+ description: A list of dict about role information present on ESXi host
+ returned: always
+ type: list
+ sample: [
+ {
+ "privileges": [
+ "Alarm.Acknowledge",
+ "Alarm.Create",
+ "Alarm.Delete",
+ "Alarm.DisableActions",
+ ],
+ "role_id": -12,
+ "role_info_label": "Ansible User",
+ "role_info_summary": "Ansible Automation user",
+ "role_name": "AnsiUser1",
+ "role_system": true
+ },
+ {
+ "privileges": [],
+ "role_id": -5,
+ "role_info_label": "No access",
+ "role_info_summary": "Used for restricting granted access",
+ "role_name": "NoAccess",
+ "role_system": true
+ },
+ {
+ "privileges": [
+ "System.Anonymous",
+ "System.View"
+ ],
+ "role_id": -3,
+ "role_info_label": "View",
+ "role_info_summary": "Visibility access (cannot be granted)",
+ "role_name": "View",
+ "role_system": true
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+
+
class VMwareLocalRoleInfo(PyVmomi):
    """Collect information about the roles defined on an ESXi host or vCenter."""

    def __init__(self, module):
        super(VMwareLocalRoleInfo, self).__init__(module)
        self.module = module
        self.params = module.params

        # Endpoints without an authorization manager cannot expose role data.
        if self.content.authorizationManager is None:
            self.module.fail_json(
                msg="Failed to get local authorization manager settings.",
                details="It seems that '%s' does not support this functionality" % self.params['hostname']
            )

    def gather_local_role_info(self):
        """Describe every local role as a dict and exit the module with the list."""
        role_entries = [
            dict(
                role_name=entry.name,
                role_id=entry.roleId,
                privileges=list(entry.privilege),
                role_system=entry.system,
                role_info_label=entry.info.label,
                role_info_summary=entry.info.summary,
            )
            for entry in self.content.authorizationManager.roleList
        ]

        self.module.exit_json(changed=False, local_role_info=role_entries)
+
+
def main():
    """Entry point: gather role facts (read-only, so check mode is supported)."""
    module = AnsibleModule(
        argument_spec=vmware_argument_spec(),
        supports_check_mode=True,
    )

    VMwareLocalRoleInfo(module).gather_local_role_info()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_local_role_manager.py b/ansible_collections/community/vmware/plugins/modules/vmware_local_role_manager.py
new file mode 100644
index 000000000..e4c672d2c
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_local_role_manager.py
@@ -0,0 +1,403 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Abhijeet Kasurde <akasurde@redhat.com>
+# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_local_role_manager
+short_description: Manage local roles on an ESXi host or vCenter
+description:
+ - This module can be used to manage local roles on an ESXi host or vCenter.
+author:
+- Abhijeet Kasurde (@Akasurde)
+- Christian Kotte (@ckotte)
+notes:
+ - Be sure that the user used for login, has the appropriate rights to create / delete / edit roles
+options:
+ local_role_name:
+ description:
+ - The local role name to be managed.
+ required: true
+ type: str
+ local_privilege_ids:
+ description:
+ - The list of privileges that role needs to have.
+ - Please see U(https://docs.vmware.com/en/VMware-vSphere/6.0/com.vmware.vsphere.security.doc/GUID-ED56F3C4-77D0-49E3-88B6-B99B8B437B62.html)
+ default: []
+ type: list
+ elements: str
+ state:
+ description:
+ - Indicate desired state of the role.
+ - If the role already exists when C(state=present), the role info is updated.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ force_remove:
+ description:
+ - If set to C(false) then prevents the role from being removed if any permissions are using it.
+ default: false
+ type: bool
+ action:
+ description:
+ - This parameter is only valid while updating an existing role with privileges.
+ - C(add) will add the privileges to the existing privilege list.
+ - C(remove) will remove the privileges from the existing privilege list.
+ - C(set) will replace the privileges of the existing privileges with user defined list of privileges.
+ default: set
+ choices: [ add, remove, set ]
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Add local role to ESXi
+ community.vmware.vmware_local_role_manager:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ local_role_name: vmware_qa
+ state: present
+ delegate_to: localhost
+
+- name: Add local role with privileges to vCenter
+ community.vmware.vmware_local_role_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ local_role_name: vmware_qa
+ local_privilege_ids: [ 'Folder.Create', 'Folder.Delete']
+ state: present
+ delegate_to: localhost
+
+- name: Remove local role from ESXi
+ community.vmware.vmware_local_role_manager:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ local_role_name: vmware_qa
+ state: absent
+ delegate_to: localhost
+
+- name: Add a privilege to an existing local role
+ community.vmware.vmware_local_role_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ local_role_name: vmware_qa
+ local_privilege_ids: [ 'Folder.Create' ]
+ action: add
+ delegate_to: localhost
+
+- name: Remove a privilege to an existing local role
+ community.vmware.vmware_local_role_manager:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ local_role_name: vmware_qa
+ local_privilege_ids: [ 'Folder.Create' ]
+ action: remove
+ delegate_to: localhost
+
+- name: Set a privilege to an existing local role
+ community.vmware.vmware_local_role_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ local_role_name: vmware_qa
+ local_privilege_ids: [ 'Folder.Create' ]
+ action: set
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+role_name:
+ description: Name of local role
+ returned: always
+ type: str
+role_id:
+ description: Generated local role id
+ returned: always
+ type: int
+privileges:
+ description: List of privileges
+ returned: always
+ type: list
+privileges_previous:
+ description: List of privileges of role before the update
+ returned: on update
+ type: list
+# NOTE: the following keys are deprecated from 2.11 onwards
+local_role_name:
+ description: Name of local role
+ returned: always
+ type: str
+new_privileges:
+ description: List of privileges
+ returned: always
+ type: list
+old_privileges:
+ description: List of privileges of role before the update
+ returned: on update
+ type: list
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+
+
class VMwareLocalRoleManager(PyVmomi):
    """Create, delete, or update a local authorization role on an ESXi host or vCenter.

    process_state() resolves the pair (desired state, current presence) to one
    of the state_* handlers; every handler terminates the module via
    exit_json()/fail_json().
    """

    def __init__(self, module):
        super(VMwareLocalRoleManager, self).__init__(module)
        self.module = module
        self.params = module.params
        self.role_name = self.params['local_role_name']
        self.state = self.params['state']
        self.priv_ids = self.params['local_privilege_ids']
        # The API's failIfUsed flag is the inverse of force_remove:
        # force_remove=True means "remove even if permissions still use the role".
        self.force = not self.params['force_remove']
        self.current_role = None
        self.action = self.params['action']

        if self.content.authorizationManager is None:
            self.module.fail_json(
                msg="Failed to get local authorization manager settings.",
                details="It seems that '%s' does not support this functionality" % self.params['hostname']
            )

    def process_state(self):
        """Dispatch to the handler matching (desired state, current state)."""
        local_role_manager_states = {
            'absent': {
                'present': self.state_remove_role,
                'absent': self.state_exit_unchanged,
            },
            'present': {
                'present': self.state_update_role,
                'absent': self.state_create_role,
            }
        }
        try:
            local_role_manager_states[self.state][self.check_local_role_manager_state()]()
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except Exception as e:
            # Top-level module boundary: surface anything unexpected as a failure.
            self.module.fail_json(msg=str(e))

    def check_local_role_manager_state(self):
        """Return 'present'/'absent'; caches the role object when it exists."""
        auth_role = self.find_authorization_role()
        if auth_role:
            self.current_role = auth_role
            return 'present'
        return 'absent'

    def find_authorization_role(self):
        """Return the role whose name matches role_name, or None."""
        desired_role = None
        for role in self.content.authorizationManager.roleList:
            if role.name == self.role_name:
                desired_role = role
        return desired_role

    def state_create_role(self):
        """Create the role (honoring check mode) and exit."""
        role_id = None
        results = dict()
        results['role_name'] = self.role_name
        results['privileges'] = self.priv_ids
        # NOTE: the following code is deprecated from 2.11 onwards
        results['local_role_name'] = self.role_name
        results['new_privileges'] = self.priv_ids

        if self.module.check_mode:
            results['msg'] = "Role would be created"
        else:
            try:
                role_id = self.content.authorizationManager.AddAuthorizationRole(
                    name=self.role_name,
                    privIds=self.priv_ids
                )
                results['role_id'] = role_id
                results['msg'] = "Role created"
            except vim.fault.AlreadyExists as already_exists:
                self.module.fail_json(
                    msg="Failed to create role '%s' as the user specified role name already exists." %
                    self.role_name, details=already_exists.msg
                )
            except vim.fault.InvalidName as invalid_name:
                self.module.fail_json(
                    msg="Failed to create a role %s as the user specified role name is empty" %
                    self.role_name, details=invalid_name.msg
                )
            except vmodl.fault.InvalidArgument as invalid_argument:
                # Bug fix: the keyword below was misspelled 'etails', which would
                # have raised a TypeError instead of reporting the fault details.
                self.module.fail_json(
                    msg="Failed to create a role %s as the user specified privileges are unknown" %
                    self.role_name, details=invalid_argument.msg
                )
        self.module.exit_json(changed=True, result=results)

    def state_remove_role(self):
        """Remove the existing role (honoring check mode) and exit."""
        results = dict()
        results['role_name'] = self.role_name
        results['role_id'] = self.current_role.roleId
        # NOTE: the following code is deprecated from 2.11 onwards
        results['local_role_name'] = self.role_name
        if self.module.check_mode:
            results['msg'] = "Role would be deleted"
        else:
            try:
                self.content.authorizationManager.RemoveAuthorizationRole(
                    roleId=self.current_role.roleId,
                    failIfUsed=self.force
                )
                results['msg'] = "Role deleted"
            except vim.fault.NotFound as not_found:
                self.module.fail_json(
                    msg="Failed to remove a role %s as the user specified role name does not exist." %
                    self.role_name, details=not_found.msg
                )
            except vim.fault.RemoveFailed as remove_failed:
                # RemoveFailed is raised when failIfUsed=True and permissions
                # still reference the role; suggest force_remove in that case.
                msg = "Failed to remove role '%s' as the user specified role name." % self.role_name
                if self.force:
                    msg += " Use force_remove as True."
                self.module.fail_json(msg=msg, details=remove_failed.msg)
            except vmodl.fault.InvalidArgument as invalid_argument:
                self.module.fail_json(
                    msg="Failed to remove a role %s as the user specified role is a system role" %
                    self.role_name, details=invalid_argument.msg
                )
        self.module.exit_json(changed=True, result=results)

    def state_exit_unchanged(self):
        """Role is already absent: report no change and exit."""
        results = dict()
        results['role_name'] = self.role_name
        # NOTE: the following code is deprecated from 2.11 onwards
        results['local_role_name'] = self.role_name
        results['msg'] = "Role not present"
        self.module.exit_json(changed=False, result=results)

    def state_update_role(self):
        """Reconcile the role's privileges per ``action`` (add/set/remove) and exit."""
        changed = False
        changed_privileges = []
        results = dict()
        results['role_name'] = self.role_name
        results['role_id'] = self.current_role.roleId
        # NOTE: the following code is deprecated from 2.11 onwards
        results['local_role_name'] = self.role_name

        current_privileges = self.current_role.privilege
        results['privileges'] = current_privileges
        # NOTE: the following code is deprecated from 2.11 onwards
        results['new_privileges'] = current_privileges

        if self.action == 'add':
            # Add to existing privileges
            for priv in self.params['local_privilege_ids']:
                if priv not in current_privileges:
                    changed_privileges.append(priv)
                    changed = True
            if changed:
                changed_privileges.extend(current_privileges)
        elif self.action == 'set':
            # Set given privileges
            # Add system-defined privileges, "System.Anonymous", "System.View", and "System.Read".
            # Build a new list instead of extending the params list in place so
            # the module's input parameters are not mutated as a side effect.
            changed_privileges = self.params['local_privilege_ids'] + ['System.Anonymous', 'System.Read', 'System.View']
            changes_applied = list(set(current_privileges) ^ set(changed_privileges))
            if changes_applied:
                changed = True
        elif self.action == 'remove':
            changed_privileges = list(current_privileges)
            # Remove given privileges from existing privileges
            for priv in self.params['local_privilege_ids']:
                if priv in current_privileges:
                    changed = True
                    changed_privileges.remove(priv)

        if changed:
            results['privileges'] = changed_privileges
            results['privileges_previous'] = current_privileges
            # NOTE: the following code is deprecated from 2.11 onwards
            results['new_privileges'] = changed_privileges
            results['old_privileges'] = current_privileges
            if self.module.check_mode:
                results['msg'] = "Role privileges would be updated"
            else:
                try:
                    self.content.authorizationManager.UpdateAuthorizationRole(
                        roleId=self.current_role.roleId,
                        newName=self.current_role.name,
                        privIds=changed_privileges
                    )
                    results['msg'] = "Role privileges updated"
                except vim.fault.NotFound as not_found:
                    self.module.fail_json(
                        msg="Failed to update role. Please check privileges provided for update", details=not_found.msg
                    )
                except vim.fault.InvalidName as invalid_name:
                    self.module.fail_json(
                        msg="Failed to update role as role name is empty", details=invalid_name.msg
                    )
                except vim.fault.AlreadyExists as already_exists:
                    self.module.fail_json(
                        msg="Failed to update role", details=already_exists.msg
                    )
                except vmodl.fault.InvalidArgument as invalid_argument:
                    self.module.fail_json(
                        msg="Failed to update role as user specified role is system role which can not be changed",
                        details=invalid_argument.msg
                    )
                except vim.fault.NoPermission as no_permission:
                    self.module.fail_json(
                        msg="Failed to update role as current session doesn't have any privilege to update specified role",
                        details=no_permission.msg
                    )
        else:
            results['msg'] = "Role privileges are properly configured"

        self.module.exit_json(changed=changed, result=results)
+
+
def main():
    """Entry point: build the argument spec and run the role state machine."""
    spec = vmware_argument_spec()
    spec.update(
        local_role_name=dict(required=True, type='str'),
        local_privilege_ids=dict(default=[], type='list', elements='str'),
        force_remove=dict(default=False, type='bool'),
        action=dict(type='str', default='set', choices=['add', 'set', 'remove']),
        state=dict(default='present', choices=['present', 'absent'], type='str'),
    )

    module = AnsibleModule(
        argument_spec=spec,
        supports_check_mode=True,
    )

    VMwareLocalRoleManager(module).process_state()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_local_user_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_local_user_info.py
new file mode 100644
index 000000000..bd4ef522f
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_local_user_info.py
@@ -0,0 +1,155 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_local_user_info
+short_description: Gather info about users on the given ESXi host
+description:
+ - This module can be used to gather information about users present on the given ESXi host system in VMware infrastructure.
+ - All variables and VMware object names are case sensitive.
+ - User must hold the 'Authorization.ModifyPermissions' privilege to invoke this module.
+author:
+- Abhijeet Kasurde (@Akasurde)
+- Christian Kotte (@ckotte)
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather info about all Users on given ESXi host system
+ community.vmware.vmware_local_user_info:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ delegate_to: localhost
+ register: all_user_info
+'''
+
+RETURN = r'''
+local_user_info:
+ description: metadata about all local users
+ returned: always
+ type: dict
+ sample: [
+ {
+ "role": "admin",
+ "description": "Administrator",
+ "group": false,
+ "user_id": 0,
+ "user_name": "root",
+ "shell_access": true
+ },
+ {
+ "role": "admin",
+ "description": "DCUI User",
+ "group": false,
+ "user_id": 100,
+ "user_name": "dcui",
+ "shell_access": false
+ },
+ ]
+'''
+
+try:
+ from pyVmomi import vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+from ansible.module_utils._text import to_native
+
+
class VMwareUserInfoManager(PyVmomi):
    """Gather information about local user accounts on an ESXi host."""

    def __init__(self, module):
        super(VMwareUserInfoManager, self).__init__(module)

        # Local accounts only exist on ESXi; bail out early when pointed at vCenter.
        if self.is_vcenter():
            self.module.fail_json(
                msg="Failed to get local account manager settings.",
                details="It seems that '%s' is a vCenter server instead of an ESXi server" % self.module.params['hostname']
            )

    def gather_user_info(self):
        """Collect every local user plus its root-folder role and exit the module."""
        results = dict(changed=False, local_user_info=[])
        search_string = ''
        exact_match = False
        find_users = True
        find_groups = False
        user_accounts = self.content.userDirectory.RetrieveUserGroups(
            None, search_string, None, None, exact_match, find_users, find_groups
        )
        if user_accounts:
            # The root-folder permission list does not depend on the user being
            # inspected, so fetch it once up front instead of once per account
            # (the previous implementation re-queried it inside the loop).
            try:
                permissions = self.content.authorizationManager.RetrieveEntityPermissions(
                    entity=self.content.rootFolder,
                    inherited=False
                )
            except vmodl.fault.ManagedObjectNotFound as not_found:
                self.module.fail_json(
                    msg="The entity doesn't exist: %s" % to_native(not_found)
                )
            for user in user_accounts:
                temp_user = dict()
                temp_user['user_name'] = user.principal
                temp_user['description'] = user.fullName
                temp_user['group'] = user.group
                temp_user['user_id'] = user.id
                temp_user['shell_access'] = user.shellAccess
                temp_user['role'] = None
                # First permission entry matching the principal wins.
                for permission in permissions:
                    if permission.principal == user.principal:
                        temp_user['role'] = self.get_role_name(permission.roleId, self.content.authorizationManager.roleList)
                        break

                results['local_user_info'].append(temp_user)
        self.module.exit_json(**results)

    @staticmethod
    def get_role_name(role_id, role_list):
        """Map a numeric role ID to its name.

        IDs -5/-2/-1 are the built-in no-access/read-only/admin roles;
        any other ID is looked up in the supplied role list.
        """
        role_name = None
        # Default role: No access
        if role_id == -5:
            role_name = 'no-access'
        # Default role: Read-only
        elif role_id == -2:
            role_name = 'read-only'
        # Default role: Administrator
        elif role_id == -1:
            role_name = 'admin'
        # Custom roles
        else:
            for role in role_list:
                if role.roleId == role_id:
                    role_name = role.name
                    break
        return role_name
+
+
def main():
    """Entry point: gather local user facts (read-only, so check mode is supported)."""
    module = AnsibleModule(
        argument_spec=vmware_argument_spec(),
        supports_check_mode=True,
    )

    VMwareUserInfoManager(module).gather_user_info()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_local_user_manager.py b/ansible_collections/community/vmware/plugins/modules/vmware_local_user_manager.py
new file mode 100644
index 000000000..3a95ae2d2
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_local_user_manager.py
@@ -0,0 +1,183 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, IBM Corp
+# Author(s): Andreas Nafpliotis <nafpliot@de.ibm.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_local_user_manager
+short_description: Manage local users on an ESXi host
+description:
+ - Manage local users on an ESXi host
+author:
+- Andreas Nafpliotis (@nafpliot-ibm)
+notes:
+  - Be sure that the ESXi user used for login has the appropriate rights to create / delete / edit users
+options:
+ local_user_name:
+ description:
+ - The local user name to be changed.
+ required: true
+ type: str
+ local_user_password:
+ description:
+ - The password to be set.
+ required: false
+ type: str
+ local_user_description:
+ description:
+ - Description for the user.
+ required: false
+ type: str
+ state:
+ description:
+ - Indicate desired state of the user. If the user already exists when C(state=present), the user info is updated
+ choices: ['present', 'absent']
+ default: present
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Add local user to ESXi
+ community.vmware.vmware_local_user_manager:
+ hostname: esxi_hostname
+ username: root
+ password: vmware
+ local_user_name: foo
+ local_user_password: password
+ delegate_to: localhost
+'''
+
+RETURN = r'''# '''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+
+
class VMwareLocalUserManager(PyVmomi):
    """Manage (create / update / remove) a local user account on an ESXi host."""

    def __init__(self, module):
        super(VMwareLocalUserManager, self).__init__(module)
        self.local_user_name = self.module.params['local_user_name']
        self.local_user_password = self.module.params['local_user_password']
        self.local_user_description = self.module.params['local_user_description']
        self.state = self.module.params['state']

        # Local accounts only exist on ESXi; bail out early when pointed at vCenter.
        if self.is_vcenter():
            self.module.fail_json(msg="Failed to get local account manager settings "
                                      "from ESXi server: %s" % self.module.params['hostname'],
                                  details="It seems that %s is a vCenter server instead of an "
                                          "ESXi server" % self.module.params['hostname'])

    def process_state(self):
        """Dispatch to the handler matching desired state x current state."""
        try:
            local_account_manager_states = {
                'absent': {
                    'present': self.state_remove_user,
                    'absent': self.state_exit_unchanged,
                },
                'present': {
                    'present': self.state_update_user,
                    'absent': self.state_create_user,
                }
            }

            local_account_manager_states[self.state][self.check_local_user_manager_state()]()
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except Exception as e:
            self.module.fail_json(msg=str(e))

    def check_local_user_manager_state(self):
        """Return 'present' if the user already exists on the host, else 'absent'."""
        return 'present' if self.find_user_account() else 'absent'

    def find_user_account(self):
        """Look up the user by exact name; returns the (possibly empty) result list."""
        search_string = self.local_user_name
        exact_match = True
        find_users = True
        find_groups = False
        return self.content.userDirectory.RetrieveUserGroups(
            None, search_string, None, None, exact_match, find_users, find_groups)

    def create_account_spec(self):
        """Build the LocalAccountManager account specification from module params."""
        account_spec = vim.host.LocalAccountManager.AccountSpecification()
        account_spec.id = self.local_user_name
        account_spec.password = self.local_user_password
        account_spec.description = self.local_user_description
        return account_spec

    def _apply_account_change(self, operation):
        """Run an accountManager operation and report the result.

        Exits the module with changed=True on success; on an API fault,
        fails the module with the fault's message. Factored out so the
        create/update/remove handlers share one error-handling path.
        """
        try:
            operation()
            self.module.exit_json(changed=True)
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)

    def state_create_user(self):
        """Create the local user account."""
        account_spec = self.create_account_spec()
        self._apply_account_change(lambda: self.content.accountManager.CreateUser(account_spec))

    def state_update_user(self):
        """Update password/description of the existing local user account."""
        account_spec = self.create_account_spec()
        self._apply_account_change(lambda: self.content.accountManager.UpdateUser(account_spec))

    def state_remove_user(self):
        """Remove the local user account."""
        self._apply_account_change(lambda: self.content.accountManager.RemoveUser(self.local_user_name))

    def state_exit_unchanged(self):
        """Desired state already satisfied; report no change."""
        self.module.exit_json(changed=False)
+
+
def main():
    """Entry point for the vmware_local_user_manager module."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        local_user_name=dict(required=True, type='str'),
        local_user_password=dict(type='str', no_log=True),
        local_user_description=dict(type='str'),
        state=dict(default='present', choices=['present', 'absent'], type='str'),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_if=[
            ['state', 'present', ['local_user_password']]
        ],
        supports_check_mode=False,
    )

    VMwareLocalUserManager(module).process_state()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_maintenancemode.py b/ansible_collections/community/vmware/plugins/modules/vmware_maintenancemode.py
new file mode 100644
index 000000000..ed198b7cf
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_maintenancemode.py
@@ -0,0 +1,201 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, VMware, Inc.
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_maintenancemode
+short_description: Place a host into maintenance mode
+description:
+ - This module can be used for placing a ESXi host into maintenance mode.
+ - Support for VSAN compliant maintenance mode when selected.
+author:
+- Jay Jahns (@jjahns) <jjahns@vmware.com>
+- Abhijeet Kasurde (@Akasurde)
+options:
+ esxi_hostname:
+ description:
+ - Name of the host as defined in vCenter.
+ required: true
+ type: str
+ vsan:
+ description:
+ - Specify which VSAN compliant mode to enter.
+ choices:
+ - 'ensureObjectAccessibility'
+ - 'evacuateAllData'
+ - 'noAction'
+ required: false
+ aliases: [ 'vsan_mode' ]
+ type: str
+ evacuate:
+ description:
+ - If set to C(true), evacuate all powered off VMs.
+ default: false
+ required: false
+ type: bool
+ timeout:
+ description:
+ - Specify a timeout for the operation.
+ required: false
+ default: 0
+ type: int
+ state:
+ description:
+ - Enter or exit maintenance mode.
+ choices:
+ - present
+ - absent
+ default: present
+ required: false
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Enter VSAN-Compliant Maintenance Mode
+ community.vmware.vmware_maintenancemode:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ esxi_hostname }}"
+ vsan: ensureObjectAccessibility
+ evacuate: true
+ timeout: 3600
+ state: present
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+hostsystem:
+ description: Name of vim reference
+ returned: always
+ type: str
+ sample: "'vim.HostSystem:host-236'"
+hostname:
+ description: Name of host in vCenter
+ returned: always
+ type: str
+ sample: "esxi.local.domain"
+status:
+ description: Action taken
+ returned: always
+ type: str
+ sample: "ENTER"
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, TaskError, vmware_argument_spec, wait_for_task
+from ansible.module_utils._text import to_native
+
+
class VmwareMaintenanceMgr(PyVmomi):
    """Enter or exit maintenance mode on a single ESXi host."""

    def __init__(self, module):
        super(VmwareMaintenanceMgr, self).__init__(module)
        self.esxi_hostname = self.module.params.get('esxi_hostname')
        self.vsan = self.module.params.get('vsan', None)
        self.host = self.find_hostsystem_by_name(host_name=self.esxi_hostname)
        if not self.host:
            self.module.fail_json(msg='Host %s not found in vCenter' % self.esxi_hostname)

    def EnterMaintenanceMode(self):
        """Place the host into maintenance mode, honoring check mode."""
        # Idempotency: nothing to do if the host is already in maintenance mode.
        if self.host.runtime.inMaintenanceMode:
            self.module.exit_json(changed=False,
                                  hostsystem=str(self.host),
                                  hostname=self.esxi_hostname,
                                  status='NO_ACTION',
                                  msg='Host %s already in maintenance mode' % self.esxi_hostname)

        spec = vim.host.MaintenanceSpec()
        if self.vsan:
            # Attach the requested VSAN-compliant decommission behavior.
            spec.vsanMode = vim.vsan.host.DecommissionMode()
            spec.vsanMode.objectAction = self.vsan

        try:
            if self.module.check_mode:
                success = True
            else:
                task = self.host.EnterMaintenanceMode_Task(self.module.params['timeout'],
                                                           self.module.params['evacuate'],
                                                           spec)
                success, result = wait_for_task(task)

            self.module.exit_json(changed=success,
                                  hostsystem=str(self.host),
                                  hostname=self.esxi_hostname,
                                  status='ENTER',
                                  msg='Host %s entered maintenance mode' % self.esxi_hostname)
        except TaskError as e:
            self.module.fail_json(msg='Host %s failed to enter maintenance mode due to %s' % (self.esxi_hostname, to_native(e)))

    def ExitMaintenanceMode(self):
        """Take the host out of maintenance mode, honoring check mode."""
        if not self.host.runtime.inMaintenanceMode:
            self.module.exit_json(changed=False,
                                  hostsystem=str(self.host),
                                  hostname=self.esxi_hostname,
                                  status='NO_ACTION',
                                  msg='Host %s not in maintenance mode' % self.esxi_hostname)

        try:
            if self.module.check_mode:
                success = True
            else:
                task = self.host.ExitMaintenanceMode_Task(self.module.params['timeout'])
                success, result = wait_for_task(task)

            self.module.exit_json(changed=success,
                                  hostsystem=str(self.host),
                                  hostname=self.esxi_hostname,
                                  status='EXIT',
                                  msg='Host %s exited maintenance mode' % self.esxi_hostname)
        except TaskError as e:
            self.module.fail_json(msg='Host %s failed to exit maintenance mode due to %s' % (self.esxi_hostname, to_native(e)))
+
+
def main():
    """Entry point for the vmware_maintenancemode module."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        esxi_hostname=dict(type='str', required=True),
        vsan=dict(type='str',
                  choices=['ensureObjectAccessibility',
                           'evacuateAllData',
                           'noAction'],
                  aliases=['vsan_mode']),
        evacuate=dict(type='bool', default=False),
        timeout=dict(default=0, type='int'),
        state=dict(required=False, default='present', choices=['present', 'absent']),
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    manager = VmwareMaintenanceMgr(module=module)

    if module.params['state'] == 'present':
        manager.EnterMaintenanceMode()
    elif module.params['state'] == 'absent':
        manager.ExitMaintenanceMode()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_migrate_vmk.py b/ansible_collections/community/vmware/plugins/modules/vmware_migrate_vmk.py
new file mode 100644
index 000000000..f14fac601
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_migrate_vmk.py
@@ -0,0 +1,230 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_migrate_vmk
+short_description: Migrate a VMK interface from VSS to VDS
+description:
+ - Migrate a VMK interface from VSS to VDS
+author:
+- Joseph Callen (@jcpowermac)
+- Russell Teague (@mtnbikenc)
+options:
+ esxi_hostname:
+ description:
+ - ESXi hostname to be managed
+ required: true
+ type: str
+ device:
+ description:
+ - VMK interface name
+ required: true
+ type: str
+ current_switch_name:
+ description:
+ - Switch VMK interface is currently on
+ required: true
+ type: str
+ current_portgroup_name:
+ description:
+ - Portgroup name VMK interface is currently on
+ required: true
+ type: str
+ migrate_switch_name:
+ description:
+ - Switch name to migrate VMK interface to
+ required: true
+ type: str
+ migrate_portgroup_name:
+ description:
+ - Portgroup name to migrate VMK interface to
+ required: true
+ type: str
+ migrate_vlan_id:
+ version_added: '2.4.0'
+ description:
+ - VLAN to use for the VMK interface when migrating from VDS to VSS
+ - Will be ignored when migrating from VSS to VDS
+ type: int
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Migrate Management vmk
+ community.vmware.vmware_migrate_vmk:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ esxi_hostname }}"
+ device: vmk1
+ current_switch_name: temp_vswitch
+ current_portgroup_name: esx-mgmt
+ migrate_switch_name: dvSwitch
+ migrate_portgroup_name: Management
+ delegate_to: localhost
+'''
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ vmware_argument_spec, find_dvs_by_name, find_hostsystem_by_name,
+ connect_to_api, find_dvspg_by_name)
+
+
class VMwareMigrateVmk(object):
    """Migrate a VMkernel (vmk) interface between a standard vSwitch (VSS)
    and a distributed vSwitch (VDS), in either direction.
    """

    def __init__(self, module):
        self.module = module
        # Resolved later by check_vmk_current_state().
        self.host_system = None
        self.migrate_switch_name = self.module.params['migrate_switch_name']
        self.migrate_portgroup_name = self.module.params['migrate_portgroup_name']
        self.migrate_vlan_id = self.module.params['migrate_vlan_id']
        self.device = self.module.params['device']
        self.esxi_hostname = self.module.params['esxi_hostname']
        self.current_portgroup_name = self.module.params['current_portgroup_name']
        self.current_switch_name = self.module.params['current_switch_name']
        self.content = connect_to_api(module)

    def process_state(self):
        """Determine where the vmk interface currently lives and invoke the
        matching migration handler; API faults become module failures."""
        try:
            vmk_migration_states = {
                'migrate_vss_vds': self.state_migrate_vss_vds,
                'migrate_vds_vss': self.state_migrate_vds_vss,
                'migrated': self.state_exit_unchanged
            }

            vmk_migration_states[self.check_vmk_current_state()]()

        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except Exception as e:
            self.module.fail_json(msg=str(e))

    def state_exit_unchanged(self):
        """Interface is already where it should be; report no change."""
        self.module.exit_json(changed=False)

    def create_host_vnic_config_vds_vss(self):
        """Build the vnic 'edit' spec pointing the interface at the target
        standard-switch portgroup (VDS -> VSS direction)."""
        host_vnic_config = vim.host.VirtualNic.Config()
        host_vnic_config.spec = vim.host.VirtualNic.Specification()
        host_vnic_config.changeOperation = "edit"
        host_vnic_config.device = self.device
        host_vnic_config.spec.portgroup = self.migrate_portgroup_name
        return host_vnic_config

    def create_port_group_config_vds_vss(self):
        """Build the 'add' spec creating the target portgroup on the standard
        switch (VDS -> VSS direction)."""
        port_group_config = vim.host.PortGroup.Config()
        port_group_config.spec = vim.host.PortGroup.Specification()
        port_group_config.changeOperation = "add"
        port_group_config.spec.name = self.migrate_portgroup_name
        # VLAN 0 (untagged) when no migrate_vlan_id was supplied.
        port_group_config.spec.vlanId = self.migrate_vlan_id if self.migrate_vlan_id is not None else 0
        port_group_config.spec.vswitchName = self.migrate_switch_name
        port_group_config.spec.policy = vim.host.NetworkPolicy()
        return port_group_config

    def state_migrate_vds_vss(self):
        """Move the vmk interface from the VDS back onto a standard switch.

        Two separate UpdateNetworkConfig calls are required: the portgroup
        must exist on the VSS before the vnic can be re-pointed at it.
        """
        host_network_system = self.host_system.configManager.networkSystem
        config = vim.host.NetworkConfig()
        config.portgroup = [self.create_port_group_config_vds_vss()]
        host_network_system.UpdateNetworkConfig(config, "modify")
        config = vim.host.NetworkConfig()
        config.vnic = [self.create_host_vnic_config_vds_vss()]
        host_network_system.UpdateNetworkConfig(config, "modify")
        self.module.exit_json(changed=True)

    def create_host_vnic_config(self, dv_switch_uuid, portgroup_key):
        """Build the vnic 'edit' spec connecting the interface to the given
        distributed portgroup (VSS -> VDS direction)."""
        host_vnic_config = vim.host.VirtualNic.Config()
        host_vnic_config.spec = vim.host.VirtualNic.Specification()

        host_vnic_config.changeOperation = "edit"
        host_vnic_config.device = self.device
        # Clear the standard portgroup binding; the DV port connection below replaces it.
        host_vnic_config.portgroup = ""
        host_vnic_config.spec.distributedVirtualPort = vim.dvs.PortConnection()
        host_vnic_config.spec.distributedVirtualPort.switchUuid = dv_switch_uuid
        host_vnic_config.spec.distributedVirtualPort.portgroupKey = portgroup_key

        return host_vnic_config

    def create_port_group_config(self):
        """Build the 'remove' spec deleting the old portgroup from the current
        standard switch (VSS -> VDS direction)."""
        port_group_config = vim.host.PortGroup.Config()
        port_group_config.spec = vim.host.PortGroup.Specification()

        port_group_config.changeOperation = "remove"
        port_group_config.spec.name = self.current_portgroup_name
        port_group_config.spec.vlanId = -1
        port_group_config.spec.vswitchName = self.current_switch_name
        port_group_config.spec.policy = vim.host.NetworkPolicy()

        return port_group_config

    def state_migrate_vss_vds(self):
        """Move the vmk interface from a standard switch onto the VDS in one
        combined network reconfiguration (portgroup removal + vnic edit)."""
        host_network_system = self.host_system.configManager.networkSystem

        dv_switch = find_dvs_by_name(self.content, self.migrate_switch_name)
        pg = find_dvspg_by_name(dv_switch, self.migrate_portgroup_name)

        config = vim.host.NetworkConfig()
        config.portgroup = [self.create_port_group_config()]
        config.vnic = [self.create_host_vnic_config(dv_switch.uuid, pg.key)]
        host_network_system.UpdateNetworkConfig(config, "modify")
        self.module.exit_json(changed=True)

    def check_vmk_current_state(self):
        """Classify the interface's current location.

        Returns one of 'migrate_vss_vds', 'migrate_vds_vss' or 'migrated';
        fails the module when the device cannot be found on the host.
        """
        self.host_system = find_hostsystem_by_name(self.content, self.esxi_hostname)

        for vnic in self.host_system.configManager.networkSystem.networkInfo.vnic:
            if vnic.device == self.device:
                if vnic.spec.distributedVirtualPort is None:
                    # Interface is currently on a standard switch.
                    std_vswitches = [vswitch.name for vswitch in self.host_system.configManager.networkSystem.networkInfo.vswitch]
                    if self.current_switch_name not in std_vswitches:
                        return "migrated"
                    if vnic.portgroup == self.current_portgroup_name:
                        return "migrate_vss_vds"
                else:
                    # Interface is already connected to a distributed switch.
                    dvs = find_dvs_by_name(self.content, self.current_switch_name)
                    if dvs is None:
                        return "migrated"
                    if vnic.spec.distributedVirtualPort.switchUuid == dvs.uuid:
                        return "migrate_vds_vss"

        self.module.fail_json(msg='Unable to find the specified device %s.' % self.device)
+
+
def main():
    """Entry point for the vmware_migrate_vmk module."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        esxi_hostname=dict(required=True, type='str'),
        device=dict(required=True, type='str'),
        current_switch_name=dict(required=True, type='str'),
        current_portgroup_name=dict(required=True, type='str'),
        migrate_switch_name=dict(required=True, type='str'),
        migrate_portgroup_name=dict(required=True, type='str'),
        migrate_vlan_id=dict(required=False, type='int'),
    )

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi required for this module')

    VMwareMigrateVmk(module).process_state()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_object_custom_attributes_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_object_custom_attributes_info.py
new file mode 100644
index 000000000..465e84fbf
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_object_custom_attributes_info.py
@@ -0,0 +1,209 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2021, sky-joker
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: vmware_object_custom_attributes_info
+short_description: Gather custom attributes of an object
+author:
+ - sky-joker (@sky-joker)
+description:
+  - This module can be used to gather custom attributes of an object.
+notes:
+ - Supports C(check_mode).
+options:
+ object_type:
+ description:
+ - Type of an object to work with.
+ type: str
+ choices:
+ - Datacenter
+ - Cluster
+ - HostSystem
+ - ResourcePool
+ - Folder
+ - VirtualMachine
+ - DistributedVirtualSwitch
+ - DistributedVirtualPortgroup
+ - Datastore
+ required: true
+ object_name:
+ description:
+ - Name of the object to work with.
+ type: str
+ aliases:
+ - name
+ moid:
+ description:
+ - Managed Object ID of the instance to get if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(object_name) is not supplied.
+ type: str
+extends_documentation_fragment:
+ - community.vmware.vmware.documentation
+"""
+
+EXAMPLES = r"""
+- name: Gather custom attributes of a virtual machine
+ community.vmware.vmware_object_custom_attributes_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ object_type: VirtualMachine
+ object_name: "{{ object_name }}"
+ register: vm_attributes
+
+- name: Gather custom attributes of a virtual machine with moid
+ community.vmware.vmware_object_custom_attributes_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ object_type: VirtualMachine
+ moid: "{{ moid }}"
+ register: vm_attributes
+"""
+
+RETURN = r"""
+custom_attributes:
+ description: list of custom attributes of an object.
+ returned: always
+ type: list
+ sample: >-
+ [
+ {
+ "attribute": "example01",
+ "key": 132,
+ "type": "VirtualMachine",
+ "value": "10"
+ },
+ {
+ "attribute": "example02",
+ "key": 131,
+ "type": "VirtualMachine",
+ "value": "20"
+ },
+ {
+ "attribute": "example03",
+ "key": 130,
+ "type": "VirtualMachine",
+ "value": null
+ }
+ ]
+"""
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, find_obj
+
+
class VmwareCustomAttributesInfo(PyVmomi):
    """Gather the custom attributes (definitions and values) of one vSphere object."""

    def __init__(self, module):
        super(VmwareCustomAttributesInfo, self).__init__(module)

        # Custom attributes are managed by vCenter; a direct ESXi connection won't work.
        if not self.is_vcenter():
            self.module.fail_json(msg="You have to connect to a vCenter server!")

        self.object_type = self.params['object_type']
        self.object_name = self.params['object_name']
        self.moid = self.params['moid']

        # Maps the module's object_type choices onto pyVmomi managed object classes.
        self.valid_object_types = {
            'Datacenter': vim.Datacenter,
            'Cluster': vim.ClusterComputeResource,
            'HostSystem': vim.HostSystem,
            'ResourcePool': vim.ResourcePool,
            'Folder': vim.Folder,
            'VirtualMachine': vim.VirtualMachine,
            'DistributedVirtualSwitch': vim.DistributedVirtualSwitch,
            'DistributedVirtualPortgroup': vim.DistributedVirtualPortgroup,
            'Datastore': vim.Datastore
        }

    def execute(self):
        """Look up the object, collect its custom attributes, and exit the module.

        Exits with a 'custom_attributes' list where each entry holds the
        attribute name, key, applicable object type and current value
        (None when the attribute is defined but not set on the object).
        """
        result = {'changed': False}

        # The module declares required_one_of(object_name, moid), so exactly one
        # lookup path is taken; 'else' (rather than 'elif self.moid') also keeps
        # 'obj' defined on every path.
        if self.object_name:
            obj = find_obj(self.content, [self.valid_object_types[self.object_type]], self.object_name)
        else:
            obj = self.find_obj_by_moid(self.object_type, self.moid)
        if not obj:
            # Parentheses are required here: '%' binds tighter than the
            # conditional expression, so without them the message would lose
            # its "can't find" prefix whenever only 'moid' is supplied.
            self.module.fail_json(msg="can't find the object: %s" % (self.object_name if self.object_name else self.moid))

        # Attribute definitions available on this object: key -> {name, type}.
        available_fields = {}
        for available_custom_attribute in obj.availableField:
            available_fields[available_custom_attribute.key] = {
                'name': available_custom_attribute.name,
                'type': available_custom_attribute.managedObjectType
            }

        # Values actually set on this object, keyed by attribute key.
        custom_values = {}
        for custom_value in obj.customValue:
            custom_values[custom_value.key] = custom_value.value

        custom_attributes = []
        for key, value in available_fields.items():
            attribute_result = {
                'attribute': value['name'],
                # managedObjectType is None for globally-applicable attributes.
                'type': self.to_json(value['type']).replace('vim.', '') if value['type'] is not None else 'Global',
                'key': key,
                'value': custom_values.get(key)
            }
            custom_attributes.append(attribute_result)

        result['custom_attributes'] = custom_attributes
        self.module.exit_json(**result)
+
+
def main():
    """Entry point for the vmware_object_custom_attributes_info module."""
    object_type_choices = [
        'Datacenter',
        'Cluster',
        'HostSystem',
        'ResourcePool',
        'Folder',
        'VirtualMachine',
        'DistributedVirtualSwitch',
        'DistributedVirtualPortgroup',
        'Datastore',
    ]

    argument_spec = vmware_argument_spec()
    argument_spec.update(
        object_type=dict(type='str', required=True, choices=object_type_choices),
        object_name=dict(type='str', aliases=['name']),
        moid=dict(type='str'),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['object_name', 'moid']
        ],
        required_one_of=[
            ['object_name', 'moid']
        ],
        supports_check_mode=True,
    )

    VmwareCustomAttributesInfo(module).execute()


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_object_rename.py b/ansible_collections/community/vmware/plugins/modules/vmware_object_rename.py
new file mode 100644
index 000000000..0827c9369
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_object_rename.py
@@ -0,0 +1,332 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# Copyright: (c) 2019, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: vmware_object_rename
+short_description: Renames VMware objects
+description:
+- This module can be used to rename VMware objects.
+- All variables and VMware object names are case sensitive.
+- Renaming Host and Network is not supported by VMware APIs.
+author:
+- Abhijeet Kasurde (@Akasurde)
+requirements:
+- vSphere Automation SDK
+options:
+ object_type:
+ description:
+ - Type of object to work with.
+ - Valid options are Cluster, ClusterComputeResource, Datacenter, Datastore, Folder, ResourcePool, VM or VirtualMachine.
+ required: true
+ type: str
+ choices:
+ - 'ClusterComputeResource'
+ - 'Cluster'
+ - 'Datacenter'
+ - 'Datastore'
+ - 'Folder'
+ - 'Network'
+ - 'ResourcePool'
+ - 'VM'
+ - 'VirtualMachine'
+ object_name:
+ description:
+ - Name of the object to work with.
+ - Mutually exclusive with C(object_moid).
+ type: str
+ object_moid:
+ description:
+ - Managed object id of the VMware object to work with.
+ - Mutually exclusive with C(object_name).
+ type: str
+ new_name:
+ description:
+ - New name for VMware object.
+ required: true
+ aliases: ['object_new_name']
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware_rest_client.documentation
+'''
+
+EXAMPLES = r'''
+- name: Rename a virtual machine
+ community.vmware.vmware_object_rename:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ new_name: Fedora_31
+ object_name: Fedora_VM
+ object_type: VirtualMachine
+ delegate_to: localhost
+
+- name: Rename a virtual machine using moid
+ community.vmware.vmware_object_rename:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ new_name: Fedora_31
+ object_moid: vm-14
+ object_type: VirtualMachine
+ delegate_to: localhost
+
+- name: Rename a datacenter
+ community.vmware.vmware_object_rename:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ new_name: Asia_Datacenter
+ object_name: dc1
+ object_type: Datacenter
+ delegate_to: localhost
+
+- name: Rename a folder with moid
+ community.vmware.vmware_object_rename:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ new_name: backup
+ object_moid: group-v46
+ object_type: Folder
+ delegate_to: localhost
+
+- name: Rename a cluster with moid
+ community.vmware.vmware_object_rename:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ new_name: CCR_1
+ object_moid: domain-c33
+ object_type: Cluster
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+rename_status:
+ description: metadata about VMware object rename operation
+ returned: on success
+ type: dict
+ sample: {
+ "current_name": "Fedora_31",
+ "desired_name": "Fedora_31",
+ "previous_name": "Fedora_VM",
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, wait_for_task
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+
class VmwareObjectRename(VmwareRestClient):
    """Rename a vSphere managed object.

    The object is located through the vSphere Automation (REST) API, then
    the rename itself is performed through the SOAP API's Rename_Task.
    """

    def __init__(self, module):
        """
        Constructor

        Sets up both API clients: the REST client (via the base class) and a
        pyVmomi SOAP connection whose stub is reused to build managed objects.
        """
        super(VmwareObjectRename, self).__init__(module)
        self.pyv = PyVmomi(module=module)
        self.soap_stub = self.pyv.si._stub

        self.object_type = self.params.get('object_type')
        self.object_name = self.params.get('object_name')
        self.object_new_name = self.params.get('new_name')
        self.object_moid = self.params.get('object_moid')

        self.managed_object = None

    def ensure_state(self):
        """
        Manage the internal state of object rename operation

        """
        results = dict(
            changed=False,
            rename_status=dict(),
        )

        results['rename_status']['desired_name'] = self.object_new_name
        changed = False

        vcenter_obj = self.api_client.vcenter
        # Discover the REST services that support listing; 'Host' is excluded
        # because renaming hosts is not supported by the API.
        available_object_types = [i for i in dir(vcenter_obj) if hasattr(getattr(vcenter_obj, i), 'list') and i != 'Host']
        available_object_types += ['ClusterComputeResource', 'VirtualMachine']

        if self.object_type not in available_object_types:
            self.module.fail_json(msg="Object type can be any"
                                  " one of [%s]" % ", ".join(available_object_types))

        # Maps object_type -> [REST service, pyVmomi class, list-result attribute].
        valid_object_types = {
            'ClusterComputeResource': [
                vcenter_obj.Cluster,
                vim.ClusterComputeResource,
                'cluster',
            ],
            'Cluster': [
                vcenter_obj.Cluster,
                vim.ClusterComputeResource,
                'cluster',
            ],
            'Datacenter': [
                vcenter_obj.Datacenter,
                vim.Datacenter,
                'datacenter',
            ],
            'Datastore': [
                vcenter_obj.Datastore,
                vim.Datastore,
                'datastore',
            ],
            'Folder': [
                vcenter_obj.Folder,
                vim.Folder,
                'folder',
            ],
            'Network': [
                vcenter_obj.Network,
                vim.ClusterComputeResource,
                'network',
            ],
            'ResourcePool': [
                vcenter_obj.ResourcePool,
                vim.ResourcePool,
                'resource_pool'
            ],
            'VM': [
                vcenter_obj.VM,
                vim.VirtualMachine,
                'vm',
            ],
            'VirtualMachine': [
                vcenter_obj.VM,
                vim.VirtualMachine,
                'vm',
            ],
        }

        target_object = valid_object_types[self.object_type][0]

        # Generate filter spec.
        # The list method retrieves at most 1,000 objects. If VCSA has more
        # than 1,000 objects of the specified object_type, it fails with:
        #   Error: Too many virtual machines. Add more filter criteria to reduce the number.
        # Filtering by moid/name keeps the result set below that limit.
        filter_spec = target_object.FilterSpec()
        if self.object_moid:
            # Make a filter for moid if you specify object_moid.
            # The moid is a unique id, so get one object if target moid object exists in the vSphere environment.
            if target_object is vcenter_obj.Datacenter:
                filter_spec.datacenters = set([self.object_moid])

            if target_object is vcenter_obj.Cluster:
                filter_spec.clusters = set([self.object_moid])

            if target_object is vcenter_obj.ResourcePool:
                filter_spec.resource_pools = set([self.object_moid])

            if target_object is vcenter_obj.Folder:
                filter_spec.folders = set([self.object_moid])

            if target_object is vcenter_obj.VM:
                filter_spec.vms = set([self.object_moid])

            if target_object is vcenter_obj.Network:
                filter_spec.networks = set([self.object_moid])

            if target_object is vcenter_obj.Datastore:
                filter_spec.datastores = set([self.object_moid])
        else:
            # If you use object_name parameter, an object will filter with names.
            filter_spec.names = set([self.object_name])

        # Get an object for changing the object name.
        all_vmware_objs = target_object.list(filter_spec)

        # Ensure whether an object with the object_new_name name already exists.
        existing_obj_moid = None
        if self.object_moid:
            if all_vmware_objs:
                # Ensure whether the same object name as object_new_name.
                if all_vmware_objs[0].name == self.object_new_name:
                    existing_obj_moid = all_vmware_objs
        else:
            existing_obj_moid = target_object.list(target_object.FilterSpec(names=set([self.object_new_name])))
        if existing_obj_moid:
            # Object with same name already exists
            results['rename_status']['current_name'] = results['rename_status']['previous_name'] = self.object_new_name
            results['changed'] = False
            self.module.exit_json(**results)

        if not all_vmware_objs:
            msg = "Failed to find object with %s '%s' and '%s' object type"
            if self.object_name:
                msg = msg % ('name', self.object_name, self.object_type)
            elif self.object_moid:
                msg = msg % ('moid', self.object_moid, self.object_type)
            self.module.fail_json(msg=msg)

        # Build a pyVmomi managed object from the moid so Rename_Task can be called.
        obj_moid = getattr(all_vmware_objs[0], valid_object_types[self.object_type][2])
        vmware_obj = valid_object_types[self.object_type][1](obj_moid, self.soap_stub)

        if not vmware_obj:
            msg = "Failed to create VMware object with object %s %s"
            if self.object_name:
                msg = msg % ('name', self.object_name)
            elif self.object_moid:
                msg = msg % ('moid', self.object_moid)
            self.module.fail_json(msg=msg)

        try:
            results['rename_status']['previous_name'] = vmware_obj.name
            # In check mode the rename task is skipped and changed stays False.
            if not self.module.check_mode:
                task = vmware_obj.Rename_Task(self.object_new_name)
                wait_for_task(task)
                changed = True
            results['rename_status']['current_name'] = vmware_obj.name
        except Exception as e:
            msg = to_native(e)
            if hasattr(e, 'msg'):
                msg = to_native(e.msg)
            self.module.fail_json(msg=msg)

        results['changed'] = changed
        self.module.exit_json(**results)
+
+
def main():
    """Entry point for the vmware_object_rename module."""
    argument_spec = VmwareRestClient.vmware_client_argument_spec()
    argument_spec.update(
        object_name=dict(),
        object_moid=dict(),
        new_name=dict(aliases=['object_new_name'], required=True),
        object_type=dict(type='str', required=True, choices=['ClusterComputeResource', 'Cluster', 'Datacenter',
                                                             'Datastore', 'Folder', 'Network', 'ResourcePool', 'VM',
                                                             'VirtualMachine'])
    )
    # ensure_state() explicitly handles check mode (it skips Rename_Task when
    # module.check_mode is set), so advertise check-mode support here; without
    # this flag Ansible would skip the module entirely in check mode and that
    # handling would be dead code.
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['object_name', 'object_moid'],
        ],
        supports_check_mode=True,
    )

    vmware_object_rename = VmwareObjectRename(module)
    vmware_object_rename.ensure_state()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_object_role_permission.py b/ansible_collections/community/vmware/plugins/modules/vmware_object_role_permission.py
new file mode 100644
index 000000000..98bd2b649
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_object_role_permission.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Derek Rushing <derek.rushing@geekops.com>
+# Copyright: (c) 2018, VMware, Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_object_role_permission
+short_description: Manage object permissions on an ESXi host or vCenter
+description: This module can be used to manage object permissions on the given host or vCenter.
+author:
+- Derek Rushing (@kryptsi)
+- Joseph Andreatta (@vmwjoseph)
+notes:
+ - The login user must have the appropriate rights to administer permissions.
+ - Permissions for a distributed switch must be defined and managed on either the datacenter or a folder containing the switch.
+options:
+ role:
+ description:
+ - The role to be assigned permission.
+ - User can also specify role name presented in Web UI. Support added in 1.5.0.
+ required: true
+ type: str
+ principal:
+ description:
+ - The user to be assigned permission.
+ - Required if C(group) is not specified.
+ - If specifying domain user, required separator of domain uses backslash.
+ type: str
+ group:
+ description:
+ - The group to be assigned permission.
+ - Required if C(principal) is not specified.
+ type: str
+ object_name:
+ description:
+ - The object name to assign permission to.
+ type: str
+ required: true
+ object_type:
+ description:
+ - The object type being targeted.
+ default: 'Folder'
+ choices: ['Folder', 'VirtualMachine', 'Datacenter', 'ResourcePool',
+ 'Datastore', 'Network', 'HostSystem', 'ComputeResource',
+ 'ClusterComputeResource', 'DistributedVirtualSwitch',
+ 'DistributedVirtualPortgroup', 'StoragePod']
+ type: str
+ recursive:
+ description:
+ - Should the permissions be recursively applied.
+ default: true
+ type: bool
+ state:
+ description:
+ - Indicate desired state of the object's permission.
+ - When C(state=present), the permission will be added if it doesn't already exist.
+ - When C(state=absent), the permission is removed if it exists.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Assign user to VM folder
+ community.vmware.vmware_object_role_permission:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ role: Admin
+ principal: user_bob
+ object_name: services
+ state: present
+ delegate_to: localhost
+
+- name: Remove user from VM folder
+ community.vmware.vmware_object_role_permission:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ role: Admin
+ principal: user_bob
+ object_name: services
+ state: absent
+ delegate_to: localhost
+
+- name: Assign finance group to VM folder
+ community.vmware.vmware_object_role_permission:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ role: Limited Users
+ group: finance
+ object_name: Accounts
+ state: present
+ delegate_to: localhost
+
+- name: Assign view_user Read Only permission at root folder
+ community.vmware.vmware_object_role_permission:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ role: ReadOnly
+ principal: view_user
+ object_name: rootFolder
+ state: present
+ delegate_to: localhost
+
+- name: Assign domain user to VM folder
+ community.vmware.vmware_object_role_permission:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ role: Admin
+ principal: "vsphere.local\\domainuser"
+ object_name: services
+ state: present
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+changed:
+ description: whether or not a change was made to the object's role
+ returned: always
+ type: bool
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, find_obj
+
+
+class VMwareObjectRolePermission(PyVmomi):
+ def __init__(self, module):
+ super(VMwareObjectRolePermission, self).__init__(module)
+ self.module = module
+ self.params = module.params
+ self.is_group = False
+ self.role_list = {}
+ self.role = None
+ self.auth_manager = self.content.authorizationManager
+ self.populate_role_list()
+
+ if self.params.get('principal', None) is not None:
+ self.applied_to = self.params['principal']
+ elif self.params.get('group', None) is not None:
+ self.applied_to = self.params['group']
+ self.is_group = True
+
+ self.get_role()
+ self.get_object()
+ self.get_perms()
+ self.perm = self.setup_permission()
+ self.state = self.params['state']
+
+ def populate_role_list(self):
+ user_friendly_role_names = {
+ 'Admin': ['Administrator'],
+ 'ReadOnly': ['Read-Only'],
+ 'com.vmware.Content.Admin': [
+ 'Content library administrator (sample)',
+ 'Content library administrator'
+ ],
+ 'NoCryptoAdmin': ['No cryptography administrator'],
+ 'NoAccess': ['No access'],
+ 'VirtualMachinePowerUser': [
+ 'Virtual machine power user (sample)',
+ 'Virtual machine power user'
+ ],
+ 'VirtualMachineUser': [
+ 'Virtual machine user (sample)',
+ 'Virtual machine user'
+ ],
+ 'ResourcePoolAdministrator': [
+ 'Resource pool administrator (sample)',
+ 'Resource pool administrator'
+ ],
+ 'VMwareConsolidatedBackupUser': [
+ 'VMware Consolidated Backup user (sample)',
+ 'VMware Consolidated Backup user'
+ ],
+ 'DatastoreConsumer': [
+ 'Datastore consumer (sample)',
+ 'Datastore consumer'
+ ],
+ 'NetworkConsumer': [
+ 'Network administrator (sample)',
+ 'Network administrator'
+ ],
+ 'VirtualMachineConsoleUser': ['Virtual Machine console user'],
+ 'InventoryService.Tagging.TaggingAdmin': ['Tagging Admin'],
+ }
+ for role in self.auth_manager.roleList:
+ self.role_list[role.name] = role
+ if user_friendly_role_names.get(role.name):
+ for role_name in user_friendly_role_names[role.name]:
+ self.role_list[role_name] = role
+
+ def get_perms(self):
+ self.current_perms = self.auth_manager.RetrieveEntityPermissions(self.current_obj, False)
+
+ def same_permission(self, perm_one, perm_two):
+ return perm_one.principal.lower() == perm_two.principal.lower() \
+ and perm_one.roleId == perm_two.roleId
+
+ def get_state(self):
+ for perm in self.current_perms:
+ if self.same_permission(self.perm, perm):
+ return 'present'
+ return 'absent'
+
+ def process_state(self):
+ local_permission_states = {
+ 'absent': {
+ 'present': self.remove_permission,
+ 'absent': self.state_exit_unchanged,
+ },
+ 'present': {
+ 'present': self.state_exit_unchanged,
+ 'absent': self.add_permission,
+ }
+ }
+ try:
+ local_permission_states[self.state][self.get_state()]()
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=to_native(runtime_fault.msg))
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=to_native(method_fault.msg))
+ except Exception as e:
+ self.module.fail_json(msg=to_native(e))
+
+ def state_exit_unchanged(self):
+ self.module.exit_json(changed=False)
+
+ def setup_permission(self):
+ perm = vim.AuthorizationManager.Permission()
+ perm.entity = self.current_obj
+ perm.group = self.is_group
+ perm.principal = self.applied_to
+ perm.roleId = self.role.roleId
+ perm.propagate = self.params['recursive']
+ return perm
+
+ def add_permission(self):
+ if not self.module.check_mode:
+ self.auth_manager.SetEntityPermissions(self.current_obj, [self.perm])
+ self.module.exit_json(changed=True)
+
+ def remove_permission(self):
+ if not self.module.check_mode:
+ self.auth_manager.RemoveEntityPermission(self.current_obj, self.applied_to, self.is_group)
+ self.module.exit_json(changed=True)
+
+ def get_role(self):
+ self.role = self.role_list.get(self.params['role'], None)
+ if not self.role:
+ self.module.fail_json(msg="Specified role (%s) was not found" % self.params['role'])
+
+ def get_object(self):
+ # find_obj doesn't include rootFolder
+ if self.params['object_type'] == 'Folder' and self.params['object_name'] == 'rootFolder':
+ self.current_obj = self.content.rootFolder
+ return
+ try:
+ getattr(vim, self.params['object_type'])
+ except AttributeError:
+ self.module.fail_json(msg="Object type %s is not valid." % self.params['object_type'])
+ self.current_obj = find_obj(content=self.content,
+ vimtype=[getattr(vim, self.params['object_type'])],
+ name=self.params['object_name'])
+
+ if self.current_obj is None:
+ self.module.fail_json(
+ msg="Specified object %s of type %s was not found."
+ % (self.params['object_name'], self.params['object_type'])
+ )
+ if self.params['object_type'] == 'DistributedVirtualSwitch':
+ msg = "You are applying permissions to a Distributed vSwitch. " \
+ "This will probably fail, since Distributed vSwitches inherits permissions " \
+ "from the datacenter or a folder level. " \
+ "Define permissions on the datacenter or the folder containing the switch."
+ self.module.warn(msg)
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ dict(
+ role=dict(required=True, type='str'),
+ object_name=dict(required=True, type='str'),
+ object_type=dict(
+ type='str',
+ default='Folder',
+ choices=[
+ 'Folder',
+ 'VirtualMachine',
+ 'Datacenter',
+ 'ResourcePool',
+ 'Datastore',
+ 'Network',
+ 'HostSystem',
+ 'ComputeResource',
+ 'ClusterComputeResource',
+ 'DistributedVirtualSwitch',
+ 'DistributedVirtualPortgroup',
+ 'StoragePod',
+ ],
+ ),
+ principal=dict(type='str'),
+ group=dict(type='str'),
+ recursive=dict(type='bool', default=True),
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['principal', 'group']
+ ],
+ required_one_of=[
+ ['principal', 'group']
+ ],
+ )
+
+ vmware_object_permission = VMwareObjectRolePermission(module)
+ vmware_object_permission.process_state()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_object_role_permission_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_object_role_permission_info.py
new file mode 100644
index 000000000..08b668798
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_object_role_permission_info.py
@@ -0,0 +1,278 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Derek Rushing <derek.rushing@geekops.com>
+# Copyright: (c) 2018, VMware, Inc.
+# Copyright: (c) 2021, Ansible Project
+# Copyright: (c) 2021, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r"""
+---
+module: vmware_object_role_permission_info
+short_description: Gather information about object's permissions
+description: This module can be used to gather object permissions on the given VMware object.
+author:
+- Abhijeet Kasurde (@Akasurde)
+notes:
+ - The ESXi or vCenter login user must have the appropriate rights to administer permissions.
+ - Supports check mode.
+options:
+ principal:
+ description:
+ - The optional name of an entity, such as a user, assigned permissions on an object.
+ - If provided, actual permissions on the specified object are returned for the principal, instead of roles.
+ type: str
+ required: false
+ object_name:
+ description:
+ - The object name to assign permission to.
+ - Mutually exclusive with I(moid).
+ type: str
+ object_type:
+ description:
+ - The object type being targeted.
+ default: 'Folder'
+ choices: ['Folder', 'VirtualMachine', 'Datacenter', 'ResourcePool',
+ 'Datastore', 'Network', 'HostSystem', 'ComputeResource',
+ 'ClusterComputeResource', 'DistributedVirtualSwitch',
+ 'DistributedVirtualPortgroup', 'StoragePod']
+ type: str
+ moid:
+ description:
+ - Managed object ID for the given object.
+ - Mutually exclusive with I(object_name).
+ aliases: ['object_moid']
+ type: 'str'
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+"""
+
+EXAMPLES = r"""
+- name: Gather role information about Datastore
+ community.vmware.vmware_object_role_permission_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ object_name: ds_200
+ object_type: Datastore
+
+- name: Gather permissions on Datastore for a User
+ community.vmware.vmware_object_role_permission_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: false
+ principal: some.user@company.com
+ object_name: ds_200
+ object_type: Datastore
+"""
+
+RETURN = r"""
+permission_info:
+ description: information about object's permission
+ returned: always
+ type: list
+ sample: [
+ {
+ "principal": "VSPHERE.LOCAL\\vpxd-extension-12e0b667-892c-4694-8a5e-f13147e45dbd",
+ "propagate": true,
+ "role_id": -1,
+ "role_name": "Admin"
+ }
+ ]
+"""
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi,
+ vmware_argument_spec,
+ find_obj,
+)
+
+
+class VMwareObjectRolePermission(PyVmomi):
+ def __init__(self, module):
+ super(VMwareObjectRolePermission, self).__init__(module)
+ self.module = module
+ self.params = module.params
+ self.role_list = {}
+ self.auth_manager = self.content.authorizationManager
+
+ self.principal = self.params.get('principal')
+ self.get_object()
+ self.get_perms()
+ self.populate_role_list()
+ self.populate_permission_list()
+
+ def populate_permission_list(self):
+ results = []
+ if self.principal is None:
+ for permission in self.current_perms:
+ results.append(
+ {
+ "principal": permission.principal,
+ "role_name": self.role_list.get(permission.roleId, ""),
+ "role_id": permission.roleId,
+ "propagate": permission.propagate,
+ }
+ )
+ else:
+ results = self.to_json(self.current_perms)
+ self.module.exit_json(changed=False, permission_info=results)
+
+ def populate_role_list(self):
+ user_friendly_role_names = {
+ "Admin": ["Administrator"],
+ "ReadOnly": ["Read-Only"],
+ "com.vmware.Content.Admin": [
+ "Content library administrator (sample)",
+ "Content library administrator",
+ ],
+ "NoCryptoAdmin": ["No cryptography administrator"],
+ "NoAccess": ["No access"],
+ "VirtualMachinePowerUser": [
+ "Virtual machine power user (sample)",
+ "Virtual machine power user",
+ ],
+ "VirtualMachineUser": [
+ "Virtual machine user (sample)",
+ "Virtual machine user",
+ ],
+ "ResourcePoolAdministrator": [
+ "Resource pool administrator (sample)",
+ "Resource pool administrator",
+ ],
+ "VMwareConsolidatedBackupUser": [
+ "VMware Consolidated Backup user (sample)",
+ "VMware Consolidated Backup user",
+ ],
+ "DatastoreConsumer": ["Datastore consumer (sample)", "Datastore consumer"],
+ "NetworkConsumer": [
+ "Network administrator (sample)",
+ "Network administrator",
+ ],
+ "VirtualMachineConsoleUser": ["Virtual Machine console user"],
+ "InventoryService.Tagging.TaggingAdmin": ["Tagging Admin"],
+ }
+ for role in self.content.authorizationManager.roleList:
+ self.role_list[role.roleId] = role.name
+ if user_friendly_role_names.get(role.name):
+ for role_name in user_friendly_role_names[role.name]:
+ self.role_list[role.roleId] = role_name
+
+ def get_perms(self):
+ if self.principal is None:
+ self.current_perms = self.auth_manager.RetrieveEntityPermissions(
+ self.current_obj, True
+ )
+ else:
+ moid_list = []
+ moid_list.append(self.current_obj)
+ self.current_perms = self.auth_manager.FetchUserPrivilegeOnEntities(
+ moid_list, self.principal
+ )
+
+ def get_object(self):
+ # find_obj doesn't include rootFolder
+ if (
+ self.params["object_type"] == "Folder" and self.params["object_name"] == "rootFolder"
+ ):
+ self.current_obj = self.content.rootFolder
+ return
+
+ vim_type = None
+ try:
+ vim_type = getattr(vim, self.params["object_type"])
+ except AttributeError:
+ pass
+ if not vim_type:
+ self.module.fail_json(
+ msg="Object type %s is not valid." % self.params["object_type"]
+ )
+
+ msg = "Specified object "
+ if "moid" in self.params and self.params["moid"]:
+ self.current_obj = vim_type(self.params["moid"], self.si._stub)
+ msg += "with moid %s of type %s" % (
+ self.params["moid"],
+ self.params["object_type"],
+ )
+ elif "object_name" in self.params and self.params["object_name"]:
+ self.current_obj = find_obj(
+ content=self.content,
+ vimtype=[vim_type],
+ name=self.params["object_name"],
+ )
+ msg = "%s of type %s" % (
+ self.params["object_name"],
+ self.params["object_type"],
+ )
+
+ if self.current_obj is None:
+ msg += "was not found"
+ self.module.fail_json(msg=msg)
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ dict(
+ principal=dict(
+ type="str",
+ required=False
+ ),
+ object_name=dict(type="str"),
+ object_type=dict(
+ type="str",
+ default="Folder",
+ choices=[
+ "Folder",
+ "VirtualMachine",
+ "Datacenter",
+ "ResourcePool",
+ "Datastore",
+ "Network",
+ "HostSystem",
+ "ComputeResource",
+ "ClusterComputeResource",
+ "DistributedVirtualSwitch",
+ "DistributedVirtualPortgroup",
+ "StoragePod",
+ ],
+ ),
+ moid=dict(
+ type="str",
+ aliases=["object_moid"],
+ ),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ["object_name", "moid"],
+ ],
+ mutually_exclusive=[
+ ["object_name", "moid"],
+ ],
+ )
+
+ VMwareObjectRolePermission(module)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_portgroup.py b/ansible_collections/community/vmware/plugins/modules/vmware_portgroup.py
new file mode 100644
index 000000000..7f5e57d83
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_portgroup.py
@@ -0,0 +1,1046 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
+# Copyright: (c) 2017, Ansible Project
+# Copyright: (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
+# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_portgroup
+short_description: Create a VMware portgroup
+description:
+ - Create a VMware Port Group on a VMware Standard Switch (vSS) for given ESXi host(s) or hosts of given cluster.
+author:
+- Joseph Callen (@jcpowermac)
+- Russell Teague (@mtnbikenc)
+- Abhijeet Kasurde (@Akasurde)
+- Christian Kotte (@ckotte)
+options:
+ switch:
+ description:
+ - vSwitch to modify.
+ required: true
+ aliases: [ 'switch_name', 'vswitch' ]
+ type: str
+ portgroup:
+ description:
+ - Portgroup name to add.
+ required: true
+ aliases: [ 'portgroup_name' ]
+ type: str
+ vlan_id:
+ description:
+ - VLAN ID to assign to portgroup.
+ - Set to 0 (no VLAN tagging) by default.
+ required: false
+ default: 0
+ aliases: [ 'vlan' ]
+ type: int
+ security:
+ description:
+ - Network policy specifies layer 2 security settings for a
+ portgroup such as promiscuous mode, where guest adapter listens
+ to all the packets, MAC address changes and forged transmits.
+ - Dict which configures the different security values for portgroup.
+ suboptions:
+ promiscuous_mode:
+ type: bool
+ description: Indicates whether promiscuous mode is allowed.
+ forged_transmits:
+ type: bool
+ description: Indicates whether forged transmits are allowed.
+ mac_changes:
+ type: bool
+ description: Indicates whether mac changes are allowed.
+ required: false
+ aliases: [ 'security_policy', 'network_policy' ]
+ type: dict
+ teaming:
+ description:
+ - Dictionary which configures the different teaming values for portgroup.
+ suboptions:
+ load_balancing:
+ type: str
+ description:
+ - Network adapter teaming policy.
+ choices: [ loadbalance_ip, loadbalance_srcmac, loadbalance_srcid, failover_explicit ]
+ aliases: [ 'load_balance_policy' ]
+ network_failure_detection:
+ type: str
+ description: Network failure detection.
+ choices: [ link_status_only, beacon_probing ]
+ notify_switches:
+ type: bool
+ description: Indicate whether or not to notify the physical switch if a link fails.
+ failback:
+ type: bool
+ description: Indicate whether or not to use a failback when restoring links.
+ active_adapters:
+ type: list
+ description:
+ - List of active adapters used for load balancing.
+ - All vmnics are used as active adapters if C(active_adapters) and C(standby_adapters) are not defined.
+ elements: str
+ standby_adapters:
+ type: list
+ description:
+ - List of standby adapters used for failover.
+ - All vmnics are used as active adapters if C(active_adapters) and C(standby_adapters) are not defined.
+ elements: str
+ required: false
+ aliases: [ 'teaming_policy' ]
+ type: dict
+ traffic_shaping:
+ description:
+ - Dictionary which configures traffic shaping for the switch.
+ suboptions:
+ enabled:
+ type: bool
+ description: Status of Traffic Shaping Policy.
+ average_bandwidth:
+ type: int
+ description: Average bandwidth (kbit/s).
+ peak_bandwidth:
+ type: int
+ description: Peak bandwidth (kbit/s).
+ burst_size:
+ type: int
+ description: Burst size (KB).
+ required: false
+ type: dict
+ cluster_name:
+ description:
+ - Name of the cluster for host membership.
+ - Portgroup will be created on all hosts of the given cluster.
+ - This option is required if C(hosts) is not specified.
+ aliases: [ 'cluster' ]
+ type: str
+ hosts:
+ description:
+ - List of names of hosts on which the portgroup needs to be added.
+ - This option is required if C(cluster_name) is not specified.
+ aliases: [ esxi_hostname ]
+ type: list
+ elements: str
+ state:
+ description:
+ - Determines if the portgroup should be present or not.
+ choices:
+ - 'present'
+ - 'absent'
+ default: present
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Add Management Network VM Portgroup
+ community.vmware.vmware_portgroup:
+ hostname: "{{ esxi_hostname }}"
+ username: "{{ esxi_username }}"
+ password: "{{ esxi_password }}"
+ switch: "{{ vswitch_name }}"
+ portgroup: "{{ portgroup_name }}"
+ vlan_id: "{{ vlan_id }}"
+ delegate_to: localhost
+
+- name: Add Portgroup with Promiscuous Mode Enabled
+ community.vmware.vmware_portgroup:
+ hostname: "{{ esxi_hostname }}"
+ username: "{{ esxi_username }}"
+ password: "{{ esxi_password }}"
+ switch: "{{ vswitch_name }}"
+ portgroup: "{{ portgroup_name }}"
+ security:
+ promiscuous_mode: true
+ delegate_to: localhost
+
+- name: Add Management Network VM Portgroup to specific hosts
+ community.vmware.vmware_portgroup:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ hosts: [esxi_hostname_one]
+ switch: "{{ vswitch_name }}"
+ portgroup: "{{ portgroup_name }}"
+ vlan_id: "{{ vlan_id }}"
+ delegate_to: localhost
+
+- name: Add Management Network VM Portgroup to all hosts in a cluster
+ community.vmware.vmware_portgroup:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ cluster_name: "{{ cluster_name }}"
+ switch: "{{ vswitch_name }}"
+ portgroup: "{{ portgroup_name }}"
+ vlan_id: "{{ vlan_id }}"
+ delegate_to: localhost
+
+- name: Remove Management Network VM Portgroup to all hosts in a cluster
+ community.vmware.vmware_portgroup:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ cluster_name: "{{ cluster_name }}"
+ switch: "{{ vswitch_name }}"
+ portgroup: "{{ portgroup_name }}"
+ vlan_id: "{{ vlan_id }}"
+ state: absent
+ delegate_to: localhost
+
+- name: Add Portgroup with all settings defined
+ community.vmware.vmware_portgroup:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ inventory_hostname }}"
+ switch: "{{ vswitch_name }}"
+ portgroup: "{{ portgroup_name }}"
+ vlan_id: 10
+ security:
+ promiscuous_mode: false
+ mac_changes: false
+ forged_transmits: false
+ traffic_shaping:
+ enabled: true
+ average_bandwidth: 100000
+ peak_bandwidth: 100000
+ burst_size: 102400
+ teaming:
+ load_balancing: failover_explicit
+ network_failure_detection: link_status_only
+ notify_switches: true
+ failback: true
+ active_adapters:
+ - vmnic0
+ standby_adapters:
+ - vmnic1
+ delegate_to: localhost
+ register: teaming_result
+'''
+
+RETURN = r'''
+result:
+ description: metadata about the portgroup
+ returned: always
+ type: dict
+ sample: {
+ "esxi01.example.com": {
+ "changed": true,
+ "failback": "No override",
+ "failover_active": "No override",
+ "failover_standby": "No override",
+ "failure_detection": "No override",
+ "load_balancing": "No override",
+ "msg": "Port Group added",
+ "notify_switches": "No override",
+ "portgroup": "vMotion",
+ "sec_forged_transmits": false,
+ "sec_mac_changes": false,
+ "sec_promiscuous_mode": false,
+ "traffic_shaping": "No override",
+ "vlan_id": 33,
+ "vswitch": "vSwitch1"
+ }
+ }
+'''
+
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class VMwareHostPortGroup(PyVmomi):
+ """Manage portgroup"""
+
+ def __init__(self, module):
+ super(VMwareHostPortGroup, self).__init__(module)
+ self.switch_object = None
+ self.portgroup_object = None
+ hosts = self.params['hosts']
+ cluster = self.params['cluster_name']
+ self.portgroup = self.params['portgroup']
+ self.switch = self.params['switch']
+ self.vlan_id = self.params['vlan_id']
+ if self.params['security']:
+ self.sec_promiscuous_mode = self.params['security'].get('promiscuous_mode')
+ self.sec_forged_transmits = self.params['security'].get('forged_transmits')
+ self.sec_mac_changes = self.params['security'].get('mac_changes')
+ else:
+ self.sec_promiscuous_mode = None
+ self.sec_forged_transmits = None
+ self.sec_mac_changes = None
+ if self.params['traffic_shaping']:
+ self.ts_enabled = self.params['traffic_shaping'].get('enabled')
+ if self.ts_enabled is True:
+ for value in ['average_bandwidth', 'peak_bandwidth', 'burst_size']:
+ if not self.params['traffic_shaping'].get(value):
+ self.module.fail_json(msg="traffic_shaping.%s is a required parameter if traffic_shaping is enabled." % value)
+ self.ts_average_bandwidth = self.params['traffic_shaping'].get('average_bandwidth')
+ self.ts_peak_bandwidth = self.params['traffic_shaping'].get('peak_bandwidth')
+ self.ts_burst_size = self.params['traffic_shaping'].get('burst_size')
+ else:
+ self.ts_enabled = None
+ self.ts_average_bandwidth = None
+ self.ts_peak_bandwidth = None
+ self.ts_burst_size = None
+ if self.params['teaming']:
+ self.teaming_load_balancing = self.params['teaming'].get('load_balancing')
+ self.teaming_failure_detection = self.params['teaming'].get('network_failure_detection')
+ self.teaming_notify_switches = self.params['teaming'].get('notify_switches')
+ self.teaming_failback = self.params['teaming'].get('failback')
+ self.teaming_failover_order_active = self.params['teaming'].get('active_adapters')
+ self.teaming_failover_order_standby = self.params['teaming'].get('standby_adapters')
+ if self.teaming_failover_order_active is None:
+ self.teaming_failover_order_active = []
+ if self.teaming_failover_order_standby is None:
+ self.teaming_failover_order_standby = []
+ else:
+ self.teaming_load_balancing = None
+ self.teaming_failure_detection = None
+ self.teaming_notify_switches = None
+ self.teaming_failback = None
+ self.teaming_failover_order_active = None
+ self.teaming_failover_order_standby = None
+ self.state = self.params['state']
+
+ self.hosts = self.get_all_host_objs(cluster_name=cluster, esxi_host_name=hosts)
+ if not self.hosts:
+ self.module.fail_json(msg="Failed to find host system with given configuration.")
+
+ def process_state(self):
+ """Manage internal state of the portgroup"""
+ results = dict(changed=False, result=dict())
+ host_change_list = []
+ for host in self.hosts:
+ changed = False
+ results['result'][host.name] = dict()
+ switch_state = self.check_if_vswitch_exists(host_system=host)
+ if switch_state == 'absent':
+ self.module.fail_json(msg="The vSwitch '%s' doesn't exist on host '%s'" % (self.switch, host.name))
+ portgroup_state = self.check_if_portgroup_exists(host_system=host)
+ if self.state == 'present' and portgroup_state == 'present':
+ changed, host_results = self.update_host_port_group(
+ host_system=host,
+ portgroup_object=self.portgroup_object
+ )
+ elif self.state == 'present' and portgroup_state == 'absent':
+ changed, host_results = self.create_host_port_group(host_system=host)
+ elif self.state == 'absent' and portgroup_state == 'present':
+ changed, host_results = self.remove_host_port_group(host_system=host)
+ else:
+ host_results = dict()
+ host_results['changed'] = False
+ host_results['msg'] = "Port Group already deleted"
+ host_results['portgroup'] = self.portgroup
+ results['result'][host.name] = host_results
+
+ host_change_list.append(changed)
+
+ if any(host_change_list):
+ results['changed'] = True
+ self.module.exit_json(**results)
+
+ def check_if_portgroup_exists(self, host_system):
+ """
+ Check if portgroup exists
+ Returns: 'present' if portgroup exists or 'absent' if not
+ """
+ self.portgroup_object = self.find_portgroup_by_name(
+ host_system=host_system,
+ portgroup_name=self.portgroup,
+ vswitch_name=self.switch
+ )
+ if self.portgroup_object is None:
+ return 'absent'
+ return 'present'
+
+ def find_portgroup_by_name(self, host_system, portgroup_name, vswitch_name):
+ """
+ Find and return port group managed object
+ Args:
+ host_system: Name of Host System
+ portgroup_name: Name of the Port Group
+ vswitch_name: Name of the vSwitch
+
+ Returns: Port Group managed object if found, else None
+ """
+ portgroups = self.get_all_port_groups_by_host(host_system=host_system)
+ for portgroup in portgroups:
+ if portgroup.spec.name == portgroup_name and portgroup.spec.vswitchName != vswitch_name:
+ # portgroup names are unique; there can be only one portgroup with the same name per host
+ self.module.fail_json(msg="The portgroup already exists on vSwitch '%s'" % portgroup.spec.vswitchName)
+ if portgroup.spec.name == portgroup_name and portgroup.spec.vswitchName == vswitch_name:
+ return portgroup
+ return None
+
+ def check_if_vswitch_exists(self, host_system):
+ """
+ Check if vSwitch exists
+ Returns: 'present' if vSwitch exists or 'absent' if not
+ """
+ self.switch_object = self.find_vswitch_by_name(
+ host_system=host_system,
+ vswitch_name=self.switch
+ )
+ if self.switch_object is None:
+ return 'absent'
+ return 'present'
+
+ @staticmethod
+ def find_vswitch_by_name(host_system, vswitch_name):
+ """
+ Find and return vSwitch managed object
+ Args:
+ host_system: Host system managed object
+ vswitch_name: Name of vSwitch to find
+
+ Returns: vSwitch managed object if found, else None
+ """
+ for vss in host_system.configManager.networkSystem.networkInfo.vswitch:
+ if vss.name == vswitch_name:
+ return vss
+ return None
+
+    def remove_host_port_group(self, host_system):
+        """
+        Remove a Port Group from a given host
+        Args:
+            host_system: Host system managed object
+
+        Returns: tuple of (True, per-host result dict)
+        """
+        host_results = dict(changed=False, msg="")
+
+        if self.module.check_mode:
+            # NOTE(review): 'changed' stays False in check mode even though a
+            # real run would remove the portgroup -- confirm this is intended.
+            host_results['msg'] = "Port Group would be removed"
+        else:
+            try:
+                host_system.configManager.networkSystem.RemovePortGroup(pgName=self.portgroup)
+                host_results['msg'] = "Port Group removed"
+            except vim.fault.NotFound as not_found:
+                self.module.fail_json(
+                    msg="Failed to remove Portgroup as it was not found: %s" % to_native(not_found.msg)
+                )
+            except vim.fault.ResourceInUse as resource_in_use:
+                self.module.fail_json(
+                    msg="Failed to remove Portgroup as it is in use: %s" % to_native(resource_in_use.msg)
+                )
+            except vim.fault.HostConfigFault as host_config_fault:
+                self.module.fail_json(
+                    msg="Failed to remove Portgroup due to configuration failures: %s" % to_native(host_config_fault.msg)
+                )
+            host_results['changed'] = True
+        host_results['portgroup'] = self.portgroup
+        host_results['vswitch'] = self.switch
+
+        return True, host_results
+
+    def create_host_port_group(self, host_system):
+        """Create Port Group on a given host
+        Args:
+            host_system: Host system managed object
+
+        Returns: tuple of (True, per-host result dict)
+        """
+        host_results = dict(changed=False, msg="")
+
+        if self.module.check_mode:
+            # NOTE(review): 'changed' stays False in check mode even though a
+            # real run would create the portgroup -- confirm this is intended.
+            host_results['msg'] = "Port Group would be added"
+        else:
+            port_group = vim.host.PortGroup.Config()
+            port_group.spec = vim.host.PortGroup.Specification()
+            port_group.spec.vswitchName = self.switch
+            port_group.spec.name = self.portgroup
+            port_group.spec.vlanId = self.vlan_id
+            port_group.spec.policy = self.create_network_policy()
+
+            try:
+                host_system.configManager.networkSystem.AddPortGroup(portgrp=port_group.spec)
+                host_results['changed'] = True
+                host_results['msg'] = "Port Group added"
+            except vim.fault.AlreadyExists as already_exists:
+                self.module.fail_json(
+                    msg="Failed to add Portgroup as it already exists: %s" % to_native(already_exists.msg)
+                )
+            except vim.fault.NotFound as not_found:
+                self.module.fail_json(
+                    msg="Failed to add Portgroup as vSwitch was not found: %s" % to_native(not_found.msg)
+                )
+            except vim.fault.HostConfigFault as host_config_fault:
+                self.module.fail_json(
+                    msg="Failed to add Portgroup due to host system configuration failure : %s" %
+                    to_native(host_config_fault.msg)
+                )
+            except vmodl.fault.InvalidArgument as invalid_argument:
+                self.module.fail_json(
+                    msg="Failed to add Portgroup as VLAN id was not correct as per specifications: %s" %
+                    to_native(invalid_argument.msg)
+                )
+            # NOTE(review): redundant -- 'changed' was already set to True right
+            # after the successful AddPortGroup call above.
+            host_results['changed'] = True
+        host_results['portgroup'] = self.portgroup
+        host_results['vswitch'] = self.switch
+        host_results['vlan_id'] = self.vlan_id
+        # Report effective policy values; None means the vSwitch value applies.
+        if self.sec_promiscuous_mode is None:
+            host_results['sec_promiscuous_mode'] = "No override"
+        else:
+            host_results['sec_promiscuous_mode'] = self.sec_promiscuous_mode
+        if self.sec_mac_changes is None:
+            host_results['sec_mac_changes'] = "No override"
+        else:
+            host_results['sec_mac_changes'] = self.sec_mac_changes
+        if self.sec_forged_transmits is None:
+            host_results['sec_forged_transmits'] = "No override"
+        else:
+            host_results['sec_forged_transmits'] = self.sec_forged_transmits
+        host_results['traffic_shaping'] = "No override" if self.ts_enabled is None else self.ts_enabled
+        host_results['load_balancing'] = "No override" if self.teaming_load_balancing is None \
+            else self.teaming_load_balancing
+        host_results['notify_switches'] = "No override" if self.teaming_notify_switches is None \
+            else self.teaming_notify_switches
+        host_results['failback'] = "No override" if self.teaming_failback is None else self.teaming_failback
+        host_results['failover_active'] = "No override" if self.teaming_failover_order_active is None \
+            else self.teaming_failover_order_active
+        host_results['failover_standby'] = "No override" if self.teaming_failover_order_standby is None \
+            else self.teaming_failover_order_standby
+        host_results['failure_detection'] = "No override" if self.teaming_failure_detection is None \
+            else self.teaming_failure_detection
+
+        return True, host_results
+
+ def update_host_port_group(self, host_system, portgroup_object):
+ """Update a Port Group on a given host
+ Args:
+ host_system: Name of Host System
+ """
+ changed = changed_security = False
+ changed_list = []
+ host_results = dict(changed=False, msg="")
+ spec = portgroup_object.spec
+ message = ''
+
+ # Check VLAN ID
+ host_results['vlan_id'] = self.vlan_id
+ if spec.vlanId != self.vlan_id:
+ changed = True
+ changed_list.append("VLAN ID")
+ host_results['vlan_id_previous'] = spec.vlanId
+ spec.vlanId = self.vlan_id
+
+ # Check security settings
+ if self.sec_promiscuous_mode is None:
+ host_results['sec_promiscuous_mode'] = "No override"
+ else:
+ host_results['sec_promiscuous_mode'] = self.sec_promiscuous_mode
+ if self.sec_mac_changes is None:
+ host_results['sec_mac_changes'] = "No override"
+ else:
+ host_results['sec_mac_changes'] = self.sec_mac_changes
+ if self.sec_forged_transmits is None:
+ host_results['sec_forged_transmits'] = "No override"
+ else:
+ host_results['sec_forged_transmits'] = self.sec_forged_transmits
+ if spec.policy.security:
+ promiscuous_mode_previous = spec.policy.security.allowPromiscuous
+ mac_changes_previous = spec.policy.security.macChanges
+ forged_transmits_previous = spec.policy.security.forgedTransmits
+ if promiscuous_mode_previous is not self.sec_promiscuous_mode:
+ spec.policy.security.allowPromiscuous = self.sec_promiscuous_mode
+ changed = changed_security = True
+ changed_list.append("Promiscuous mode")
+ if mac_changes_previous is not self.sec_mac_changes:
+ spec.policy.security.macChanges = self.sec_mac_changes
+ changed = changed_security = True
+ changed_list.append("MAC address changes")
+ if forged_transmits_previous is not self.sec_forged_transmits:
+ spec.policy.security.forgedTransmits = self.sec_forged_transmits
+ changed = changed_security = True
+ changed_list.append("Forged transmits")
+ if changed_security:
+ if self.sec_promiscuous_mode is None:
+ host_results['sec_promiscuous_mode_previous'] = "No override"
+ else:
+ host_results['sec_promiscuous_mode_previous'] = promiscuous_mode_previous
+ if self.sec_mac_changes is None:
+ host_results['sec_mac_changes_previous'] = "No override"
+ else:
+ host_results['sec_mac_changes'] = mac_changes_previous
+ if self.sec_forged_transmits is None:
+ host_results['sec_forged_transmits_previous'] = "No override"
+ else:
+ host_results['sec_forged_transmits_previous'] = forged_transmits_previous
+ else:
+ spec.policy.security = self.create_security_policy()
+ changed = True
+ changed_list.append("Security")
+ host_results['sec_promiscuous_mode_previous'] = "No override"
+ host_results['sec_mac_changes_previous'] = "No override"
+ host_results['sec_forged_transmits_previous'] = "No override"
+
+ # Check traffic shaping
+ if self.ts_enabled is None:
+ host_results['traffic_shaping'] = "No override"
+ else:
+ host_results['traffic_shaping'] = self.ts_enabled
+ if self.ts_enabled:
+ ts_average_bandwidth = self.ts_average_bandwidth * 1000
+ ts_peak_bandwidth = self.ts_peak_bandwidth * 1000
+ ts_burst_size = self.ts_burst_size * 1024
+ host_results['traffic_shaping_avg_bandw'] = ts_average_bandwidth
+ host_results['traffic_shaping_peak_bandw'] = ts_peak_bandwidth
+ host_results['traffic_shaping_burst'] = ts_burst_size
+ if spec.policy.shapingPolicy and spec.policy.shapingPolicy.enabled is not None:
+ if spec.policy.shapingPolicy.enabled:
+ if self.ts_enabled:
+ if spec.policy.shapingPolicy.averageBandwidth != ts_average_bandwidth:
+ changed = True
+ changed_list.append("Average bandwidth")
+ host_results['traffic_shaping_avg_bandw_previous'] = spec.policy.shapingPolicy.averageBandwidth
+ spec.policy.shapingPolicy.averageBandwidth = ts_average_bandwidth
+ if spec.policy.shapingPolicy.peakBandwidth != ts_peak_bandwidth:
+ changed = True
+ changed_list.append("Peak bandwidth")
+ host_results['traffic_shaping_peak_bandw_previous'] = spec.policy.shapingPolicy.peakBandwidth
+ spec.policy.shapingPolicy.peakBandwidth = ts_peak_bandwidth
+ if spec.policy.shapingPolicy.burstSize != ts_burst_size:
+ changed = True
+ changed_list.append("Burst size")
+ host_results['traffic_shaping_burst_previous'] = spec.policy.shapingPolicy.burstSize
+ spec.policy.shapingPolicy.burstSize = ts_burst_size
+ elif self.ts_enabled is False:
+ changed = True
+ changed_list.append("Traffic shaping")
+ host_results['traffic_shaping_previous'] = True
+ spec.policy.shapingPolicy.enabled = False
+ elif self.ts_enabled is None:
+ spec.policy.shapingPolicy = None
+ changed = True
+ changed_list.append("Traffic shaping")
+ host_results['traffic_shaping_previous'] = True
+ else:
+ if self.ts_enabled:
+ spec.policy.shapingPolicy = self.create_shaping_policy()
+ changed = True
+ changed_list.append("Traffic shaping")
+ host_results['traffic_shaping_previous'] = False
+ elif self.ts_enabled is None:
+ spec.policy.shapingPolicy = None
+ changed = True
+ changed_list.append("Traffic shaping")
+ host_results['traffic_shaping_previous'] = True
+ else:
+ if self.ts_enabled:
+ spec.policy.shapingPolicy = self.create_shaping_policy()
+ changed = True
+ changed_list.append("Traffic shaping")
+ host_results['traffic_shaping_previous'] = "No override"
+ elif self.ts_enabled is False:
+ changed = True
+ changed_list.append("Traffic shaping")
+ host_results['traffic_shaping_previous'] = "No override"
+ spec.policy.shapingPolicy.enabled = False
+
+ # Check teaming
+ if spec.policy.nicTeaming:
+ # Check teaming policy
+ if self.teaming_load_balancing is None:
+ host_results['load_balancing'] = "No override"
+ else:
+ host_results['load_balancing'] = self.teaming_load_balancing
+ if spec.policy.nicTeaming.policy:
+ if spec.policy.nicTeaming.policy != self.teaming_load_balancing:
+ changed = True
+ changed_list.append("Load balancing")
+ host_results['load_balancing_previous'] = spec.policy.nicTeaming.policy
+ spec.policy.nicTeaming.policy = self.teaming_load_balancing
+ else:
+ if self.teaming_load_balancing:
+ changed = True
+ changed_list.append("Load balancing")
+ host_results['load_balancing_previous'] = "No override"
+ spec.policy.nicTeaming.policy = self.teaming_load_balancing
+ # Check teaming notify switches
+ if spec.policy.nicTeaming.notifySwitches is None:
+ host_results['notify_switches'] = "No override"
+ else:
+ host_results['notify_switches'] = self.teaming_notify_switches
+ if spec.policy.nicTeaming.notifySwitches is not None:
+ if self.teaming_notify_switches is not None:
+ if spec.policy.nicTeaming.notifySwitches is not self.teaming_notify_switches:
+ changed = True
+ changed_list.append("Notify switches")
+ host_results['notify_switches_previous'] = spec.policy.nicTeaming.notifySwitches
+ spec.policy.nicTeaming.notifySwitches = self.teaming_notify_switches
+ else:
+ changed = True
+ changed_list.append("Notify switches")
+ host_results['notify_switches_previous'] = spec.policy.nicTeaming.notifySwitches
+ spec.policy.nicTeaming.notifySwitches = None
+ else:
+ if self.teaming_notify_switches is not None:
+ changed = True
+ changed_list.append("Notify switches")
+ host_results['notify_switches_previous'] = "No override"
+ spec.policy.nicTeaming.notifySwitches = self.teaming_notify_switches
+ # Check failback
+ if spec.policy.nicTeaming.rollingOrder is None:
+ host_results['failback'] = "No override"
+ else:
+ host_results['failback'] = self.teaming_failback
+ if spec.policy.nicTeaming.rollingOrder is not None:
+ if self.teaming_failback is not None:
+ # this option is called 'failback' in the vSphere Client
+ # rollingOrder also uses the opposite value displayed in the client
+ if spec.policy.nicTeaming.rollingOrder is self.teaming_failback:
+ changed = True
+ changed_list.append("Failback")
+ host_results['failback_previous'] = not spec.policy.nicTeaming.rollingOrder
+ spec.policy.nicTeaming.rollingOrder = not self.teaming_failback
+ else:
+ changed = True
+ changed_list.append("Failback")
+ host_results['failback_previous'] = spec.policy.nicTeaming.rollingOrder
+ spec.policy.nicTeaming.rollingOrder = None
+ else:
+ if self.teaming_failback is not None:
+ changed = True
+ changed_list.append("Failback")
+ host_results['failback_previous'] = "No override"
+ spec.policy.nicTeaming.rollingOrder = not self.teaming_failback
+ # Check teaming failover order
+ if self.teaming_failover_order_active is None and self.teaming_failover_order_standby is None:
+ host_results['failover_active'] = "No override"
+ host_results['failover_standby'] = "No override"
+ else:
+ host_results['failover_active'] = self.teaming_failover_order_active
+ host_results['failover_standby'] = self.teaming_failover_order_standby
+ if spec.policy.nicTeaming.nicOrder:
+ if self.teaming_failover_order_active or self.teaming_failover_order_standby:
+ if spec.policy.nicTeaming.nicOrder.activeNic != self.teaming_failover_order_active:
+ changed = True
+ changed_list.append("Failover order active")
+ host_results['failover_active_previous'] = spec.policy.nicTeaming.nicOrder.activeNic
+ spec.policy.nicTeaming.nicOrder.activeNic = self.teaming_failover_order_active
+ if spec.policy.nicTeaming.nicOrder.standbyNic != self.teaming_failover_order_standby:
+ changed = True
+ changed_list.append("Failover order standby")
+ host_results['failover_standby_previous'] = spec.policy.nicTeaming.nicOrder.standbyNic
+ spec.policy.nicTeaming.nicOrder.standbyNic = self.teaming_failover_order_standby
+ else:
+ spec.policy.nicTeaming.nicOrder = None
+ changed = True
+ changed_list.append("Failover order")
+ if hasattr(spec.policy.nicTeaming.nicOrder, 'activeNic'):
+ host_results['failover_active_previous'] = spec.policy.nicTeaming.nicOrder.activeNic
+ else:
+ host_results['failover_active_previous'] = []
+ if hasattr(spec.policy.nicTeaming.nicOrder, 'standbyNic'):
+ host_results['failover_standby_previous'] = spec.policy.nicTeaming.nicOrder.standbyNic
+ else:
+ host_results['failover_standby_previous'] = []
+ else:
+ if self.teaming_failover_order_active or self.teaming_failover_order_standby:
+ changed = True
+ changed_list.append("Failover order")
+ host_results['failover_active_previous'] = "No override"
+ host_results['failover_standby_previous'] = "No override"
+ spec.policy.nicTeaming.nicOrder = self.create_nic_order_policy()
+ # Check teaming failure detection
+ if self.teaming_failure_detection is None:
+ host_results['failure_detection'] = "No override"
+ else:
+ host_results['failure_detection'] = self.teaming_failure_detection
+ if spec.policy.nicTeaming.failureCriteria and spec.policy.nicTeaming.failureCriteria.checkBeacon is not None:
+ if self.teaming_failure_detection == "link_status_only":
+ if spec.policy.nicTeaming.failureCriteria.checkBeacon is True:
+ changed = True
+ changed_list.append("Network failure detection")
+ host_results['failure_detection_previous'] = "beacon_probing"
+ spec.policy.nicTeaming.failureCriteria.checkBeacon = False
+ elif self.teaming_failure_detection == "beacon_probing":
+ if spec.policy.nicTeaming.failureCriteria.checkBeacon is False:
+ changed = True
+ changed_list.append("Network failure detection")
+ host_results['failure_detection_previous'] = "link_status_only"
+ spec.policy.nicTeaming.failureCriteria.checkBeacon = True
+ elif spec.policy.nicTeaming.failureCriteria.checkBeacon is not None:
+ changed = True
+ changed_list.append("Network failure detection")
+ host_results['failure_detection_previous'] = spec.policy.nicTeaming.failureCriteria.checkBeacon
+ spec.policy.nicTeaming.failureCriteria = None
+ else:
+ if self.teaming_failure_detection:
+ spec.policy.nicTeaming.failureCriteria = self.create_nic_failure_policy()
+ changed = True
+ changed_list.append("Network failure detection")
+ host_results['failure_detection_previous'] = "No override"
+ else:
+ spec.policy.nicTeaming = self.create_teaming_policy()
+ if spec.policy.nicTeaming:
+ changed = True
+ changed_list.append("Teaming and failover")
+ host_results['load_balancing_previous'] = "No override"
+ host_results['notify_switches_previous'] = "No override"
+ host_results['failback_previous'] = "No override"
+ host_results['failover_active_previous'] = "No override"
+ host_results['failover_standby_previous'] = "No override"
+ host_results['failure_detection_previous'] = "No override"
+
+ if changed:
+ if self.module.check_mode:
+ changed_suffix = ' would be changed'
+ else:
+ changed_suffix = ' changed'
+ if len(changed_list) > 2:
+ message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1])
+ elif len(changed_list) == 2:
+ message = ' and '.join(changed_list)
+ elif len(changed_list) == 1:
+ message = changed_list[0]
+ message += changed_suffix
+ if not self.module.check_mode:
+ try:
+ host_system.configManager.networkSystem.UpdatePortGroup(
+ pgName=self.portgroup,
+ portgrp=spec
+ )
+ except vim.fault.AlreadyExists as already_exists:
+ self.module.fail_json(
+ msg="Failed to update Portgroup as it would conflict with an existing port group: %s" %
+ to_native(already_exists.msg)
+ )
+ except vim.fault.NotFound as not_found:
+ self.module.fail_json(
+ msg="Failed to update Portgroup as vSwitch was not found: %s" %
+ to_native(not_found.msg)
+ )
+ except vim.fault.HostConfigFault as host_config_fault:
+ self.module.fail_json(
+ msg="Failed to update Portgroup due to host system configuration failure : %s" %
+ to_native(host_config_fault.msg)
+ )
+ except vmodl.fault.InvalidArgument as invalid_argument:
+ self.module.fail_json(
+ msg="Failed to update Port Group '%s', this can be due to either of following :"
+ " 1. VLAN id was not correct as per specifications, 2. Network policy is invalid : %s" %
+ (self.portgroup, to_native(invalid_argument.msg))
+ )
+ else:
+ message = "Port Group already configured properly"
+ host_results['changed'] = changed
+ host_results['msg'] = message
+ host_results['portgroup'] = self.portgroup
+ host_results['vswitch'] = self.switch
+
+ return changed, host_results
+
+ def create_network_policy(self):
+ """
+ Create a Network Policy
+ Returns: Network Policy object
+ """
+ security_policy = None
+ shaping_policy = None
+ teaming_policy = None
+
+ # Only configure security policy if an option is defined
+ if not all(option is None for option in [self.sec_promiscuous_mode,
+ self.sec_mac_changes,
+ self.sec_forged_transmits]):
+ security_policy = self.create_security_policy()
+ if self.ts_enabled:
+ shaping_policy = self.create_shaping_policy()
+ teaming_policy = self.create_teaming_policy()
+
+ network_policy = vim.host.NetworkPolicy(
+ security=security_policy,
+ nicTeaming=teaming_policy,
+ shapingPolicy=shaping_policy
+ )
+
+ return network_policy
+
+ def create_security_policy(self):
+ """
+ Create a Security Policy
+ Returns: Security Policy object
+ """
+ security_policy = vim.host.NetworkPolicy.SecurityPolicy()
+ security_policy.allowPromiscuous = self.sec_promiscuous_mode
+ security_policy.macChanges = self.sec_mac_changes
+ security_policy.forgedTransmits = self.sec_forged_transmits
+ return security_policy
+
+ def create_shaping_policy(self):
+ """
+ Create a Traffic Shaping Policy
+ Returns: Traffic Shaping Policy object
+ """
+ shaping_policy = vim.host.NetworkPolicy.TrafficShapingPolicy()
+ shaping_policy.enabled = self.ts_enabled
+ shaping_policy.averageBandwidth = self.ts_average_bandwidth * 1000
+ shaping_policy.peakBandwidth = self.ts_peak_bandwidth * 1000
+ shaping_policy.burstSize = self.ts_burst_size * 1024
+ return shaping_policy
+
+ def create_teaming_policy(self):
+ """
+ Create a NIC Teaming Policy
+ Returns: NIC Teaming Policy object
+ """
+ # Only configure teaming policy if an option is defined
+ if not all(option is None for option in [self.teaming_load_balancing,
+ self.teaming_failure_detection,
+ self.teaming_notify_switches,
+ self.teaming_failback,
+ self.teaming_failover_order_active,
+ self.teaming_failover_order_standby]):
+ teaming_policy = vim.host.NetworkPolicy.NicTeamingPolicy()
+ teaming_policy.policy = self.teaming_load_balancing
+ teaming_policy.reversePolicy = True
+ teaming_policy.notifySwitches = self.teaming_notify_switches
+ if self.teaming_failback is None:
+ teaming_policy.rollingOrder = None
+ else:
+ teaming_policy.rollingOrder = not self.teaming_failback
+ if self.teaming_failover_order_active is None and self.teaming_failover_order_standby is None:
+ teaming_policy.nicOrder = None
+ else:
+ teaming_policy.nicOrder = self.create_nic_order_policy()
+ if self.teaming_failure_detection is None:
+ teaming_policy.failureCriteria = None
+ else:
+ teaming_policy.failureCriteria = self.create_nic_failure_policy()
+ return teaming_policy
+ return None
+
+    def create_nic_order_policy(self):
+        """
+        Create a NIC order Policy
+
+        Validates that every requested active/standby NIC is attached to the
+        target vSwitch before building the policy.
+        Returns: NIC order Policy object
+        """
+        # self.switch_object is cached by check_if_vswitch_exists(); its bridge
+        # lists the physical NICs bound to the vSwitch.
+        # NOTE(review): assumes both failover lists are non-None when this is
+        # called -- iterating a None list would raise; confirm callers only
+        # invoke this when at least the iterated lists are set.
+        for active_nic in self.teaming_failover_order_active:
+            if active_nic not in self.switch_object.spec.bridge.nicDevice:
+                self.module.fail_json(
+                    msg="NIC '%s' (active) is not configured on vSwitch '%s'" % (active_nic, self.switch)
+                )
+        for standby_nic in self.teaming_failover_order_standby:
+            if standby_nic not in self.switch_object.spec.bridge.nicDevice:
+                self.module.fail_json(
+                    msg="NIC '%s' (standby) is not configured on vSwitch '%s'" % (standby_nic, self.switch)
+                )
+        nic_order = vim.host.NetworkPolicy.NicOrderPolicy()
+        nic_order.activeNic = self.teaming_failover_order_active
+        nic_order.standbyNic = self.teaming_failover_order_standby
+        return nic_order
+
+ def create_nic_failure_policy(self):
+ """
+ Create a NIC Failure Criteria Policy
+ Returns: NIC Failure Criteria Policy object
+ """
+ failure_criteria = vim.host.NetworkPolicy.NicFailureCriteria()
+ if self.teaming_failure_detection == "link_status_only":
+ failure_criteria.checkBeacon = False
+ elif self.teaming_failure_detection == "beacon_probing":
+ failure_criteria.checkBeacon = True
+ elif self.teaming_failure_detection is None:
+ failure_criteria = None
+ # The following properties are deprecated since VI API 5.1. Default values are used
+ failure_criteria.fullDuplex = False
+ failure_criteria.percentage = 0
+ failure_criteria.checkErrorPercent = False
+ failure_criteria.checkDuplex = False
+ failure_criteria.speed = 10
+ failure_criteria.checkSpeed = 'minimum'
+ return failure_criteria
+
+
+def main():
+    """Module entry point: build the argument spec and run VMwareHostPortGroup."""
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(dict(
+        portgroup=dict(type='str', required=True, aliases=['portgroup_name']),
+        switch=dict(type='str', required=True, aliases=['switch_name', 'vswitch']),
+        vlan_id=dict(type='int', required=False, default=0, aliases=['vlan']),
+        hosts=dict(type='list', aliases=['esxi_hostname'], elements='str'),
+        cluster_name=dict(type='str', aliases=['cluster']),
+        state=dict(type='str', choices=['present', 'absent'], default='present'),
+        # Policy sub-options default to None, which the module treats as
+        # "No override" (inherit the vSwitch setting).
+        security=dict(
+            type='dict',
+            options=dict(
+                promiscuous_mode=dict(type='bool'),
+                forged_transmits=dict(type='bool'),
+                mac_changes=dict(type='bool'),
+            ),
+            aliases=['security_policy', 'network_policy']
+        ),
+        traffic_shaping=dict(
+            type='dict',
+            options=dict(
+                enabled=dict(type='bool'),
+                average_bandwidth=dict(type='int'),
+                peak_bandwidth=dict(type='int'),
+                burst_size=dict(type='int'),
+            ),
+        ),
+        teaming=dict(
+            type='dict',
+            options=dict(
+                load_balancing=dict(
+                    type='str',
+                    choices=[
+                        'loadbalance_ip',
+                        'loadbalance_srcmac',
+                        'loadbalance_srcid',
+                        'failover_explicit',
+                    ],
+                    aliases=['load_balance_policy'],
+                ),
+                network_failure_detection=dict(
+                    type='str',
+                    choices=['link_status_only', 'beacon_probing']
+                ),
+                notify_switches=dict(type='bool'),
+                failback=dict(type='bool'),
+                active_adapters=dict(type='list', elements='str'),
+                standby_adapters=dict(type='list', elements='str'),
+            ),
+            aliases=['teaming_policy']
+        ),
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_one_of=[
+            ['cluster_name', 'hosts'],
+        ],
+        supports_check_mode=True
+    )
+
+    try:
+        host_portgroup = VMwareHostPortGroup(module)
+        host_portgroup.process_state()
+    except vmodl.RuntimeFault as runtime_fault:
+        # vSphere API faults carry a human-readable msg attribute.
+        module.fail_json(msg=to_native(runtime_fault.msg))
+    except vmodl.MethodFault as method_fault:
+        module.fail_json(msg=to_native(method_fault.msg))
+    except Exception as e:
+        # Catch-all so unexpected errors surface as a module failure.
+        module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_portgroup_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_portgroup_info.py
new file mode 100644
index 000000000..b94be38db
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_portgroup_info.py
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_portgroup_info
+short_description: Gathers info about an ESXi host's Port Group configuration
+description:
+- This module can be used to gather information about an ESXi host's Port Group configuration when ESXi hostname or Cluster name is given.
+author:
+- Abhijeet Kasurde (@Akasurde)
+- Christian Kotte (@ckotte)
+options:
+ policies:
+ description:
+ - Gather information about Security, Traffic Shaping, as well as Teaming and failover.
+ - The property C(ts) stands for Traffic Shaping and C(lb) for Load Balancing.
+ type: bool
+ default: false
+ cluster_name:
+ description:
+ - Name of the cluster.
+ - Info will be returned for all hostsystem belonging to this cluster name.
+ - If C(esxi_hostname) is not given, this parameter is required.
+ type: str
+ esxi_hostname:
+ description:
+ - ESXi hostname to gather information from.
+ - If C(cluster_name) is not given, this parameter is required.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather portgroup info about all ESXi Host in given Cluster
+ community.vmware.vmware_portgroup_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: '{{ cluster_name }}'
+ delegate_to: localhost
+
+- name: Gather portgroup info about ESXi Host system
+ community.vmware.vmware_portgroup_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+hosts_portgroup_info:
+ description: metadata about host's portgroup configuration
+ returned: on success
+ type: dict
+ sample: {
+ "esx01": [
+ {
+ "failback": true,
+ "failover_active": ["vmnic0", "vmnic1"],
+ "failover_standby": [],
+ "failure_detection": "link_status_only",
+ "lb": "loadbalance_srcid",
+ "notify": true,
+ "portgroup": "Management Network",
+ "security": [false, false, false],
+ "ts": "No override",
+ "vlan_id": 0,
+ "vswitch": "vSwitch0"
+ },
+ {
+ "failback": true,
+ "failover_active": ["vmnic2"],
+ "failover_standby": ["vmnic3"],
+ "failure_detection": "No override",
+ "lb": "No override",
+ "notify": true,
+ "portgroup": "vMotion",
+ "security": [false, false, false],
+ "ts": "No override",
+ "vlan_id": 33,
+ "vswitch": "vSwitch1"
+ }
+ ]
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+
+
+class PortgroupInfoManager(PyVmomi):
+ """Class to manage Port Group info"""
+
+ def __init__(self, module):
+ super(PortgroupInfoManager, self).__init__(module)
+ cluster_name = self.params.get('cluster_name', None)
+ esxi_host_name = self.params.get('esxi_hostname', None)
+ self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
+ if not self.hosts:
+ self.module.fail_json(msg="Failed to find host system.")
+ self.policies = self.params.get('policies')
+
+ @staticmethod
+ def normalize_pg_info(portgroup_obj, policy_info):
+ """Create Port Group information"""
+ pg_info_dict = dict()
+ spec = portgroup_obj.spec
+ pg_info_dict['portgroup'] = spec.name
+ pg_info_dict['vlan_id'] = spec.vlanId
+ pg_info_dict['vswitch'] = spec.vswitchName
+
+ if policy_info:
+ # Security info
+ if spec.policy.security:
+ promiscuous_mode = spec.policy.security.allowPromiscuous
+ mac_changes = spec.policy.security.macChanges
+ forged_transmits = spec.policy.security.forgedTransmits
+ pg_info_dict['security'] = (
+ ["No override" if promiscuous_mode is None else promiscuous_mode,
+ "No override" if mac_changes is None else mac_changes,
+ "No override" if forged_transmits is None else forged_transmits]
+ )
+ else:
+ pg_info_dict['security'] = ["No override", "No override", "No override"]
+
+ # Traffic Shaping info
+ if spec.policy.shapingPolicy and spec.policy.shapingPolicy.enabled is not None:
+ pg_info_dict['ts'] = portgroup_obj.spec.policy.shapingPolicy.enabled
+ else:
+ pg_info_dict['ts'] = "No override"
+
+ # Teaming and failover info
+ if spec.policy.nicTeaming:
+ if spec.policy.nicTeaming.policy is None:
+ pg_info_dict['lb'] = "No override"
+ else:
+ pg_info_dict['lb'] = spec.policy.nicTeaming.policy
+ if spec.policy.nicTeaming.notifySwitches is None:
+ pg_info_dict['notify'] = "No override"
+ else:
+ pg_info_dict['notify'] = spec.policy.nicTeaming.notifySwitches
+ if spec.policy.nicTeaming.rollingOrder is None:
+ pg_info_dict['failback'] = "No override"
+ else:
+ pg_info_dict['failback'] = not spec.policy.nicTeaming.rollingOrder
+ if spec.policy.nicTeaming.nicOrder is None:
+ pg_info_dict['failover_active'] = "No override"
+ pg_info_dict['failover_standby'] = "No override"
+ else:
+ pg_info_dict['failover_active'] = spec.policy.nicTeaming.nicOrder.activeNic
+ pg_info_dict['failover_standby'] = spec.policy.nicTeaming.nicOrder.standbyNic
+ if spec.policy.nicTeaming.failureCriteria is None:
+ pg_info_dict['failure_detection'] = "No override"
+ else:
+ if spec.policy.nicTeaming.failureCriteria.checkBeacon:
+ pg_info_dict['failure_detection'] = "beacon_probing"
+ else:
+ pg_info_dict['failure_detection'] = "link_status_only"
+ else:
+ pg_info_dict['lb'] = "No override"
+ pg_info_dict['notify'] = "No override"
+ pg_info_dict['failback'] = "No override"
+ pg_info_dict['failover_active'] = "No override"
+ pg_info_dict['failover_standby'] = "No override"
+ pg_info_dict['failure_detection'] = "No override"
+
+ return pg_info_dict
+
+ def gather_host_portgroup_info(self):
+ """Gather Port Group info per ESXi host"""
+ hosts_pg_info = dict()
+ for host in self.hosts:
+ pgs = host.config.network.portgroup
+ hosts_pg_info[host.name] = []
+ for portgroup in pgs:
+ hosts_pg_info[host.name].append(
+ self.normalize_pg_info(portgroup_obj=portgroup, policy_info=self.policies)
+ )
+ return hosts_pg_info
+
+
+def main():
+ """Main"""
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ cluster_name=dict(type='str', required=False),
+ esxi_hostname=dict(type='str', required=False),
+ policies=dict(type='bool', required=False, default=False),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=[
+ ['cluster_name', 'esxi_hostname'],
+ ],
+ supports_check_mode=True
+ )
+
+ host_pg_mgr = PortgroupInfoManager(module)
+ module.exit_json(changed=False, hosts_portgroup_info=host_pg_mgr.gather_host_portgroup_info())
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_recommended_datastore.py b/ansible_collections/community/vmware/plugins/modules/vmware_recommended_datastore.py
new file mode 100644
index 000000000..2bf9f30f5
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_recommended_datastore.py
@@ -0,0 +1,111 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r"""
+---
+module: vmware_recommended_datastore
+short_description: Returns the recommended datastore from a SDRS-enabled datastore cluster
+description:
+- This module provides the recommended datastore name from a datastore cluster only if the SDRS is enabled for the specified datastore cluster
+author:
+- Unknown (@MalfuncEddie)
+- Alina Buzachis (@alinabuzachis)
+- Abhijeet Kasurde (@Akasurde)
+notes:
+- Supports Check mode.
+options:
+ datacenter:
+ description:
+ - Name of the datacenter.
+ type: str
+ required: true
+ datastore_cluster:
+ description:
+ - Name of the datastore cluster.
+ type: str
+ required: true
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+"""
+
+
+EXAMPLES = r"""
+- name: Get recommended datastore from a Storage DRS-enabled datastore cluster
+ community.vmware.vmware_recommended_datastore:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ validate_certs: false
+ datastore_cluster: '{{ datastore_cluster_name }}'
+ datacenter: '{{ datacenter }}'
+  register: recommended_ds
+"""
+
+
+RETURN = r"""
+recommended_datastore:
+  description: name of the recommended datastore from the SDRS-enabled datastore cluster
+  returned: always
+  type: str
+  sample: 'datastore-01'
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi,
+ vmware_argument_spec,
+)
+
+
class VmwareDatastoreClusterInfo(PyVmomi):
    """Look up the SDRS-recommended datastore of a datastore cluster.

    All work happens in the constructor, which ends by calling
    ``module.exit_json`` (or ``fail_json`` when the datacenter is missing).
    """

    def __init__(self, module):
        super(VmwareDatastoreClusterInfo, self).__init__(module)
        self.module = module
        self.params = module.params

        dc_name = self.params.get("datacenter")
        datacenter = self.find_datacenter_by_name(dc_name)
        if datacenter is None:
            self.module.fail_json(
                msg="Unable to find datacenter with name %s" % dc_name
            )

        cluster_name = self.params.get("datastore_cluster")
        datastore_cluster = self.find_datastore_cluster_by_name(
            cluster_name, datacenter=datacenter
        )

        # The helper may return a falsy value (e.g. when SDRS is disabled or
        # the cluster was not found); report an empty string in that case.
        recommended = self.get_recommended_datastore(
            datastore_cluster_obj=datastore_cluster
        ) or ""
        self.module.exit_json(changed=False, recommended_datastore=recommended)
+
+
def main():
    """Module entry point."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        datacenter=dict(type="str", required=True),
        datastore_cluster=dict(type="str", required=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    # The constructor does the lookup and calls exit_json() itself.
    VmwareDatastoreClusterInfo(module)


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_resource_pool.py b/ansible_collections/community/vmware/plugins/modules/vmware_resource_pool.py
new file mode 100644
index 000000000..2ed81f821
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_resource_pool.py
@@ -0,0 +1,490 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Davis Phillips davis.phillips@gmail.com
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_resource_pool
+short_description: Add/remove resource pools to/from vCenter
+description:
+ - This module can be used to add/remove a resource pool to/from vCenter
+author:
+- Davis Phillips (@dav1x)
+options:
+ datacenter:
+ description:
+ - Name of the datacenter.
+ required: true
+ type: str
+ cluster:
+ description:
+ - Name of the cluster to configure the resource pool.
+ - This parameter is required if C(esxi_hostname) or C(parent_resource_pool) is not specified.
+ - The C(cluster), C(esxi_hostname) and C(parent_resource_pool) parameters are mutually exclusive.
+ type: str
+ esxi_hostname:
+ description:
+ - Name of the host to configure the resource pool.
+ - The host must not be member of a cluster.
+ - This parameter is required if C(cluster) or C(parent_resource_pool) is not specified.
+ - The C(cluster), C(esxi_hostname) and C(parent_resource_pool) parameters are mutually exclusive.
+ type: str
+ parent_resource_pool:
+ description:
+ - Name of the parent resource pool.
+ - This parameter is required if C(cluster) or C(esxi_hostname) is not specified.
+ - The C(cluster), C(esxi_hostname) and C(parent_resource_pool) parameters are mutually exclusive.
+ type: str
+ resource_pool:
+ description:
+ - Resource pool name to manage.
+ required: true
+ type: str
+ cpu_expandable_reservations:
+ description:
+ - In a resource pool with an expandable reservation, the reservation on a resource pool can grow beyond the specified value.
+ default: true
+ type: bool
+ cpu_reservation:
+ description:
+ - Amount of resource that is guaranteed available to the virtual machine or resource pool.
+ default: 0
+ type: int
+ cpu_limit:
+ description:
+ - The utilization of a virtual machine/resource pool will not exceed this limit, even if there are available resources.
+ - The default value -1 indicates no limit.
+ default: -1
+ type: int
+ cpu_shares:
+ description:
+      - CPU shares are used in case of resource contention.
+ choices:
+ - high
+ - custom
+ - low
+ - normal
+ default: normal
+ type: str
+ cpu_allocation_shares:
+ description:
+ - The number of cpu shares allocated.
+ - This value is only set if I(cpu_shares) is set to C(custom).
+ type: int
+ default: 4000
+ mem_expandable_reservations:
+ description:
+ - In a resource pool with an expandable reservation, the reservation on a resource pool can grow beyond the specified value.
+ default: true
+ type: bool
+ mem_reservation:
+ description:
+ - Amount of resource that is guaranteed available to the virtual machine or resource pool.
+ default: 0
+ type: int
+ mem_limit:
+ description:
+ - The utilization of a virtual machine/resource pool will not exceed this limit, even if there are available resources.
+ - The default value -1 indicates no limit.
+ default: -1
+ type: int
+ mem_shares:
+ description:
+ - Memory shares are used in case of resource contention.
+ choices:
+ - high
+ - custom
+ - low
+ - normal
+ default: normal
+ type: str
+ mem_allocation_shares:
+ description:
+ - The number of memory shares allocated.
+ - This value is only set if I(mem_shares) is set to C(custom).
+ type: int
+ default: 163840
+ state:
+ description:
+ - Add or remove the resource pool
+ default: 'present'
+ choices:
+ - 'present'
+ - 'absent'
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Add resource pool to vCenter
+ community.vmware.vmware_resource_pool:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter: '{{ datacenter_name }}'
+ cluster: '{{ cluster_name }}'
+ resource_pool: '{{ resource_pool_name }}'
+ mem_shares: normal
+ mem_limit: -1
+ mem_reservation: 0
+ mem_expandable_reservations: true
+ cpu_shares: normal
+ cpu_limit: -1
+ cpu_reservation: 0
+ cpu_expandable_reservations: true
+ state: present
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+instance:
+ description: metadata about the new resource pool
+ returned: always
+ type: dict
+ sample: None
+resource_pool_config:
+ description: config data about the resource pool, version added 1.4.0
+ returned: always
+ type: dict
+ sample: >-
+ {
+ "_vimtype": "vim.ResourceConfigSpec",
+ "changeVersion": null,
+ "cpuAllocation": {
+ "_vimtype": "vim.ResourceAllocationInfo",
+ "expandableReservation": true,
+ "limit": -1,
+ "overheadLimit": null,
+ "reservation": 0,
+ "shares": {
+ "_vimtype": "vim.SharesInfo",
+ "level": "normal",
+ "shares": 4000
+ }
+ },
+ "entity": "vim.ResourcePool:resgroup-1108",
+ "lastModified": null,
+ "memoryAllocation": {
+ "_vimtype": "vim.ResourceAllocationInfo",
+ "expandableReservation": true,
+ "limit": -1,
+ "overheadLimit": null,
+ "reservation": 0,
+ "shares": {
+ "_vimtype": "vim.SharesInfo",
+ "level": "high",
+ "shares": 327680
+ }
+ },
+ "name": "test_pr1",
+ "scaleDescendantsShares": null
+ }
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import get_all_objs, vmware_argument_spec, find_datacenter_by_name, \
+ find_cluster_by_name, find_object_by_name, wait_for_task, find_resource_pool_by_name, PyVmomi
+from ansible.module_utils.basic import AnsibleModule
+
+
class VMwareResourcePool(PyVmomi):
    """Manage a vSphere resource pool (create, reconfigure or delete).

    The pool lives under exactly one of: a cluster, a standalone ESXi host,
    or a parent resource pool (mutual exclusivity is enforced by the
    argument spec in main()).
    """

    def __init__(self, module):
        super(VMwareResourcePool, self).__init__(module)
        self.datacenter = module.params['datacenter']
        self.resource_pool = module.params['resource_pool']
        self.hostname = module.params['hostname']
        self.username = module.params['username']
        self.password = module.params['password']
        self.state = module.params['state']
        self.mem_shares = module.params['mem_shares']
        self.mem_allocation_shares = module.params['mem_allocation_shares']
        self.mem_limit = module.params['mem_limit']
        self.mem_reservation = module.params['mem_reservation']
        self.mem_expandable_reservations = module.params[
            'mem_expandable_reservations']
        self.cpu_shares = module.params['cpu_shares']
        self.cpu_allocation_shares = module.params['cpu_allocation_shares']
        self.cpu_limit = module.params['cpu_limit']
        self.cpu_reservation = module.params['cpu_reservation']
        self.cpu_expandable_reservations = module.params[
            'cpu_expandable_reservations']
        self.parent_resource_pool = module.params['parent_resource_pool']
        self.resource_pool_obj = None

        self.dc_obj = find_datacenter_by_name(self.content, self.datacenter)
        if self.dc_obj is None:
            self.module.fail_json(msg="Unable to find datacenter with name %s" % self.datacenter)

        # Resolve the object the pool will live under. Only one of the three
        # branches can run (mutually_exclusive in main()).
        if module.params['cluster']:
            self.compute_resource_obj = find_cluster_by_name(self.content, module.params['cluster'], datacenter=self.dc_obj)
            if self.compute_resource_obj is None:
                self.module.fail_json(msg="Unable to find cluster with name %s" % module.params['cluster'])

        if module.params['esxi_hostname']:
            self.compute_resource_obj = find_object_by_name(self.content, module.params['esxi_hostname'], [vim.ComputeResource], folder=self.dc_obj.hostFolder)
            if self.compute_resource_obj is None:
                self.module.fail_json(msg="Unable to find host with name %s" % module.params['esxi_hostname'])

        if module.params['parent_resource_pool']:
            self.compute_resource_obj = find_resource_pool_by_name(self.content, module.params['parent_resource_pool'])
            if self.compute_resource_obj is None:
                self.module.fail_json(msg="Unable to find resource pool with name %s" % module.params['parent_resource_pool'])

    def select_resource_pool(self):
        """Return the managed pool below the selected parent, or None.

        Pool names are not unique vCenter-wide, so name matches are filtered
        against the pools that actually live under self.compute_resource_obj.
        """
        pool_obj = None

        resource_pools = get_all_objs(self.content, [vim.ResourcePool], folder=self.compute_resource_obj)

        pool_selections = self.get_obj(
            [vim.ResourcePool],
            self.resource_pool,
            return_all=True
        )
        if pool_selections:
            for p in pool_selections:
                if p in resource_pools:
                    pool_obj = p
                    break

        return pool_obj

    def get_obj(self, vimtype, name, return_all=False):
        """Find managed object(s) matching C(name) (display name or MoId).

        Returns a single object when return_all is False, a list of matches
        when it is True, or None when nothing matches.
        """
        obj = list()
        container = self.content.viewManager.CreateContainerView(
            self.content.rootFolder, vimtype, True)

        for c in container.view:
            if name in [c.name, c._GetMoId()]:
                if return_all is False:
                    return c
                else:
                    obj.append(c)

        if len(obj) > 0:
            return obj
        else:
            # for backwards-compat
            return None

    def process_state(self):
        """Dispatch to the handler for (desired state, current state)."""
        try:
            rp_states = {
                'absent': {
                    'present': self.state_remove_rp,
                    'absent': self.state_exit_unchanged,
                },
                'present': {
                    'present': self.state_update_existing_pr,
                    'absent': self.state_add_rp,
                }
            }

            rp_states[self.state][self.check_rp_state()]()

        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except Exception as e:
            self.module.fail_json(msg=to_native(e))

    def generate_rp_config(self):
        """Build a vim.ResourceConfigSpec from the module parameters."""
        rp_spec = vim.ResourceConfigSpec()
        cpu_alloc = vim.ResourceAllocationInfo()
        cpu_alloc.expandableReservation = self.cpu_expandable_reservations
        cpu_alloc.limit = self.cpu_limit
        cpu_alloc.reservation = self.cpu_reservation
        cpu_alloc_shares = vim.SharesInfo()
        if self.cpu_shares == 'custom':
            # An explicit share count is only valid with level 'custom'.
            cpu_alloc_shares.shares = self.cpu_allocation_shares
        cpu_alloc_shares.level = self.cpu_shares
        cpu_alloc.shares = cpu_alloc_shares
        rp_spec.cpuAllocation = cpu_alloc

        mem_alloc = vim.ResourceAllocationInfo()
        mem_alloc.limit = self.mem_limit
        mem_alloc.expandableReservation = self.mem_expandable_reservations
        mem_alloc.reservation = self.mem_reservation
        mem_alloc_shares = vim.SharesInfo()
        if self.mem_shares == 'custom':
            mem_alloc_shares.shares = self.mem_allocation_shares
        mem_alloc_shares.level = self.mem_shares
        mem_alloc.shares = mem_alloc_shares
        rp_spec.memoryAllocation = mem_alloc

        return rp_spec

    def generate_rp_config_return_value(self, include_rp_config=False):
        """Build the resource_pool_config dict returned to the user.

        When include_rp_config is True the pool's live config is serialized;
        otherwise only the pool name is returned.
        """
        resource_config_return_value = {}
        if include_rp_config:
            resource_config_return_value = self.to_json(self.select_resource_pool().config)

        resource_config_return_value['name'] = self.resource_pool

        return resource_config_return_value

    def state_exit_unchanged(self):
        """Exit reporting no change (pool already absent)."""
        changed = False
        if self.module.check_mode:
            self.module.exit_json(changed=changed)

        self.module.exit_json(changed=changed, resource_pool_config=self.generate_rp_config_return_value())

    def state_update_existing_pr(self):
        """Reconfigure the existing pool if any parameter differs."""
        changed = False

        # Compare the existing config against the requested one and only
        # push settings that actually differ.
        rp_spec = self.generate_rp_config()
        if self.mem_shares and self.mem_shares != self.resource_pool_obj.config.memoryAllocation.shares.level:
            changed = True
            rp_spec.memoryAllocation.shares.level = self.mem_shares

        if self.mem_allocation_shares and self.mem_shares == 'custom':
            if self.mem_allocation_shares != self.resource_pool_obj.config.memoryAllocation.shares.shares:
                changed = True
                rp_spec.memoryAllocation.shares.shares = self.mem_allocation_shares

        # 'is not None' (not truthiness): an explicit limit of 0 is a valid
        # setting and must not be silently skipped.
        if self.mem_limit is not None and self.mem_limit != self.resource_pool_obj.config.memoryAllocation.limit:
            changed = True
            rp_spec.memoryAllocation.limit = self.mem_limit

        # NOTE(review): a reservation of 0 (the default) never triggers an
        # update here; changing this would alter long-standing behavior.
        if self.mem_reservation and self.mem_reservation != self.resource_pool_obj.config.memoryAllocation.reservation:
            changed = True
            rp_spec.memoryAllocation.reservation = self.mem_reservation

        if self.mem_expandable_reservations != self.resource_pool_obj.config.memoryAllocation.expandableReservation:
            changed = True
            rp_spec.memoryAllocation.expandableReservation = self.mem_expandable_reservations

        if self.cpu_shares and self.cpu_shares != self.resource_pool_obj.config.cpuAllocation.shares.level:
            changed = True
            rp_spec.cpuAllocation.shares.level = self.cpu_shares

        if self.cpu_allocation_shares and self.cpu_shares == 'custom':
            if self.cpu_allocation_shares != self.resource_pool_obj.config.cpuAllocation.shares.shares:
                changed = True
                rp_spec.cpuAllocation.shares.shares = self.cpu_allocation_shares

        if self.cpu_limit is not None and self.cpu_limit != self.resource_pool_obj.config.cpuAllocation.limit:
            changed = True
            rp_spec.cpuAllocation.limit = self.cpu_limit

        if self.cpu_reservation and self.cpu_reservation != self.resource_pool_obj.config.cpuAllocation.reservation:
            changed = True
            rp_spec.cpuAllocation.reservation = self.cpu_reservation

        if self.cpu_expandable_reservations != self.resource_pool_obj.config.cpuAllocation.expandableReservation:
            changed = True
            rp_spec.cpuAllocation.expandableReservation = self.cpu_expandable_reservations

        if self.module.check_mode:
            self.module.exit_json(changed=changed)

        if changed:
            self.resource_pool_obj.UpdateConfig(self.resource_pool, rp_spec)

        resource_pool_config = self.generate_rp_config_return_value(True)
        self.module.exit_json(changed=changed, resource_pool_config=resource_pool_config)

    def state_remove_rp(self):
        """Destroy the existing resource pool and exit."""
        changed = True
        if self.module.check_mode:
            self.module.exit_json(changed=changed)

        # Capture the config before destroying the pool, while it still exists.
        resource_pool_config = self.generate_rp_config_return_value(True)
        try:
            task = self.resource_pool_obj.Destroy()
            wait_for_task(task)
        except Exception as e:
            # Include the underlying fault instead of repeating the pool name.
            self.module.fail_json(msg="Failed to remove resource pool '%s': %s" % (
                self.resource_pool, to_native(e)))
        self.module.exit_json(changed=changed, resource_pool_config=resource_pool_config)

    def state_add_rp(self):
        """Create the resource pool under the selected parent and exit."""
        changed = True
        if self.module.check_mode:
            self.module.exit_json(changed=changed)

        rp_spec = self.generate_rp_config()

        # A parent resource pool is already a pool; clusters/hosts expose
        # their root pool via .resourcePool.
        if self.parent_resource_pool:
            rootResourcePool = self.compute_resource_obj
        else:
            rootResourcePool = self.compute_resource_obj.resourcePool

        rootResourcePool.CreateResourcePool(self.resource_pool, rp_spec)

        resource_pool_config = self.generate_rp_config_return_value(True)
        self.module.exit_json(changed=changed, resource_pool_config=resource_pool_config)

    def check_rp_state(self):
        """Return 'present' if the managed pool exists, else 'absent'."""
        self.resource_pool_obj = self.select_resource_pool()
        if self.resource_pool_obj is None:
            return 'absent'

        return 'present'
+
+
def main():
    """Module entry point: build the argument spec and run the manager."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(datacenter=dict(required=True, type='str'),
                              cluster=dict(type='str', required=False),
                              esxi_hostname=dict(type='str', required=False),
                              parent_resource_pool=dict(type='str', required=False),
                              resource_pool=dict(required=True, type='str'),
                              mem_shares=dict(type='str', default="normal", choices=[
                                  'high', 'custom', 'normal', 'low']),
                              mem_allocation_shares=dict(type='int', default=163840),
                              mem_limit=dict(type='int', default=-1),
                              mem_reservation=dict(type='int', default=0),
                              # Real booleans, not the string "True" that relied on
                              # implicit coercion by the argument parser.
                              mem_expandable_reservations=dict(
                                  type='bool', default=True),
                              cpu_shares=dict(type='str', default="normal", choices=[
                                  'high', 'custom', 'normal', 'low']),
                              cpu_allocation_shares=dict(type='int', default=4000),
                              cpu_limit=dict(type='int', default=-1),
                              cpu_reservation=dict(type='int', default=0),
                              cpu_expandable_reservations=dict(
                                  type='bool', default=True),
                              state=dict(default='present', choices=['present', 'absent'], type='str')))

    module = AnsibleModule(argument_spec=argument_spec,
                           required_if=[
                               ['mem_shares', 'custom', ['mem_allocation_shares']],
                               ['cpu_shares', 'custom', ['cpu_allocation_shares']]
                           ],
                           required_one_of=[
                               ['cluster', 'esxi_hostname', 'parent_resource_pool'],
                           ],
                           mutually_exclusive=[
                               ['cluster', 'esxi_hostname', 'parent_resource_pool'],
                           ],
                           supports_check_mode=True)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    vmware_rp = VMwareResourcePool(module)
    vmware_rp.process_state()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_resource_pool_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_resource_pool_info.py
new file mode 100644
index 000000000..82765a3b0
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_resource_pool_info.py
@@ -0,0 +1,133 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_resource_pool_info
+short_description: Gathers info about resource pool information
+description:
+- This module can be used to gather information about all resource configuration information.
+author:
+- Abhijeet Kasurde (@Akasurde)
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather resource pool info about all resource pools available
+ community.vmware.vmware_resource_pool_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ register: rp_info
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+resource_pool_info:
+ description: metadata about resource pool configuration
+ returned: on success
+ type: list
+ sample: [
+ {
+ "cpu_allocation_expandable_reservation": false,
+ "cpu_allocation_limit": 4121,
+ "cpu_allocation_overhead_limit": null,
+ "cpu_allocation_reservation": 4121,
+ "cpu_allocation_shares": 9000,
+ "cpu_allocation_shares_level": "custom",
+ "mem_allocation_expandable_reservation": false,
+ "mem_allocation_limit": 961,
+ "mem_allocation_overhead_limit": null,
+ "mem_allocation_reservation": 961,
+ "mem_allocation_shares": 9000,
+ "mem_allocation_shares_level": "custom",
+ "name": "Resources",
+ "overall_status": "green",
+ "owner": "DC0_H0",
+ "runtime_cpu_max_usage": 4121,
+ "runtime_cpu_overall_usage": 0,
+ "runtime_cpu_reservation_used": 0,
+ "runtime_cpu_reservation_used_vm": 0,
+ "runtime_cpu_unreserved_for_pool": 4121,
+ "runtime_cpu_unreserved_for_vm": 4121,
+ "runtime_memory_max_usage": 1007681536,
+ "runtime_memory_overall_usage": 0,
+ "runtime_memory_reservation_used": 0,
+ "runtime_memory_reservation_used_vm": 0,
+ "runtime_memory_unreserved_for_pool": 1007681536,
+ "runtime_memory_unreserved_for_vm": 1007681536
+ },
+ ]
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi, get_all_objs
+
+
class ResourcePoolInfoManager(PyVmomi):
    """Collects configuration and runtime details for every resource pool."""

    def __init__(self, module):
        super(ResourcePoolInfoManager, self).__init__(module)

    def gather_rp_info(self):
        """Return a list of dicts, one per resource pool in the inventory."""
        all_info = []
        for pool in get_all_objs(self.content, [vim.ResourcePool]):
            # Hoist the deep attribute chains once per pool for readability.
            cpu_cfg = pool.config.cpuAllocation
            mem_cfg = pool.config.memoryAllocation
            cpu_rt = pool.summary.runtime.cpu
            mem_rt = pool.summary.runtime.memory
            all_info.append(dict(
                name=pool.name,
                cpu_allocation_reservation=cpu_cfg.reservation,
                cpu_allocation_expandable_reservation=cpu_cfg.expandableReservation,
                cpu_allocation_limit=cpu_cfg.limit,
                cpu_allocation_shares=cpu_cfg.shares.shares,
                cpu_allocation_shares_level=cpu_cfg.shares.level,
                cpu_allocation_overhead_limit=cpu_cfg.overheadLimit,
                mem_allocation_reservation=mem_cfg.reservation,
                mem_allocation_expandable_reservation=mem_cfg.expandableReservation,
                mem_allocation_limit=mem_cfg.limit,
                mem_allocation_shares=mem_cfg.shares.shares,
                mem_allocation_shares_level=mem_cfg.shares.level,
                mem_allocation_overhead_limit=mem_cfg.overheadLimit,
                owner=pool.owner.name,
                overall_status=pool.summary.runtime.overallStatus,
                runtime_cpu_reservation_used=cpu_rt.reservationUsed,
                runtime_cpu_reservation_used_vm=cpu_rt.reservationUsedForVm,
                runtime_cpu_unreserved_for_pool=cpu_rt.unreservedForPool,
                runtime_cpu_unreserved_for_vm=cpu_rt.unreservedForVm,
                runtime_cpu_overall_usage=cpu_rt.overallUsage,
                runtime_cpu_max_usage=cpu_rt.maxUsage,
                runtime_memory_reservation_used=mem_rt.reservationUsed,
                runtime_memory_reservation_used_vm=mem_rt.reservationUsedForVm,
                runtime_memory_unreserved_for_pool=mem_rt.unreservedForPool,
                runtime_memory_unreserved_for_vm=mem_rt.unreservedForVm,
                runtime_memory_overall_usage=mem_rt.overallUsage,
                runtime_memory_max_usage=mem_rt.maxUsage,
            ))
        return all_info
+
+
def main():
    """Module entry point."""
    argument_spec = vmware_argument_spec()
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    # Pure info gathering; never reports a change.
    manager = ResourcePoolInfoManager(module)
    info = manager.gather_rp_info()
    module.exit_json(changed=False, resource_pool_info=info)


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_tag.py b/ansible_collections/community/vmware/plugins/modules/vmware_tag.py
new file mode 100644
index 000000000..cf6e07a3c
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_tag.py
@@ -0,0 +1,266 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_tag
+short_description: Manage VMware tags
+description:
+- This module can be used to create / delete / update VMware tags.
+- Tag feature is introduced in vSphere 6 version, so this module is not supported in the earlier versions of vSphere.
+- All variables and VMware object names are case sensitive.
+author:
+- Abhijeet Kasurde (@Akasurde)
+requirements:
+- vSphere Automation SDK
+options:
+ tag_name:
+ description:
+ - The name of tag to manage.
+ required: true
+ aliases: [ 'tag', 'name' ]
+ type: str
+ tag_description:
+ description:
+ - The tag description.
+ - This is required only if C(state) is set to C(present).
+ - This parameter is ignored, when C(state) is set to C(absent).
+ - Process of updating tag only allows description change.
+ required: false
+ default: ''
+ aliases: [ 'description' ]
+ type: str
+ category_id:
+ description:
+    - The unique ID generated by vCenter should be used to identify the category.
+ - User can get this unique ID from facts module.
+ - Required if C(category_name) is not set.
+ required: false
+ type: str
+ category_name:
+ description:
+ - The name of category.
+ - Required if C(category_id) is not set.
+ required: false
+ aliases: [ 'category' ]
+ type: str
+ version_added: '3.5.0'
+ state:
+ description:
+ - The state of tag.
+ - If set to C(present) and tag does not exists, then tag is created.
+ - If set to C(present) and tag exists, then tag is updated.
+ - If set to C(absent) and tag exists, then tag is deleted.
+ - If set to C(absent) and tag does not exists, no action is taken.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware_rest_client.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Create a tag
+ community.vmware.vmware_tag:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ category_id: 'urn:vmomi:InventoryServiceCategory:e785088d-6981-4b1c-9fb8-1100c3e1f742:GLOBAL'
+ tag_name: Sample_Tag_0002
+ tag_description: Sample Description
+ state: present
+ delegate_to: localhost
+
+- name: Update tag description
+ community.vmware.vmware_tag:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ tag_name: Sample_Tag_0002
+ tag_description: Some fancy description
+ state: present
+ delegate_to: localhost
+
+- name: Delete tag
+ community.vmware.vmware_tag:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ tag_name: Sample_Tag_0002
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+tag_status:
+ description: dictionary of tag metadata
+ returned: on success
+ type: dict
+ sample: {
+ "msg": "Tag 'Sample_Tag_0002' created.",
+ "tag_id": "urn:vmomi:InventoryServiceTag:bff91819-f529-43c9-80ca-1c9dfda09441:GLOBAL"
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient
+try:
+ from com.vmware.vapi.std.errors_client import Error
+except ImportError:
+ pass
+
+
class VmwareTag(VmwareRestClient):
    # Implements create/update/delete of a single vCenter tag through the
    # vSphere Automation (vAPI) tagging services exposed by VmwareRestClient.
    def __init__(self, module):
        super(VmwareTag, self).__init__(module)
        self.tag_service = self.api_client.tagging.Tag
        self.tag_name = self.params.get('tag_name')
        self.category_service = self.api_client.tagging.Category
        self.category_id = self.params.get('category_id')

        # When only category_name is given, resolve it to its ID first;
        # the rest of the class works exclusively with category_id.
        if self.category_id is None:
            category_name = self.params.get('category_name')
            category_obj = self.search_svc_object_by_name(service=self.category_service, svc_obj_name=category_name)
            if category_obj is None:
                self.module.fail_json(msg="Unable to find the category %s" % category_name)

            self.category_id = category_obj.id

        # Existing tag object (or None); drives the present/absent dispatch below.
        self.tag_obj = self.get_tag_by_category_id(tag_name=self.tag_name, category_id=self.category_id)

    def ensure_state(self):
        """
        Manage internal states of tags

        """
        # Dispatch table: (desired state, current state) -> handler.
        # Each handler calls module.exit_json()/fail_json() and does not return.
        desired_state = self.params.get('state')
        states = {
            'present': {
                'present': self.state_update_tag,
                'absent': self.state_create_tag,
            },
            'absent': {
                'present': self.state_delete_tag,
                'absent': self.state_unchanged,
            }
        }
        states[desired_state][self.check_tag_status()]()

    def state_create_tag(self):
        """
        Create tag

        """
        tag_spec = self.tag_service.CreateSpec()
        tag_spec.name = self.tag_name
        tag_spec.description = self.params.get('tag_description')

        """
        There is no need to check if a category with the specified category_id
        exists. The tag service will do the corresponding checks and will fail
        if someone tries to create a tag for a category id that does not exist.

        """
        tag_spec.category_id = self.category_id
        tag_id = ''
        try:
            tag_id = self.tag_service.create(tag_spec)
        except Error as error:
            self.module.fail_json(msg="%s" % self.get_error_message(error))

        # NOTE(review): tag_id is initialized to '' (not None), so this guard
        # is effectively always true after a successful create() — confirm
        # whether the "No tag created" branch is reachable at all.
        if tag_id is not None:
            self.module.exit_json(changed=True,
                                  tag_status=dict(msg="Tag '%s' created." % tag_spec.name, tag_id=tag_id))
        self.module.exit_json(changed=False,
                              tag_status=dict(msg="No tag created", tag_id=tag_id))

    def state_unchanged(self):
        """
        Return unchanged state

        """
        self.module.exit_json(changed=False)

    def state_update_tag(self):
        """
        Update tag

        """
        # Only the description can be updated; name/category changes are not
        # supported by this module.
        changed = False
        tag_id = self.tag_obj.id
        results = dict(msg="Tag %s is unchanged." % self.tag_name,
                       tag_id=tag_id)
        tag_desc = self.tag_obj.description
        desired_tag_desc = self.params.get('tag_description')
        if tag_desc != desired_tag_desc:
            tag_update_spec = self.tag_service.UpdateSpec()
            tag_update_spec.description = desired_tag_desc
            try:
                self.tag_service.update(tag_id, tag_update_spec)
            except Error as error:
                self.module.fail_json(msg="%s" % self.get_error_message(error))

            results['msg'] = 'Tag %s updated.' % self.tag_name
            changed = True

        self.module.exit_json(changed=changed, tag_status=results)

    def state_delete_tag(self):
        """
        Delete tag

        """
        tag_id = self.tag_obj.id
        try:
            self.tag_service.delete(tag_id=tag_id)
        except Error as error:
            self.module.fail_json(msg="%s" % self.get_error_message(error))

        self.module.exit_json(changed=True,
                              tag_status=dict(msg="Tag '%s' deleted." % self.tag_name, tag_id=tag_id))

    def check_tag_status(self):
        """
        Check if tag exists or not
        Returns: 'present' if tag found, else 'absent'

        """
        return 'present' if self.tag_obj is not None else 'absent'
+
+
def main():
    """Module entry point."""
    argument_spec = VmwareRestClient.vmware_client_argument_spec()
    argument_spec.update(
        tag_name=dict(type='str', aliases=['tag', 'name'], required=True),
        tag_description=dict(type='str', aliases=['description'], default='', required=False),
        category_id=dict(type='str'),
        category_name=dict(type='str', aliases=['category']),
        state=dict(type='str', choices=['present', 'absent'], default='present', required=False),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ('category_id', 'category_name'),
        ],
        required_one_of=[
            ['category_id', 'category_name'],
        ]
    )

    # Construction resolves the category; ensure_state() does the work.
    VmwareTag(module).ensure_state()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_tag_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_tag_info.py
new file mode 100644
index 000000000..25132efa8
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_tag_info.py
@@ -0,0 +1,165 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_tag_info
+short_description: Gather info about VMware tags
+description:
+- This module can be used to collect information about VMware tags.
+- Tag feature is introduced in vSphere 6 version, so this module is not supported in the earlier versions of vSphere.
+- All variables and VMware object names are case sensitive.
+author:
+- Abhijeet Kasurde (@Akasurde)
+requirements:
+- vSphere Automation SDK
+extends_documentation_fragment:
+- community.vmware.vmware_rest_client.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Get info about tag
+ community.vmware.vmware_tag_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ delegate_to: localhost
+
+- name: Get category id from the given tag
+ community.vmware.vmware_tag_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ delegate_to: localhost
+ register: tag_details
+- debug:
+ msg: "{{ tag_details.tag_facts['fedora_machines']['tag_category_id'] }}"
+
+- name: Gather tag id from the given tag
+ community.vmware.vmware_tag_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ delegate_to: localhost
+ register: tag_results
+- set_fact:
+ tag_id: "{{ item.tag_id }}"
+ loop: "{{ tag_results.tag_info|json_query(query) }}"
+ vars:
+ query: "[?tag_name==`tag0001`]"
+- debug: var=tag_id
+'''
+
+RETURN = r'''
+tag_facts:
+ description: dictionary of tag metadata
+ returned: on success
+ type: dict
+ sample: {
+ "Sample_Tag_0002": {
+ "tag_category_id": "urn:vmomi:InventoryServiceCategory:6de17f28-7694-43ec-a783-d09c141819ae:GLOBAL",
+ "tag_description": "Sample Description",
+ "tag_id": "urn:vmomi:InventoryServiceTag:a141f212-0f82-4f05-8eb3-c49647c904c5:GLOBAL",
+ "tag_used_by": []
+ },
+ "fedora_machines": {
+ "tag_category_id": "urn:vmomi:InventoryServiceCategory:baa90bae-951b-4e87-af8c-be681a1ba30c:GLOBAL",
+ "tag_description": "",
+ "tag_id": "urn:vmomi:InventoryServiceTag:7d27d182-3ecd-4200-9d72-410cc6398a8a:GLOBAL",
+ "tag_used_by": []
+ },
+ "ubuntu_machines": {
+ "tag_category_id": "urn:vmomi:InventoryServiceCategory:89573410-29b4-4cac-87a4-127c084f3d50:GLOBAL",
+ "tag_description": "",
+ "tag_id": "urn:vmomi:InventoryServiceTag:7f3516d5-a750-4cb9-8610-6747eb39965d:GLOBAL",
+ "tag_used_by": []
+ }
+ }
+
+tag_info:
+ description: list of tag metadata
+ returned: on success
+ type: list
+ sample: [
+ { "tag_name": "Sample_Tag_0002",
+ "tag_category_id": "urn:vmomi:InventoryServiceCategory:6de17f28-7694-43ec-a783-d09c141819ae:GLOBAL",
+ "tag_description": "Sample Description",
+ "tag_id": "urn:vmomi:InventoryServiceTag:a141f212-0f82-4f05-8eb3-c49647c904c5:GLOBAL",
+ "tag_used_by": []
+ },
+    { "tag_name": "fedora_machines",
+      "tag_category_id": "urn:vmomi:InventoryServiceCategory:baa90bae-951b-4e87-af8c-be681a1ba30c:GLOBAL",
+ "tag_description": "",
+ "tag_id": "urn:vmomi:InventoryServiceTag:7d27d182-3ecd-4200-9d72-410cc6398a8a:GLOBAL",
+ "tag_used_by": []
+ },
+ { "tag_name": "ubuntu_machines",
+ "tag_category_id": "urn:vmomi:InventoryServiceCategory:89573410-29b4-4cac-87a4-127c084f3d50:GLOBAL",
+ "tag_description": "",
+ "tag_id": "urn:vmomi:InventoryServiceTag:7f3516d5-a750-4cb9-8610-6747eb39965d:GLOBAL",
+ "tag_used_by": []
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient
+
+
class VmTagInfoManager(VmwareRestClient):
    """Collect metadata about every tag known to the vCenter tagging service."""

    def __init__(self, module):
        """Constructor."""
        super(VmTagInfoManager, self).__init__(module)

    def get_all_tags(self):
        """
        Gather metadata for all tags and exit the module.

        Exposes the same data twice: 'tag_facts' (dict keyed by tag name,
        kept for backward compatibility; later duplicates overwrite earlier
        ones) and 'tag_info' (a list, which is safe with duplicate names).
        """
        tag_facts = dict()
        tag_info = list()
        tag_service = self.api_client.tagging.Tag
        for tag_id in tag_service.list():
            tag_obj = tag_service.get(tag_id)
            common = dict(
                tag_description=tag_obj.description,
                tag_used_by=tag_obj.used_by,
                tag_category_id=tag_obj.category_id,
                tag_id=tag_obj.id
            )
            tag_facts[tag_obj.name] = dict(common)
            entry = dict(tag_name=tag_obj.name)
            entry.update(common)
            tag_info.append(entry)

        self.module.exit_json(
            changed=False,
            tag_facts=tag_facts,
            tag_info=tag_info
        )
+
+
def main():
    """Entry point: gather tag info; read-only, so check mode is supported."""
    spec = VmwareRestClient.vmware_client_argument_spec()
    module = AnsibleModule(argument_spec=spec,
                          supports_check_mode=True)

    VmTagInfoManager(module).get_all_tags()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_tag_manager.py b/ansible_collections/community/vmware/plugins/modules/vmware_tag_manager.py
new file mode 100644
index 000000000..29ed32539
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_tag_manager.py
@@ -0,0 +1,424 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_tag_manager
+short_description: Manage association of VMware tags with VMware objects
+description:
+- This module can be used to assign / remove VMware tags from the given VMware objects.
+- Tag feature is introduced in vSphere 6 version, so this module is not supported in the earlier versions of vSphere.
+- All variables and VMware object names are case sensitive.
+author:
+- Abhijeet Kasurde (@Akasurde)
+- Frederic Van Reet (@GBrawl)
+requirements:
+- vSphere Automation SDK
+options:
+ tag_names:
+ description:
+ - List of tag(s) to be managed.
+ - User can also specify category name by specifying colon separated value. For example, "category_name:tag_name".
+ - User can also specify tag and category as dict, when tag or category contains colon.
+ See example for more information. Added in version 2.10.
+ - User can skip category name if you have unique tag names.
+ required: true
+ type: list
+ elements: raw
+ state:
+ description:
+ - If C(state) is set to C(add) or C(present) will add the tags to the existing tag list of the given object.
+ - If C(state) is set to C(remove) or C(absent) will remove the tags from the existing tag list of the given object.
+ - If C(state) is set to C(set) will replace the tags of the given objects with the user defined list of tags.
+ default: add
+ choices: [ present, absent, add, remove, set ]
+ type: str
+ object_type:
+ description:
+ - Type of object to work with.
+ required: true
+ choices:
+ - VirtualMachine
+ - Datacenter
+ - ClusterComputeResource
+ - HostSystem
+ - DistributedVirtualSwitch
+ - DistributedVirtualPortgroup
+ - Datastore
+ - DatastoreCluster
+ - ResourcePool
+ - Folder
+ type: str
+ object_name:
+ description:
+ - Name of the object to work with.
+ - For DistributedVirtualPortgroups the format should be "switch_name:portgroup_name"
+ - Required if C(moid) is not set.
+ required: false
+ type: str
+ moid:
+ description:
+ - Managed object ID for the given object.
+ - Required if C(object_name) is not set.
+ required: false
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware_rest_client.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Add tags to a virtual machine
+ community.vmware.vmware_tag_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ tag_names:
+ - Sample_Tag_0002
+ - Category_0001:Sample_Tag_0003
+ object_name: Fedora_VM
+ object_type: VirtualMachine
+ state: add
+ delegate_to: localhost
+
+- name: Specify tag and category as dict
+ community.vmware.vmware_tag_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ tag_names:
+ - tag: tag_0001
+ category: cat_0001
+ - tag: tag_0002
+ category: cat_0002
+ object_name: Fedora_VM
+ object_type: VirtualMachine
+ state: add
+ delegate_to: localhost
+
+- name: Remove a tag from a virtual machine
+ community.vmware.vmware_tag_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ tag_names:
+ - Sample_Tag_0002
+ object_name: Fedora_VM
+ object_type: VirtualMachine
+ state: remove
+ delegate_to: localhost
+
+- name: Add tags to a distributed virtual switch
+ community.vmware.vmware_tag_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ tag_names:
+ - Sample_Tag_0003
+ object_name: Switch_0001
+ object_type: DistributedVirtualSwitch
+ state: add
+ delegate_to: localhost
+
+- name: Add tags to a distributed virtual portgroup
+ community.vmware.vmware_tag_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ tag_names:
+ - Sample_Tag_0004
+ object_name: Switch_0001:Portgroup_0001
+ object_type: DistributedVirtualPortgroup
+ state: add
+ delegate_to: localhost
+
+
+- name: Get information about folders
+ community.vmware.vmware_folder_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter: 'Asia-Datacenter1'
+ delegate_to: localhost
+ register: r
+- name: Set Managed object ID for the given folder
+ ansible.builtin.set_fact:
+ folder_mo_id: "{{ (r.flat_folder_info | selectattr('path', 'equalto', '/Asia-Datacenter1/vm/tier1/tier2') | map(attribute='moid'))[0] }}"
+- name: Add tags to a Folder using managed object id
+ community.vmware.vmware_tag_manager:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ tag_names:
+ - Sample_Cat_0004:Sample_Tag_0004
+ object_type: Folder
+ moid: "{{ folder_mo_id }}"
+ state: add
+ delegate_to: localhost
+
+'''
+
+RETURN = r'''
+tag_status:
+ description: metadata about tags related to object configuration
+ returned: on success
+ type: list
+ sample: {
+ "attached_tags": [
+ "urn:vmomi:InventoryServiceCategory:76f69e84-f6b9-4e64-954c-fac545d2c0ba:GLOBAL:security",
+ ],
+ "current_tags": [
+ "urn:vmomi:InventoryServiceCategory:927f5ff8-62e6-4364-bc94-23e3bfd7dee7:GLOBAL:backup",
+ "urn:vmomi:InventoryServiceCategory:76f69e84-f6b9-4e64-954c-fac545d2c0ba:GLOBAL:security",
+ ],
+ "detached_tags": [],
+ "previous_tags": [
+ "urn:vmomi:InventoryServiceCategory:927f5ff8-62e6-4364-bc94-23e3bfd7dee7:GLOBAL:backup",
+ ]
+ }
+'''
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (PyVmomi, find_dvs_by_name, find_dvspg_by_name)
+try:
+ from com.vmware.vapi.std_client import DynamicID
+ from com.vmware.vapi.std.errors_client import Error
+except ImportError:
+ pass
+
+
class VmwareTagManager(VmwareRestClient):
    """Attach, detach or replace tag associations on a vSphere inventory object."""

    def __init__(self, module):
        """
        Constructor
        """
        super(VmwareTagManager, self).__init__(module)
        # A pyVmomi (SOAP) session is kept alongside the REST client; it is
        # only used to resolve an object name to its managed object id.
        self.pyv = PyVmomi(module=module)

        moid = self.params.get('moid')
        self.object_type = self.params.get('object_type')
        managed_object_id = None

        if moid is not None:
            # Caller supplied the managed object id directly.
            managed_object_id = moid
        else:
            object_name = self.params.get('object_name')
            managed_object = self.get_managed_object(object_name, self.object_type)

            if managed_object is None:
                self.module.fail_json(msg="Failed to find the managed object for %s with type %s" % (object_name, self.object_type))

            if not hasattr(managed_object, '_moId'):
                self.module.fail_json(msg="Unable to find managed object id for %s managed object" % object_name)

            managed_object_id = managed_object._moId

        # DynamicID is the vAPI handle the tag association service operates on.
        self.dynamic_managed_object = DynamicID(type=self.object_type, id=managed_object_id)

        self.tag_service = self.api_client.tagging.Tag
        self.category_service = self.api_client.tagging.Category
        self.tag_association_svc = self.api_client.tagging.TagAssociation

        self.tag_names = self.params.get('tag_names')

    def get_managed_object(self, object_name=None, object_type=None):
        """
        Resolve a vSphere inventory object by name for the given type.

        Side effect: for some types self.object_type is rewritten to the
        vAPI type name (StoragePod, VmwareDistributedVirtualSwitch), which
        differs from the user-facing choice.
        Returns the pyVmomi object, or None when not found / args missing.
        """
        managed_object = None
        if not all([object_type, object_name]):
            return managed_object

        if object_type == 'VirtualMachine':
            managed_object = self.pyv.get_vm_or_template(object_name)

        if object_type == 'Folder':
            managed_object = self.pyv.find_folder_by_name(object_name)

        if object_type == 'Datacenter':
            managed_object = self.pyv.find_datacenter_by_name(object_name)

        if object_type == 'Datastore':
            managed_object = self.pyv.find_datastore_by_name(object_name)

        if object_type == 'DatastoreCluster':
            managed_object = self.pyv.find_datastore_cluster_by_name(object_name)
            self.object_type = 'StoragePod'

        if object_type == 'ClusterComputeResource':
            managed_object = self.pyv.find_cluster_by_name(object_name)

        if object_type == 'ResourcePool':
            managed_object = self.pyv.find_resource_pool_by_name(object_name)

        if object_type == 'HostSystem':
            managed_object = self.pyv.find_hostsystem_by_name(object_name)

        if object_type == 'DistributedVirtualSwitch':
            managed_object = find_dvs_by_name(self.pyv.content, object_name)
            self.object_type = 'VmwareDistributedVirtualSwitch'

        if object_type == 'DistributedVirtualPortgroup':
            # Portgroup names are scoped by switch: "switch_name:portgroup_name".
            dvs_name, pg_name = object_name.split(":", 1)
            dv_switch = find_dvs_by_name(self.pyv.content, dvs_name)
            if dv_switch is None:
                self.module.fail_json(msg="A distributed virtual switch with name %s does not exist" % dvs_name)
            managed_object = find_dvspg_by_name(dv_switch, pg_name)

        return managed_object

    def ensure_state(self):
        """
        Manage the internal state of tags

        """
        results = dict(
            changed=False,
            tag_status=dict(),
        )
        desired_tag_objs = set()
        changed = False
        action = self.params.get('state')
        try:
            current_tag_objs = self.get_tags_for_object(tag_service=self.tag_service,
                                                        tag_assoc_svc=self.tag_association_svc,
                                                        dobj=self.dynamic_managed_object,
                                                        tags=set())
        except Error as error:
            self.module.fail_json(msg="%s" % self.get_error_message(error))

        results['tag_status']['previous_tags'] = ["%s:%s" % (tag_obj.category_id, tag_obj.name) for tag_obj in current_tag_objs]
        results['tag_status']['attached_tags'] = []
        results['tag_status']['detached_tags'] = []

        # Check if category and tag combination exists as per user request
        for tag in self.tag_names:
            category_obj, category_name, tag_name = None, None, None
            if isinstance(tag, dict):
                tag_name = tag.get('tag')
                category_name = tag.get('category')
                if category_name is not None:
                    # User specified category
                    category_obj = self.search_svc_object_by_name(self.category_service, category_name)
                    if category_obj is None:
                        self.module.fail_json(msg="Unable to find the category %s" % category_name)
            elif isinstance(tag, str):
                if ":" in tag:
                    # User specified category
                    category_name, tag_name = tag.split(":", 1)
                    category_obj = self.search_svc_object_by_name(self.category_service, category_name)
                    if category_obj is None:
                        self.module.fail_json(msg="Unable to find the category %s" % category_name)
                else:
                    # User specified only tag
                    tag_name = tag

            # Without a category, tag lookup is by name only, so the name must
            # be unique across categories.
            if category_obj is not None:
                tag_obj = self.get_tag_by_category_id(tag_name=tag_name, category_id=category_obj.id)
            else:
                tag_obj = self.get_tag_by_name(tag_name=tag_name)

            if tag_obj is None:
                self.module.fail_json(msg="Unable to find the tag %s" % tag_name)

            desired_tag_objs.add(tag_obj)

        detached_tag_objs = set()
        attached_tag_objs = set()

        if action in ('add', 'present'):
            # Tags that need to be attached
            tag_objs_to_attach = desired_tag_objs.difference(current_tag_objs)
            tag_ids_to_attach = [tag_obj.id for tag_obj in tag_objs_to_attach]
            if len(tag_ids_to_attach) > 0:
                try:
                    self.tag_association_svc.attach_multiple_tags_to_object(object_id=self.dynamic_managed_object,
                                                                            tag_ids=tag_ids_to_attach)
                    attached_tag_objs.update(tag_objs_to_attach)
                    current_tag_objs.update(tag_objs_to_attach)
                    changed = True
                except Error as error:
                    self.module.fail_json(msg="%s" % self.get_error_message(error))

        elif action == 'set':
            # 'set' means: make the object's tags exactly the desired set,
            # detaching extras first and then attaching what is missing.
            # Tags that need to be detached
            tag_objs_to_detach = current_tag_objs.difference(desired_tag_objs)
            tag_ids_to_detach = [tag_obj.id for tag_obj in tag_objs_to_detach]
            if len(tag_ids_to_detach) > 0:
                try:
                    self.tag_association_svc.detach_multiple_tags_from_object(object_id=self.dynamic_managed_object,
                                                                              tag_ids=tag_ids_to_detach)
                    detached_tag_objs.update(tag_objs_to_detach)
                    current_tag_objs.difference_update(tag_objs_to_detach)
                    changed = True
                except Error as error:
                    self.module.fail_json(msg="%s" % self.get_error_message(error))

            # Tags that need to be attached
            tag_objs_to_attach = desired_tag_objs.difference(current_tag_objs)
            tag_ids_to_attach = [tag_obj.id for tag_obj in tag_objs_to_attach]
            if len(tag_ids_to_attach) > 0:
                try:
                    self.tag_association_svc.attach_multiple_tags_to_object(object_id=self.dynamic_managed_object,
                                                                            tag_ids=tag_ids_to_attach)
                    attached_tag_objs.update(tag_objs_to_attach)
                    current_tag_objs.update(tag_objs_to_attach)
                    changed = True
                except Error as error:
                    self.module.fail_json(msg="%s" % self.get_error_message(error))

        elif action in ('remove', 'absent'):
            # Tags that need to be detached
            tag_objs_to_detach = current_tag_objs.intersection(desired_tag_objs)
            tag_ids_to_detach = [tag_obj.id for tag_obj in tag_objs_to_detach]
            if len(tag_ids_to_detach) > 0:
                try:
                    self.tag_association_svc.detach_multiple_tags_from_object(object_id=self.dynamic_managed_object,
                                                                              tag_ids=tag_ids_to_detach)
                    detached_tag_objs.update(tag_objs_to_detach)
                    current_tag_objs.difference_update(tag_objs_to_detach)
                    changed = True
                except Error as error:
                    self.module.fail_json(msg="%s" % self.get_error_message(error))

        results['tag_status']['detached_tags'] = ["%s:%s" % (tag_obj.category_id, tag_obj.name) for tag_obj in detached_tag_objs]
        results['tag_status']['attached_tags'] = ["%s:%s" % (tag_obj.category_id, tag_obj.name) for tag_obj in attached_tag_objs]
        results['tag_status']['current_tags'] = ["%s:%s" % (tag_obj.category_id, tag_obj.name) for tag_obj in current_tag_objs]
        results['changed'] = changed
        self.module.exit_json(**results)
+
+
def main():
    """Entry point: build the argument spec and drive VmwareTagManager."""
    manager_options = dict(
        tag_names=dict(type='list', required=True, elements='raw'),
        state=dict(type='str', choices=['absent', 'add', 'present', 'remove', 'set'], default='add'),
        moid=dict(type='str'),
        object_name=dict(type='str'),
        object_type=dict(type='str', required=True, choices=['VirtualMachine', 'Datacenter', 'ClusterComputeResource',
                                                             'HostSystem', 'DistributedVirtualSwitch',
                                                             'DistributedVirtualPortgroup', 'Datastore', 'ResourcePool',
                                                             'Folder', 'DatastoreCluster']),
    )
    argument_spec = VmwareRestClient.vmware_client_argument_spec()
    argument_spec.update(**manager_options)
    # The target object is identified either by moid or by name, never both.
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ('moid', 'object_name'),
        ],
        required_one_of=[
            ['moid', 'object_name'],
        ]
    )

    VmwareTagManager(module).ensure_state()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_target_canonical_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_target_canonical_info.py
new file mode 100644
index 000000000..26aa81014
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_target_canonical_info.py
@@ -0,0 +1,177 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_target_canonical_info
+short_description: Return canonical (NAA) from an ESXi host system
+description:
+ - This module can be used to gather information about canonical (NAA) from an ESXi host based on SCSI target ID.
+author:
+- Joseph Callen (@jcpowermac)
+- Abhijeet Kasurde (@Akasurde)
+options:
+ target_id:
+ description:
+ - The target id based on order of scsi device.
+ - version 2.6 onwards, this parameter is optional.
+ required: false
+ type: int
+ cluster_name:
+ description:
+ - Name of the cluster.
+ - Info about all SCSI devices for all host system in the given cluster is returned.
+ - This parameter is required, if C(esxi_hostname) is not provided.
+ type: str
+ esxi_hostname:
+ description:
+ - Name of the ESXi host system.
+ - Info about all SCSI devices for the given ESXi host system is returned.
+ - This parameter is required, if C(cluster_name) is not provided.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Get Canonical name of particular target on particular ESXi host system
+ community.vmware.vmware_target_canonical_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ target_id: 7
+ esxi_hostname: esxi_hostname
+ delegate_to: localhost
+
+- name: Get Canonical name of all target on particular ESXi host system
+ community.vmware.vmware_target_canonical_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ delegate_to: localhost
+
+- name: Get Canonical name of all ESXi hostname on particular Cluster
+ community.vmware.vmware_target_canonical_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: '{{ cluster_name }}'
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+canonical:
+ description: metadata about SCSI Target device
+ returned: if host system and target id is given
+ type: str
+ sample: "mpx.vmhba0:C0:T0:L0"
+
+scsi_tgt_info:
+ description: metadata about all SCSI Target devices
+ returned: if host system or cluster is given
+ type: dict
+ sample: {
+ "DC0_C0_H0": {
+ "scsilun_canonical": {
+ "key-vim.host.ScsiDisk-0000000000766d686261303a303a30": "mpx.vmhba0:C0:T0:L0",
+ "key-vim.host.ScsiLun-0005000000766d686261313a303a30": "mpx.vmhba1:C0:T0:L0"
+ },
+ "target_lun_uuid": {
+ "0": "key-vim.host.ScsiDisk-0000000000766d686261303a303a30"
+ }
+ },
+ "DC0_C0_H1": {
+ "scsilun_canonical": {
+ "key-vim.host.ScsiDisk-0000000000766d686261303a303a30": "mpx.vmhba0:C0:T0:L0",
+ "key-vim.host.ScsiLun-0005000000766d686261313a303a30": "mpx.vmhba1:C0:T0:L0"
+ },
+ "target_lun_uuid": {
+ "0": "key-vim.host.ScsiDisk-0000000000766d686261303a303a30"
+ }
+ },
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+
+
class ScsiTargetInfoManager(PyVmomi):
    """Gather SCSI target information for one ESXi host or all hosts of a cluster."""

    def __init__(self, module):
        """Resolve the host system objects selected by cluster_name / esxi_hostname."""
        super(ScsiTargetInfoManager, self).__init__(module)
        cluster_name = self.module.params.get('cluster_name')
        self.esxi_hostname = self.module.params.get('esxi_hostname')
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=self.esxi_hostname)

    def gather_scsi_device_info(self):
        """
        Gather information about SCSI target devices.

        Exits the module with 'canonical' (single NAA lookup) when both
        target_id and esxi_hostname were given, otherwise with the full
        'scsi_tgt_info' mapping per host.
        """
        scsi_tgt_info = {}
        target_id = self.module.params['target_id']

        for host in self.hosts:
            # Fresh maps per host: previously a single shared pair of dicts
            # was reused, so every host's entry referenced the same objects
            # and accumulated all earlier hosts' devices.
            target_lun_uuid = {}
            scsilun_canonical = {}

            # Associate the scsiLun key with the canonicalName (NAA)
            for scsilun in host.config.storageDevice.scsiLun:
                scsilun_canonical[scsilun.key] = scsilun.canonicalName

            # Associate target number with LUN uuid
            for target in host.config.storageDevice.scsiTopology.adapter[0].target:
                for lun in target.lun:
                    target_lun_uuid[target.target] = lun.scsiLun

            scsi_tgt_info[host.name] = dict(
                scsilun_canonical=scsilun_canonical,
                target_lun_uuid=target_lun_uuid)

        if target_id is not None and self.esxi_hostname is not None:
            canonical = ''
            # Check host membership before indexing: the original looked up
            # scsi_tgt_info[self.esxi_hostname] before the 'in' test, which
            # raised KeyError when the host produced no SCSI data.
            if self.esxi_hostname in scsi_tgt_info and \
                    target_id in scsi_tgt_info[self.esxi_hostname]['target_lun_uuid']:
                temp_lun_data = scsi_tgt_info[self.esxi_hostname]['target_lun_uuid']
                temp_scsi_data = scsi_tgt_info[self.esxi_hostname]['scsilun_canonical']
                temp_target = temp_lun_data[target_id]
                canonical = temp_scsi_data[temp_target]
            self.module.exit_json(changed=False, canonical=canonical)

        self.module.exit_json(changed=False, scsi_tgt_info=scsi_tgt_info)
+
+
def main():
    """Entry point: read-only SCSI target info gathering (check mode safe)."""
    argument_spec = vmware_argument_spec()
    extra_options = dict(
        target_id=dict(required=False, type='int'),
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
    )
    argument_spec.update(extra_options)

    # At least one of cluster_name / esxi_hostname must select the host scope.
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ],
        supports_check_mode=True,
    )

    ScsiTargetInfoManager(module).gather_scsi_device_info()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_vc_infraprofile_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_vc_infraprofile_info.py
new file mode 100644
index 000000000..7a50da32b
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_vc_infraprofile_info.py
@@ -0,0 +1,285 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019-2020, Ansible Project
+# Copyright: (c) 2019-2020, Naveenkumar G P <ngp@vmware.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_vc_infraprofile_info
+short_description: List and Export VMware vCenter infra profile configs.
+description:
+- Module to manage VMware vCenter infra profile configs.
+- vCenter infra profile Library feature is introduced in vSphere 7.0 version, so this module is not supported in the earlier versions of vSphere.
+- All variables and VMware object names are case sensitive.
+author:
+- Naveenkumar G P (@ngp)
+requirements:
+- vSphere Automation SDK
+options:
+ decryption_key:
+ description:
+    - decryption_key argument used while doing the import profile task; as of now it is not taken into account by the API.
+ type: str
+ required: false
+ encryption_key:
+ description:
+    - encryption_key argument used while doing the import profile task; as of now it is not taken into account by the API.
+ type: str
+ required: false
+ api:
+ description:
+ - API which needs to be executed
+ type: str
+ required: false
+ choices: [ export, import, list, validate ]
+ config_path:
+ description:
+ - Config file path which contains infra profile config JSON data, supports both relative and absolute path.
+ - This parameter is required only when C(import),C(validate) APIs are being used.
+ type: str
+ required: false
+ profiles:
+ description:
+ - A list of profile names to be exported, imported, and validated.
+    - This parameter is not required while running the C(list) API, but is required for C(export), C(import) and C(validate).
+ type: str
+ required: false
+ description:
+ description:
+ - Description of about encryption or decryption key.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.vmware.vmware_rest_client.documentation
+'''
+
+EXAMPLES = r'''
+- name: Get information about VC infraprofile
+ vmware_vc_infraprofile_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ delegate_to: localhost
+
+- name: export vCenter appliance infra profile config
+ vmware_vc_infraprofile_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ api: "export"
+ profiles: "ApplianceManagement"
+ delegate_to: localhost
+
+- name: validate vCenter appliance infra profile config
+ vmware_vc_infraprofile_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ api: "validate"
+ profiles: "ApplianceManagement"
+ config_path: "export.json"
+
+- name: import vCenter appliance infra profile config
+ vmware_vc_infraprofile_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ api: "import"
+ profiles: "ApplianceManagement"
+ config_path: "import.json"
+ delegate_to: localhost
+ '''
+
+RETURN = r'''
+list_infra:
+    description: A list of infra configs.
+ returned: on success with API as "list"
+ type: list
+ "sample": [
+ {
+ "info": "ApplianceManagement",
+ "name": "ApplianceManagement"
+ },
+ {
+ "info": "ApplianceNetwork",
+ "name": "ApplianceNetwork"
+ },
+ {
+ "info": "Authentication & Authorization Management",
+ "name": "AuthManagement"
+ }
+ ]
+
+export_infra:
+ description: A message about the exported file
+ returned: On success with API set as "export"
+ type: dict
+ sample: {
+ "export_config_json":"json exported to file"
+ }
+
+validate_infra:
+ description: A message about validate on exported file
+ returned: On success with API set as "validate"
+ type: dict
+ "sample": {
+ "changed": false,
+ "failed": false,
+ "status": "VALID"
+ }
+
+import_profile:
+ description: A message about import on import_profile spec
+ returned: On success with API set as "import"
+ type: dict
+ "sample": {
+ "changed": true,
+ "failed": false,
+ "status": "0.0"
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.version import LooseVersion
+from ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi
+import json
+import time
+
+
class VcVersionChecker(PyVmomi):
    """Helper that verifies the connected vCenter meets the 7.0.0 minimum."""

    def __init__(self, module):
        super(VcVersionChecker, self).__init__(module)

    def check_vc_version(self):
        """Fail the module when the vCenter 'about' version is below 7."""
        current_version = LooseVersion(self.content.about.version)
        if current_version < LooseVersion('7'):
            self.module.fail_json(msg="vCenter version is less than 7.0.0 Please specify vCenter with version greater than or equal to 7.0.0")
+
+
+class VcenterProfile(VmwareRestClient):
+
def __init__(self, module):
    """Initialize the REST client session and cache the config file path."""
    super(VcenterProfile, self).__init__(module)
    # May be None; export defaults it to "<api>.json" at write time.
    self.config_path = self.params['config_path']
+
def list_vc_infraprofile_configs(self):
    """Exit the module with the available infra profile configs (name + info)."""
    available_configs = self.api_client.appliance.infraprofile.Configs.list()
    summaries = [{'info': cfg.info, 'name': cfg.name} for cfg in available_configs]
    self.module.exit_json(changed=False, infra_configs_list=summaries)
+
def get_profile_spec(self):
    """
    Build a ProfilesSpec from the comma-separated 'profiles' parameter.

    Removed a dead `profiles = {}` assignment that was immediately
    overwritten by the split() result.
    """
    infra = self.api_client.appliance.infraprofile.Configs
    # NOTE(review): encryption_key/description are placeholder literals; per
    # the module DOCUMENTATION the API does not take them into account yet.
    profile_names = self.params['profiles'].split(",")
    profile_spec = infra.ProfilesSpec(encryption_key="encryption_key", description="description", profiles=set(profile_names))
    return profile_spec
+
def vc_export_profile_task(self):
    """Export the selected profiles to JSON, write them to config_path, and exit."""
    infra = self.api_client.appliance.infraprofile.Configs
    export_spec = self.get_profile_spec()
    exported_json = infra.export(spec=export_spec)
    if self.config_path is None:
        # Default the output file name to "<api>.json" (i.e. "export.json").
        self.config_path = self.params.get('api') + ".json"
    with open(self.config_path, 'w', encoding='utf-8') as outfile:
        json.dump(json.loads(exported_json), outfile, ensure_ascii=False, indent=2)
    self.module.exit_json(changed=False, export_config_json=exported_json)
+
+ def read_profile(self):
+ with open(self.config_path, "r") as file:
+ return file.read()
+
+ def get_import_profile_spec(self):
+ infra = self.api_client.appliance.infraprofile.Configs
+ config_spec = self.read_profile()
+ profile_spec = self.get_profile_spec()
+ import_profile_spec = infra.ImportProfileSpec(config_spec=config_spec, profile_spec=profile_spec)
+ return import_profile_spec
+
+ def vc_import_profile_task(self):
+ infra = self.api_client.appliance.infraprofile.Configs
+ import_profile_spec = self.get_import_profile_spec()
+ import_task = infra.import_profile_task(import_profile_spec)
+ self.wait_for_task(import_task)
+ if "SUCCEEDED" == import_task.get_info().status:
+ self.module.exit_json(changed=True, status=import_task.get_info().result.value)
+ self.module.fail_json(msg='Failed to import profile status:"%s" ' % import_task.get_info().status)
+
+ def vc_validate_profile_task(self):
+ infra = self.api_client.appliance.infraprofile.Configs
+ import_profile_spec = self.get_import_profile_spec()
+ validate_task = infra.validate_task(import_profile_spec)
+ if "VALID" == validate_task.get_info().result.get_field("status").value:
+ self.module.exit_json(changed=False, status=validate_task.get_info().result.get_field("status").value)
+ elif "INVALID" == validate_task.get_info().result.get_field("status").value:
+ # TO-DO: move to vmware_rest_client
+ self.module.exit_json(changed=False, status=validate_task.get_info().result.get_field("status").value)
+ else:
+ # TO-DO: move to vmware_rest_client
+ self.module.fail_json(msg='Failed to validate profile status:"%s" ' % dir(validate_task.get_info().status))
+
+ def wait_for_task(self, task, poll_interval=1):
+ while task.get_info().status == "RUNNING":
+ time.sleep(poll_interval)
+
+
+def main():
+ argument_spec = VmwareRestClient.vmware_client_argument_spec()
+ argument_spec.update(
+ encryption_key=dict(type='str', required=False, no_log=True),
+ description=dict(type='str', required=False),
+ decryption_key=dict(type='str', required=False, no_log=True),
+ api=dict(type='str', required=False, choices=['list', 'export', 'import', 'validate']),
+ profiles=dict(type='str', required=False),
+ config_path=dict(type='str', required=False),
+ )
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+ result = {'failed': False, 'changed': False}
+ vmware_vc_infra_profile = VcenterProfile(module)
+ vmware_vc_version = VcVersionChecker(module)
+ vmware_vc_version.check_vc_version()
+
+ if module.params['api'].lower() == "list":
+ if module.check_mode:
+ result.update(
+ changed=False, desired_operation='list_vc_profile_configs',)
+ module.exit_json(**result)
+ vmware_vc_infra_profile.list_vc_infraprofile_configs()
+ if module.params['api'].lower() == "export":
+ if module.check_mode:
+ result.update(
+ changed=False,
+ desired_operation='export_vc_profile_configs',)
+ module.exit_json(**result)
+ vmware_vc_infra_profile.vc_export_profile_task()
+
+ if module.params['api'].lower() == "import":
+ if module.check_mode:
+ result.update(
+ changed=True,
+ desired_operation='import_vc_profile_configs',
+ )
+ module.exit_json(**result)
+ vmware_vc_infra_profile.vc_import_profile_task()
+
+ if module.params['api'].lower() == "validate":
+ if module.check_mode:
+ result.update(
+ changed=True,
+ desired_operation='import_vc_profile_configs',
+ )
+ module.exit_json(**result)
+ vmware_vc_infra_profile.vc_validate_profile_task()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_vcenter_settings.py b/ansible_collections/community/vmware/plugins/modules/vmware_vcenter_settings.py
new file mode 100644
index 000000000..db3a35ab0
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_vcenter_settings.py
@@ -0,0 +1,980 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_vcenter_settings
+short_description: Configures general settings on a vCenter server
+description:
+- This module can be used to configure the vCenter server general settings (except the statistics).
+- The statistics can be configured with the module C(vmware_vcenter_statistics).
+author:
+- Christian Kotte (@ckotte)
+options:
+ database:
+ description:
+ - The database settings for vCenter server.
+ suboptions:
+ max_connections:
+ type: int
+ description: Maximum connections.
+ default: 50
+ task_cleanup:
+ type: bool
+ description: Task cleanup.
+ default: true
+ task_retention:
+ type: int
+ description: Task retention in days.
+ default: 30
+ event_cleanup:
+ type: bool
+ description: Event cleanup.
+ default: true
+ event_retention:
+ type: int
+ description: Event retention in days.
+ default: 30
+ type: dict
+ default: {
+ max_connections: 50,
+ task_cleanup: true,
+ task_retention: 30,
+ event_cleanup: true,
+ event_retention: 30,
+ }
+ runtime_settings:
+ description:
+ - The unique runtime settings for vCenter server.
+ suboptions:
+ unique_id:
+ type: int
+ description: vCenter server unique ID.
+ managed_address:
+ type: str
+ description: vCenter server managed address.
+ vcenter_server_name:
+ type: str
+ description: vCenter server name. Default is FQDN.
+ type: dict
+ user_directory:
+ description:
+ - The user directory settings for the vCenter server installation.
+ suboptions:
+ timeout:
+ type: int
+ description: User directory timeout.
+ default: 60
+ query_limit:
+ type: bool
+ description: Query limit.
+ default: true
+ query_limit_size:
+ type: int
+ description: Query limit size.
+ default: 5000
+ validation:
+ type: bool
+ description: Mail Validation.
+ default: true
+ validation_period:
+ type: int
+ description: Validation period.
+ default: 1440
+ type: dict
+ default: {
+ timeout: 60,
+ query_limit: true,
+ query_limit_size: 5000,
+ validation: true,
+ validation_period: 1440,
+ }
+ mail:
+ description:
+ - The settings vCenter server uses to send email alerts.
+ suboptions:
+ server:
+ type: str
+ description: Mail server.
+ sender:
+ type: str
+ description: Mail sender address.
+ type: dict
+ default: {
+ server: '',
+ sender: '',
+ }
+ snmp_receivers:
+ description:
+ - SNMP trap destinations for vCenter server alerts.
+ suboptions:
+ snmp_receiver_1_url:
+ type: str
+ description: Primary Receiver URL.
+ default: "localhost"
+ snmp_receiver_1_enabled:
+ type: bool
+ description: Enable receiver.
+ default: true
+ snmp_receiver_1_port:
+ type: int
+ description: Receiver port.
+ default: 162
+ snmp_receiver_1_community:
+ type: str
+ description: Community string.
+ default: "public"
+ snmp_receiver_2_url:
+ type: str
+ description: Receiver 2 URL.
+ default: ""
+ snmp_receiver_2_enabled:
+ type: bool
+ description: Enable receiver.
+ default: false
+ snmp_receiver_2_port:
+ type: int
+ description: Receiver port.
+ default: 162
+ snmp_receiver_2_community:
+ type: str
+ description: Community string.
+ default: ""
+ snmp_receiver_3_url:
+ type: str
+ description: Receiver 3 URL.
+ default: ""
+ snmp_receiver_3_enabled:
+ type: bool
+ description: Enable receiver.
+ default: false
+ snmp_receiver_3_port:
+ type: int
+ description: Receiver port.
+ default: 162
+ snmp_receiver_3_community:
+ type: str
+ description: Community string.
+ default: ""
+ snmp_receiver_4_url:
+ type: str
+ description: Receiver 4 URL.
+ default: ""
+ snmp_receiver_4_enabled:
+ type: bool
+ description: Enable receiver.
+ default: false
+ snmp_receiver_4_port:
+ type: int
+ description: Receiver port.
+ default: 162
+ snmp_receiver_4_community:
+ type: str
+ description: Community string.
+ default: ""
+ type: dict
+ default: {
+ snmp_receiver_1_url: 'localhost',
+ snmp_receiver_1_enabled: true,
+ snmp_receiver_1_port: 162,
+ snmp_receiver_1_community: 'public',
+ snmp_receiver_2_url: '',
+ snmp_receiver_2_enabled: false,
+ snmp_receiver_2_port: 162,
+ snmp_receiver_2_community: '',
+ snmp_receiver_3_url: '',
+ snmp_receiver_3_enabled: false,
+ snmp_receiver_3_port: 162,
+ snmp_receiver_3_community: '',
+ snmp_receiver_4_url: '',
+ snmp_receiver_4_enabled: false,
+ snmp_receiver_4_port: 162,
+ snmp_receiver_4_community: '',
+ }
+ timeout_settings:
+ description:
+ - The vCenter server connection timeout for normal and long operations.
+ suboptions:
+ normal_operations:
+ type: int
+ description: Normal operation timeout.
+ default: 30
+ long_operations:
+ type: int
+ description: Long operation timeout.
+ default: 120
+ type: dict
+ default: {
+ normal_operations: 30,
+ long_operations: 120,
+ }
+ logging_options:
+ description:
+ - The level of detail that vCenter server uses for log files.
+ type: str
+ choices: ['none', 'error', 'warning', 'info', 'verbose', 'trivia']
+ default: 'info'
+ advanced_settings:
+ description:
+ - A dictionary of advanced settings.
+ default: {}
+ type: dict
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Configure vCenter general settings
+ community.vmware.vmware_vcenter_settings:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ database:
+ max_connections: 50
+ task_cleanup: true
+ task_retention: 30
+ event_cleanup: true
+ event_retention: 30
+ runtime_settings:
+ unique_id: 1
+ managed_address: "{{ lookup('dig', inventory_hostname) }}"
+ vcenter_server_name: "{{ inventory_hostname }}"
+ user_directory:
+ timeout: 60
+ query_limit: true
+ query_limit_size: 5000
+ validation: true
+ validation_period: 1440
+ mail:
+ server: mail.example.com
+ sender: vcenter@{{ inventory_hostname }}
+ snmp_receivers:
+ snmp_receiver_1_url: localhost
+ snmp_receiver_1_enabled: true
+ snmp_receiver_1_port: 162
+ snmp_receiver_1_community: public
+ timeout_settings:
+ normal_operations: 30
+ long_operations: 120
+ logging_options: info
+ delegate_to: localhost
+
+- name: Enable Retreat Mode for cluster with MOID domain-c8 (https://kb.vmware.com/kb/80472)
+ community.vmware.vmware_vcenter_settings:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ advanced_settings:
+ 'config.vcls.clusters.domain-c8.enabled': 'false'
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+results:
+ description:
+ - metadata about vCenter settings
+ - supported diff mode from version 1.8.0
+ returned: always
+ type: dict
+ sample: {
+ "changed": false,
+ "db_event_cleanup": true,
+ "db_event_retention": 30,
+ "db_max_connections": 50,
+ "db_task_cleanup": true,
+ "db_task_retention": 30,
+ "directory_query_limit": true,
+ "directory_query_limit_size": 5000,
+ "directory_timeout": 60,
+ "directory_validation": true,
+ "directory_validation_period": 1440,
+ "logging_options": "info",
+ "mail_sender": "vcenter@vcenter01.example.com",
+ "mail_server": "mail.example.com",
+ "msg": "vCenter settings already configured properly",
+ "runtime_managed_address": "192.168.1.10",
+ "runtime_server_name": "vcenter01.example.com",
+ "runtime_unique_id": 1,
+ "timeout_long_operations": 120,
+ "timeout_normal_operations": 30,
+ "diff": {
+ "after": {
+ "db_event_cleanup": true,
+ "db_event_retention": 30,
+ "db_max_connections": 50,
+ "db_task_cleanup": true,
+ "db_task_retention": 30,
+ "directory_query_limit": true,
+ "directory_query_limit_size": 5000,
+ "directory_timeout": 60,
+ "directory_validation": true,
+ "directory_validation_period": 1440,
+ "logging_options": "info",
+ "mail_sender": "vcenter@vcenter01.example.com",
+ "mail_server": "mail.example.com",
+ "runtime_managed_address": "192.168.1.10",
+ "runtime_server_name": "vcenter01.example.com",
+ "runtime_unique_id": 1,
+ "snmp_receiver_1_community": "public",
+ "snmp_receiver_1_enabled": true,
+ "snmp_receiver_1_port": 162,
+ "snmp_receiver_1_url": "localhost",
+ "snmp_receiver_2_community": "",
+ "snmp_receiver_2_enabled": false,
+ "snmp_receiver_2_port": 162,
+ "snmp_receiver_2_url": "",
+ "snmp_receiver_3_community": "",
+ "snmp_receiver_3_enabled": false,
+ "snmp_receiver_3_port": 162,
+ "snmp_receiver_3_url": "",
+ "snmp_receiver_4_community": "",
+ "snmp_receiver_4_enabled": false,
+ "snmp_receiver_4_port": 162,
+ "snmp_receiver_4_url": "",
+ "timeout_long_operations": 120,
+ "timeout_normal_operations": 30
+ },
+ "before": {
+ "db_event_cleanup": true,
+ "db_event_retention": 30,
+ "db_max_connections": 50,
+ "db_task_cleanup": true,
+ "db_task_retention": 30,
+ "directory_query_limit": true,
+ "directory_query_limit_size": 5000,
+ "directory_timeout": 60,
+ "directory_validation": true,
+ "directory_validation_period": 1440,
+ "logging_options": "info",
+ "mail_sender": "vcenter@vcenter01.example.com",
+ "mail_server": "mail.example.com",
+ "runtime_managed_address": "192.168.1.10",
+ "runtime_server_name": "vcenter01.example.com",
+ "runtime_unique_id": 1,
+ "snmp_receiver_1_community": "public",
+ "snmp_receiver_1_enabled": true,
+ "snmp_receiver_1_port": 162,
+ "snmp_receiver_1_url": "localhost",
+ "snmp_receiver_2_community": "",
+ "snmp_receiver_2_enabled": false,
+ "snmp_receiver_2_port": 162,
+ "snmp_receiver_2_url": "",
+ "snmp_receiver_3_community": "",
+ "snmp_receiver_3_enabled": false,
+ "snmp_receiver_3_port": 162,
+ "snmp_receiver_3_url": "",
+ "snmp_receiver_4_community": "",
+ "snmp_receiver_4_enabled": false,
+ "snmp_receiver_4_port": 162,
+ "snmp_receiver_4_url": "",
+ "timeout_long_operations": 120,
+ "timeout_normal_operations": 30
+ }
+ }
+ }
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ try:
+ from ordereddict import OrderedDict
+ except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, option_diff, vmware_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class VmwareVcenterSettings(PyVmomi):
+ """Manage settings for a vCenter server"""
+
+    def __init__(self, module):
+        """Connect via PyVmomi and cache the vCenter settings OptionManager."""
+        super(VmwareVcenterSettings, self).__init__(module)
+
+        # These settings only exist on vCenter, not on a standalone ESXi host.
+        if not self.is_vcenter():
+            self.module.fail_json(msg="You have to connect to a vCenter server!")
+
+        # content.setting is the OptionManager used to query and update vCenter settings.
+        self.option_manager = self.content.setting
+
+ def get_default_setting_value(self, setting_key):
+ return self.option_manager.QueryOptions(name=setting_key)[0].value
+
+ def ensure(self):
+ """Manage settings for a vCenter server"""
+ result = dict(changed=False, msg='')
+ message = ''
+
+ db_max_connections = self.params['database'].get('max_connections')
+ db_task_cleanup = self.params['database'].get('task_cleanup')
+ db_task_retention = self.params['database'].get('task_retention')
+ db_event_cleanup = self.params['database'].get('event_cleanup')
+ db_event_retention = self.params['database'].get('event_retention')
+
+ # runtime default value
+ runtime_unique_id = self.get_default_setting_value('instance.id')
+ runtime_managed_address = self.get_default_setting_value('VirtualCenter.ManagedIP')
+ runtime_server_name = self.get_default_setting_value('VirtualCenter.InstanceName')
+
+ if self.params['runtime_settings']:
+ if self.params['runtime_settings'].get('unique_id') is not None:
+ runtime_unique_id = self.params['runtime_settings'].get('unique_id')
+ if self.params['runtime_settings'].get('managed_address') is not None:
+ runtime_managed_address = self.params['runtime_settings'].get('managed_address')
+ if self.params['runtime_settings'].get('vcenter_server_name') is not None:
+ runtime_server_name = self.params['runtime_settings'].get('vcenter_server_name')
+
+ directory_timeout = self.params['user_directory'].get('timeout')
+ directory_query_limit = self.params['user_directory'].get('query_limit')
+ directory_query_limit_size = self.params['user_directory'].get('query_limit_size')
+ directory_validation = self.params['user_directory'].get('validation')
+ directory_validation_period = self.params['user_directory'].get('validation_period')
+ mail = self.params.get('mail') or {'mail': {'server': '', 'sender': ''}}
+ mail_server = mail.get('server', '')
+ mail_sender = mail.get('sender', '')
+ snmp_receiver_1_url = self.params['snmp_receivers'].get('snmp_receiver_1_url')
+ snmp_receiver_1_enabled = self.params['snmp_receivers'].get('snmp_receiver_1_enabled')
+ snmp_receiver_1_port = self.params['snmp_receivers'].get('snmp_receiver_1_port')
+ snmp_receiver_1_community = self.params['snmp_receivers'].get('snmp_receiver_1_community')
+ snmp_receiver_2_url = self.params['snmp_receivers'].get('snmp_receiver_2_url')
+ snmp_receiver_2_enabled = self.params['snmp_receivers'].get('snmp_receiver_2_enabled')
+ snmp_receiver_2_port = self.params['snmp_receivers'].get('snmp_receiver_2_port')
+ snmp_receiver_2_community = self.params['snmp_receivers'].get('snmp_receiver_2_community')
+ snmp_receiver_3_url = self.params['snmp_receivers'].get('snmp_receiver_3_url')
+ snmp_receiver_3_enabled = self.params['snmp_receivers'].get('snmp_receiver_3_enabled')
+ snmp_receiver_3_port = self.params['snmp_receivers'].get('snmp_receiver_3_port')
+ snmp_receiver_3_community = self.params['snmp_receivers'].get('snmp_receiver_3_community')
+ snmp_receiver_4_url = self.params['snmp_receivers'].get('snmp_receiver_4_url')
+ snmp_receiver_4_enabled = self.params['snmp_receivers'].get('snmp_receiver_4_enabled')
+ snmp_receiver_4_port = self.params['snmp_receivers'].get('snmp_receiver_4_port')
+ snmp_receiver_4_community = self.params['snmp_receivers'].get('snmp_receiver_4_community')
+ timeout_normal_operations = self.params['timeout_settings'].get('normal_operations')
+ timeout_long_operations = self.params['timeout_settings'].get('long_operations')
+ logging_options = self.params.get('logging_options')
+
+ changed = False
+ changed_list = []
+
+ # Check all general settings, except statistics
+ result['db_max_connections'] = db_max_connections
+ result['db_task_cleanup'] = db_task_cleanup
+ result['db_task_retention'] = db_task_retention
+ result['db_event_cleanup'] = db_event_cleanup
+ result['db_event_retention'] = db_event_retention
+ result['runtime_unique_id'] = runtime_unique_id
+ result['runtime_managed_address'] = runtime_managed_address
+ result['runtime_server_name'] = runtime_server_name
+ result['directory_timeout'] = directory_timeout
+ result['directory_query_limit'] = directory_query_limit
+ result['directory_query_limit_size'] = directory_query_limit_size
+ result['directory_validation'] = directory_validation
+ result['directory_validation_period'] = directory_validation_period
+ result['mail_server'] = mail_server
+ result['mail_sender'] = mail_sender
+ result['timeout_normal_operations'] = timeout_normal_operations
+ result['timeout_long_operations'] = timeout_long_operations
+ result['logging_options'] = logging_options
+ change_option_list = []
+
+ # Initialize diff_config variable
+ diff_config = dict(
+ before={},
+ after={}
+ )
+ for key in result.keys():
+ if key != 'changed' and key != 'msg':
+ diff_config['before'][key] = result[key]
+ diff_config['after'][key] = result[key]
+ for n in range(1, 5):
+ exec("diff_config['before']['snmp_receiver_%s_url'] = snmp_receiver_%s_url" % (n, n))
+ exec("diff_config['before']['snmp_receiver_%s_enabled'] = snmp_receiver_%s_enabled" % (n, n))
+ exec("diff_config['before']['snmp_receiver_%s_port'] = snmp_receiver_%s_port" % (n, n))
+ exec("diff_config['before']['snmp_receiver_%s_community'] = snmp_receiver_%s_community" % (n, n))
+ exec("diff_config['after']['snmp_receiver_%s_url'] = snmp_receiver_%s_url" % (n, n))
+ exec("diff_config['after']['snmp_receiver_%s_enabled'] = snmp_receiver_%s_enabled" % (n, n))
+ exec("diff_config['after']['snmp_receiver_%s_port'] = snmp_receiver_%s_port" % (n, n))
+ exec("diff_config['after']['snmp_receiver_%s_community'] = snmp_receiver_%s_community" % (n, n))
+ result['diff'] = {}
+
+ advanced_settings = self.params['advanced_settings']
+ changed_advanced_settings = option_diff(advanced_settings, self.option_manager.setting, False)
+
+ if changed_advanced_settings:
+ changed = True
+ change_option_list += changed_advanced_settings
+
+ for advanced_setting in advanced_settings:
+ result[advanced_setting] = advanced_settings[advanced_setting]
+ diff_config['before'][advanced_setting] = result[advanced_setting]
+ diff_config['after'][advanced_setting] = result[advanced_setting]
+
+ for setting in self.option_manager.setting:
+ # Database
+ if setting.key == 'VirtualCenter.MaxDBConnection' and setting.value != db_max_connections:
+ changed = True
+ changed_list.append("DB max connections")
+ result['db_max_connections_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='VirtualCenter.MaxDBConnection', value=db_max_connections)
+ )
+ diff_config['before']['db_max_connections'] = setting.value
+ if setting.key == 'task.maxAgeEnabled' and setting.value != db_task_cleanup:
+ changed = True
+ changed_list.append("DB task cleanup")
+ result['db_task_cleanup_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='task.maxAgeEnabled', value=db_task_cleanup)
+ )
+ diff_config['before']['db_task_cleanup'] = setting.value
+ if setting.key == 'task.maxAge' and setting.value != db_task_retention:
+ changed = True
+ changed_list.append("DB task retention")
+ result['db_task_retention_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='task.maxAge', value=db_task_retention)
+ )
+ diff_config['before']['db_task_retention'] = setting.value
+ if setting.key == 'event.maxAgeEnabled' and setting.value != db_event_cleanup:
+ changed = True
+ changed_list.append("DB event cleanup")
+ result['db_event_cleanup_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='event.maxAgeEnabled', value=db_event_cleanup)
+ )
+ diff_config['before']['db_event_cleanup'] = setting.value
+ if setting.key == 'event.maxAge' and setting.value != db_event_retention:
+ changed = True
+ changed_list.append("DB event retention")
+ result['db_event_retention_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='event.maxAge', value=db_event_retention)
+ )
+ diff_config['before']['db_event_retention'] = setting.value
+ # Runtime settings
+ if setting.key == 'instance.id' and setting.value != runtime_unique_id:
+ changed = True
+ changed_list.append("Instance ID")
+ result['runtime_unique_id_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='instance.id', value=runtime_unique_id)
+ )
+ diff_config['before']['runtime_unique_id'] = setting.value
+ if setting.key == 'VirtualCenter.ManagedIP' and setting.value != runtime_managed_address:
+ changed = True
+ changed_list.append("Managed IP")
+ result['runtime_managed_address_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='VirtualCenter.ManagedIP', value=runtime_managed_address)
+ )
+ diff_config['before']['runtime_managed_address'] = setting.value
+ if setting.key == 'VirtualCenter.InstanceName' and setting.value != runtime_server_name:
+ changed = True
+ changed_list.append("Server name")
+ result['runtime_server_name_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='VirtualCenter.InstanceName', value=runtime_server_name)
+ )
+ diff_config['before']['runtime_server_name'] = setting.value
+ # User directory
+ if setting.key == 'ads.timeout' and setting.value != directory_timeout:
+ changed = True
+ changed_list.append("Directory timeout")
+ result['directory_timeout_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='ads.timeout', value=directory_timeout)
+ )
+ diff_config['before']['directory_timeout'] = setting.value
+ if setting.key == 'ads.maxFetchEnabled' and setting.value != directory_query_limit:
+ changed = True
+ changed_list.append("Query limit")
+ result['directory_query_limit_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='ads.maxFetchEnabled', value=directory_query_limit)
+ )
+ diff_config['before']['directory_query_limit'] = setting.value
+ if setting.key == 'ads.maxFetch' and setting.value != directory_query_limit_size:
+ changed = True
+ changed_list.append("Query limit size")
+ result['directory_query_limit_size_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='ads.maxFetch', value=directory_query_limit_size)
+ )
+ diff_config['before']['directory_query_limit_size'] = setting.value
+ if setting.key == 'ads.checkIntervalEnabled' and setting.value != directory_validation:
+ changed = True
+ changed_list.append("Validation")
+ result['directory_validation_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='ads.checkIntervalEnabled', value=directory_validation)
+ )
+ diff_config['before']['directory_validation'] = setting.value
+ if setting.key == 'ads.checkInterval' and setting.value != directory_validation_period:
+ changed = True
+ changed_list.append("Validation period")
+ result['directory_validation_period_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='ads.checkInterval', value=directory_validation_period)
+ )
+ diff_config['before']['directory_validation_period'] = setting.value
+ # Mail
+ if setting.key == 'mail.smtp.server' and setting.value != mail_server:
+ changed = True
+ changed_list.append("Mail server")
+ result['mail_server_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='mail.smtp.server', value=mail_server)
+ )
+ diff_config['before']['mail_server'] = setting.value
+ if setting.key == 'mail.sender' and setting.value != mail_sender:
+ changed = True
+ changed_list.append("Mail sender")
+ result['mail_sender_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='mail.sender', value=mail_sender)
+ )
+ diff_config['before']['mail_sender'] = setting.value
+ # SNMP receivers - SNMP receiver #1
+ if setting.key == 'snmp.receiver.1.enabled' and setting.value != snmp_receiver_1_enabled:
+ changed = True
+ changed_list.append("SNMP-1-enabled")
+ result['snmp_1_enabled_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='snmp.receiver.1.enabled', value=snmp_receiver_1_enabled)
+ )
+ diff_config['before']['snmp_receiver_1_enabled'] = setting.value
+ if setting.key == 'snmp.receiver.1.name' and setting.value != snmp_receiver_1_url:
+ changed = True
+ changed_list.append("SNMP-1-name")
+ result['snmp_1_url_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='snmp.receiver.1.name', value=snmp_receiver_1_url)
+ )
+ diff_config['before']['snmp_receiver_1_url'] = setting.value
+ if setting.key == 'snmp.receiver.1.port' and setting.value != snmp_receiver_1_port:
+ changed = True
+ changed_list.append("SNMP-1-port")
+ result['snmp_receiver_1_port_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='snmp.receiver.1.port', value=snmp_receiver_1_port)
+ )
+ diff_config['before']['snmp_receiver_1_port'] = setting.value
+ if setting.key == 'snmp.receiver.1.community' and setting.value != snmp_receiver_1_community:
+ changed = True
+ changed_list.append("SNMP-1-community")
+ result['snmp_1_community_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='snmp.receiver.1.community', value=snmp_receiver_1_community)
+ )
+ diff_config['before']['snmp_receiver_1_community'] = setting.value
+ # SNMP receivers - SNMP receiver #2
+ if setting.key == 'snmp.receiver.2.enabled' and setting.value != snmp_receiver_2_enabled:
+ changed = True
+ changed_list.append("SNMP-2-enabled")
+ result['snmp_2_enabled_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='snmp.receiver.2.enabled', value=snmp_receiver_2_enabled)
+ )
+ diff_config['before']['snmp_receiver_2_enabled'] = setting.value
+ if setting.key == 'snmp.receiver.2.name' and setting.value != snmp_receiver_2_url:
+ changed = True
+ changed_list.append("SNMP-2-name")
+ result['snmp_2_url_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='snmp.receiver.2.name', value=snmp_receiver_2_url)
+ )
+ diff_config['before']['snmp_receiver_2_url'] = setting.value
+ if setting.key == 'snmp.receiver.2.port' and setting.value != snmp_receiver_2_port:
+ changed = True
+ changed_list.append("SNMP-2-port")
+ result['snmp_receiver_2_port_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='snmp.receiver.2.port', value=snmp_receiver_2_port)
+ )
+ diff_config['before']['snmp_receiver_2_port'] = setting.value
+ if setting.key == 'snmp.receiver.2.community' and setting.value != snmp_receiver_2_community:
+ changed = True
+ changed_list.append("SNMP-2-community")
+ result['snmp_2_community_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='snmp.receiver.2.community', value=snmp_receiver_2_community)
+ )
+ diff_config['before']['snmp_receiver_2_community'] = setting.value
+ # SNMP receivers - SNMP receiver #3
+ if setting.key == 'snmp.receiver.3.enabled' and setting.value != snmp_receiver_3_enabled:
+ changed = True
+ changed_list.append("SNMP-3-enabled")
+ result['snmp_3_enabled_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='snmp.receiver.3.enabled', value=snmp_receiver_3_enabled)
+ )
+ diff_config['before']['snmp_receiver_3_enabled'] = setting.value
+ if setting.key == 'snmp.receiver.3.name' and setting.value != snmp_receiver_3_url:
+ changed = True
+ changed_list.append("SNMP-3-name")
+ result['snmp_3_url_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='snmp.receiver.3.name', value=snmp_receiver_3_url)
+ )
+ diff_config['before']['snmp_receiver_3_url'] = setting.value
+ if setting.key == 'snmp.receiver.3.port' and setting.value != snmp_receiver_3_port:
+ changed = True
+ changed_list.append("SNMP-3-port")
+ result['snmp_receiver_3_port_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='snmp.receiver.3.port', value=snmp_receiver_3_port)
+ )
+ diff_config['before']['snmp_receiver_3_port'] = setting.value
+ if setting.key == 'snmp.receiver.3.community' and setting.value != snmp_receiver_3_community:
+ changed = True
+ changed_list.append("SNMP-3-community")
+ result['snmp_3_community_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='snmp.receiver.3.community', value=snmp_receiver_3_community)
+ )
+ diff_config['before']['snmp_receiver_3_community'] = setting.value
+ # SNMP receivers - SNMP receiver #4
+ if setting.key == 'snmp.receiver.4.enabled' and setting.value != snmp_receiver_4_enabled:
+ changed = True
+ changed_list.append("SNMP-4-enabled")
+ result['snmp_4_enabled_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='snmp.receiver.4.enabled', value=snmp_receiver_4_enabled)
+ )
+ diff_config['before']['snmp_receiver_4_enabled'] = setting.value
+ if setting.key == 'snmp.receiver.4.name' and setting.value != snmp_receiver_4_url:
+ changed = True
+ changed_list.append("SNMP-4-name")
+ result['snmp_4_url_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='snmp.receiver.4.name', value=snmp_receiver_4_url)
+ )
+ diff_config['before']['snmp_receiver_4_url'] = setting.value
+ if setting.key == 'snmp.receiver.4.port' and setting.value != snmp_receiver_4_port:
+ changed = True
+ changed_list.append("SNMP-4-port")
+ result['snmp_receiver_4_port_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='snmp.receiver.4.port', value=snmp_receiver_4_port)
+ )
+ diff_config['before']['snmp_receiver_4_port'] = setting.value
+ if setting.key == 'snmp.receiver.4.community' and setting.value != snmp_receiver_4_community:
+ changed = True
+ changed_list.append("SNMP-4-community")
+ result['snmp_4_community_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='snmp.receiver.4.community', value=snmp_receiver_4_community)
+ )
+ diff_config['before']['snmp_receiver_4_community'] = setting.value
+ # Timeout settings
+ if setting.key == 'client.timeout.normal' and setting.value != timeout_normal_operations:
+ changed = True
+ changed_list.append("Timeout normal")
+ result['timeout_normal_operations_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='client.timeout.normal', value=timeout_normal_operations)
+ )
+ diff_config['before']['timeout_normal_operations'] = setting.value
+ if setting.key == 'client.timeout.long' and setting.value != timeout_long_operations:
+ changed = True
+                changed_list.append("Timeout long")
+ result['timeout_long_operations_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='client.timeout.long', value=timeout_long_operations)
+ )
+ diff_config['before']['timeout_long_operations'] = setting.value
+ # Logging settings
+ if setting.key == 'log.level' and setting.value != logging_options:
+ changed = True
+ changed_list.append("Logging")
+ result['logging_options_previous'] = setting.value
+ change_option_list.append(
+ vim.option.OptionValue(key='log.level', value=logging_options)
+ )
+ diff_config['before']['logging_options'] = setting.value
+
+ # Advanced settings
+ for advanced_setting in changed_advanced_settings:
+ if setting.key == advanced_setting.key and setting.value != advanced_setting.value:
+                    changed_list.append(advanced_setting.key)
+                    result[advanced_setting.key + '_previous'] = setting.value
+                    diff_config['before'][advanced_setting.key] = setting.value
+
+ for advanced_setting in changed_advanced_settings:
+ if advanced_setting.key not in changed_list:
+ changed_list.append(advanced_setting.key)
+ result[advanced_setting.key + '_previous'] = "N/A"
+ diff_config['before'][advanced_setting.key] = "N/A"
+
+ if changed:
+ if self.module.check_mode:
+ changed_suffix = ' would be changed'
+ else:
+ changed_suffix = ' changed'
+ if len(changed_list) > 2:
+ message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1])
+ elif len(changed_list) == 2:
+ message = ' and '.join(changed_list)
+ elif len(changed_list) == 1:
+ message = changed_list[0]
+ message += changed_suffix
+ if not self.module.check_mode:
+ try:
+ self.option_manager.UpdateOptions(changedValue=change_option_list)
+ except (vmodl.fault.SystemError, vmodl.fault.InvalidArgument) as invalid_argument:
+ self.module.fail_json(
+ msg="Failed to update option(s) as one or more OptionValue contains an invalid value: %s" %
+ to_native(invalid_argument.msg)
+ )
+ except vim.fault.InvalidName as invalid_name:
+ self.module.fail_json(
+ msg="Failed to update option(s) as one or more OptionValue objects refers to a "
+ "non-existent option : %s" % to_native(invalid_name.msg)
+ )
+ else:
+ message = "vCenter settings already configured properly"
+ result['changed'] = changed
+ result['msg'] = message
+
+ result['diff']['before'] = OrderedDict(sorted(diff_config['before'].items()))
+ result['diff']['after'] = OrderedDict(sorted(diff_config['after'].items()))
+
+ self.module.exit_json(**result)
+
+
+def main():
+ """Main"""
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ database=dict(
+ type='dict',
+ options=dict(
+ max_connections=dict(type='int', default=50),
+ task_cleanup=dict(type='bool', default=True),
+ task_retention=dict(type='int', default=30),
+ event_cleanup=dict(type='bool', default=True),
+ event_retention=dict(type='int', default=30),
+ ),
+ default=dict(
+ max_connections=50,
+ task_cleanup=True,
+ task_retention=30,
+ event_cleanup=True,
+ event_retention=30,
+ ),
+ ),
+ runtime_settings=dict(
+ type='dict',
+ options=dict(
+ unique_id=dict(type='int'),
+ managed_address=dict(type='str'),
+ vcenter_server_name=dict(type='str'),
+ ),
+ ),
+ user_directory=dict(
+ type='dict',
+ options=dict(
+ timeout=dict(type='int', default=60),
+ query_limit=dict(type='bool', default=True),
+ query_limit_size=dict(type='int', default=5000),
+ validation=dict(type='bool', default=True),
+ validation_period=dict(type='int', default=1440),
+ ),
+ default=dict(
+ timeout=60,
+ query_limit=True,
+ query_limit_size=5000,
+ validation=True,
+ validation_period=1440,
+ ),
+ ),
+ mail=dict(
+ type='dict',
+ options=dict(
+ server=dict(type='str'),
+ sender=dict(type='str'),
+ ),
+ default=dict(
+ server='',
+ sender='',
+ ),
+ ),
+ snmp_receivers=dict(
+ type='dict',
+ options=dict(
+ snmp_receiver_1_url=dict(type='str', default='localhost'),
+ snmp_receiver_1_enabled=dict(type='bool', default=True),
+ snmp_receiver_1_port=dict(type='int', default=162),
+ snmp_receiver_1_community=dict(type='str', default='public'),
+ snmp_receiver_2_url=dict(type='str', default=''),
+ snmp_receiver_2_enabled=dict(type='bool', default=False),
+ snmp_receiver_2_port=dict(type='int', default=162),
+ snmp_receiver_2_community=dict(type='str', default=''),
+ snmp_receiver_3_url=dict(type='str', default=''),
+ snmp_receiver_3_enabled=dict(type='bool', default=False),
+ snmp_receiver_3_port=dict(type='int', default=162),
+ snmp_receiver_3_community=dict(type='str', default=''),
+ snmp_receiver_4_url=dict(type='str', default=''),
+ snmp_receiver_4_enabled=dict(type='bool', default=False),
+ snmp_receiver_4_port=dict(type='int', default=162),
+ snmp_receiver_4_community=dict(type='str', default=''),
+ ),
+ default=dict(
+ snmp_receiver_1_url='localhost',
+ snmp_receiver_1_enabled=True,
+ snmp_receiver_1_port=162,
+ snmp_receiver_1_community='public',
+ snmp_receiver_2_url='',
+ snmp_receiver_2_enabled=False,
+ snmp_receiver_2_port=162,
+ snmp_receiver_2_community='',
+ snmp_receiver_3_url='',
+ snmp_receiver_3_enabled=False,
+ snmp_receiver_3_port=162,
+ snmp_receiver_3_community='',
+ snmp_receiver_4_url='',
+ snmp_receiver_4_enabled=False,
+ snmp_receiver_4_port=162,
+ snmp_receiver_4_community='',
+ ),
+ ),
+ timeout_settings=dict(
+ type='dict',
+ options=dict(
+ normal_operations=dict(type='int', default=30),
+ long_operations=dict(type='int', default=120),
+ ),
+ default=dict(
+ normal_operations=30,
+ long_operations=120,
+ ),
+ ),
+ logging_options=dict(default='info', choices=['none', 'error', 'warning', 'info', 'verbose', 'trivia']),
+ advanced_settings=dict(type='dict', default=dict(), required=False),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ host_snmp = VmwareVcenterSettings(module)
+ host_snmp.ensure()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_vcenter_settings_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_vcenter_settings_info.py
new file mode 100644
index 000000000..cdf033e3a
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_vcenter_settings_info.py
@@ -0,0 +1,209 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, sky-joker
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r"""
+module: vmware_vcenter_settings_info
+short_description: Gather info about vCenter settings
+description:
+ - This module can be used to gather information about vCenter settings.
+author:
+ - sky-joker (@sky-joker)
+options:
+ schema:
+ description:
+ - Specify the output schema desired.
+ - The 'summary' output schema is the legacy output from the module.
+ - The 'vsphere' output schema is the vSphere API class definition which requires pyvmomi>6.7.1.
+ choices: ['summary', 'vsphere']
+ default: 'summary'
+ type: str
+ properties:
+ description:
+ - Specify the properties to retrieve.
+ - 'Example:'
+ - ' properties: ['
+ - ' "config.workflow.port"'
+ - ' ]'
+ - Only valid when C(schema) is C(vsphere).
+ type: list
+ elements: str
+extends_documentation_fragment:
+ - community.vmware.vmware.documentation
+"""
+
+EXAMPLES = r"""
+- name: "Gather info about vCenter settings"
+ community.vmware.vmware_vcenter_settings_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ register: vcenter_settings_info
+
+- name: "Gather some info from vCenter using the vSphere API output schema"
+ community.vmware.vmware_vcenter_settings_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ schema: vsphere
+ properties:
+ - config.workflow.port
+ register: vcenter_settings_info_vsphere_api
+"""
+
+RETURN = r"""
+vcenter_config_info:
+ description: dict of vCenter settings
+ returned: success
+ type: dict
+ sample: |
+ {
+ "db_event_cleanup_previous": true,
+ "db_event_retention_previous": 30,
+ "db_max_connections_previous": 50,
+ "db_task_cleanup_previous": true,
+ "db_task_retention_previous": 30,
+ "directory_query_limit_previous": true,
+ "directory_query_limit_size_previous": 5000,
+ "directory_timeout_previous": 60,
+ "directory_validation_period_previous": 1440,
+ "directory_validation_previous": true,
+ "logging_options_previous": "info",
+ "mail_sender_previous": "",
+ "mail_server_previous": "",
+ "runtime_managed_address_previous": "",
+ "runtime_server_name_previous": "vcenter.local",
+ "runtime_unique_id_previous": 48,
+ "snmp_1_community_previous": "public",
+ "snmp_1_enabled_previous": true,
+ "snmp_1_url_previous": "localhost",
+ "snmp_2_community_previous": "",
+ "snmp_2_enabled_previous": false,
+ "snmp_2_url_previous": "",
+ "snmp_3_community_previous": "",
+ "snmp_3_enabled_previous": false,
+ "snmp_3_url_previous": "",
+ "snmp_4_community_previous": "",
+ "snmp_4_enabled_previous": false,
+ "snmp_4_url_previous": "",
+ "snmp_receiver_1_port_previous": 162,
+ "snmp_receiver_2_port_previous": 162,
+ "snmp_receiver_3_port_previous": 162,
+ "snmp_receiver_4_port_previous": 162,
+ "timeout_long_operations_previous": 120,
+ "timeout_normal_operations_previous": 30
+ }
+"""
+
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi,
+ vmware_argument_spec,
+)
+from ansible.module_utils.basic import AnsibleModule
+
+
+class VmwareVcenterSettingsInfo(PyVmomi):
+ def __init__(self, module):
+ super(VmwareVcenterSettingsInfo, self).__init__(module)
+ self.schema = self.params["schema"]
+ self.properties = self.params["properties"]
+
+ if not self.is_vcenter():
+ self.module.fail_json(msg="You have to connect to a vCenter server!")
+
+ def ensure(self):
+ result = {}
+ exists_vcenter_config = {}
+ option_manager = self.content.setting
+
+ for setting in option_manager.setting:
+ exists_vcenter_config[setting.key] = setting.value
+
+ if self.schema == "summary":
+ common_name_value_map = {
+ # Database
+ "VirtualCenter.MaxDBConnection": "db_max_connections_previous",
+ "task.maxAgeEnabled": "db_task_cleanup_previous",
+ "task.maxAge": "db_task_retention_previous",
+ "event.maxAgeEnabled": "db_event_cleanup_previous",
+ "event.maxAge": "db_event_retention_previous",
+ # Runtime settings
+ "instance.id": "runtime_unique_id_previous",
+ "VirtualCenter.ManagedIP": "runtime_managed_address_previous",
+ "VirtualCenter.InstanceName": "runtime_server_name_previous",
+ # User directory
+ "ads.timeout": "directory_timeout_previous",
+ "ads.maxFetchEnabled": "directory_query_limit_previous",
+ "ads.maxFetch": "directory_query_limit_size_previous",
+ "ads.checkIntervalEnabled": "directory_validation_previous",
+ "ads.checkInterval": "directory_validation_period_previous",
+ # Mail
+ "mail.smtp.server": "mail_server_previous",
+ "mail.sender": "mail_sender_previous",
+ # SNMP receivers - SNMP receiver #1
+ "snmp.receiver.1.enabled": "snmp_1_enabled_previous",
+ "snmp.receiver.1.name": "snmp_1_url_previous",
+ "snmp.receiver.1.port": "snmp_receiver_1_port_previous",
+ "snmp.receiver.1.community": "snmp_1_community_previous",
+ # SNMP receivers - SNMP receiver #2
+ "snmp.receiver.2.enabled": "snmp_2_enabled_previous",
+ "snmp.receiver.2.name": "snmp_2_url_previous",
+ "snmp.receiver.2.port": "snmp_receiver_2_port_previous",
+ "snmp.receiver.2.community": "snmp_2_community_previous",
+ # SNMP receivers - SNMP receiver #3
+ "snmp.receiver.3.enabled": "snmp_3_enabled_previous",
+ "snmp.receiver.3.name": "snmp_3_url_previous",
+ "snmp.receiver.3.port": "snmp_receiver_3_port_previous",
+ "snmp.receiver.3.community": "snmp_3_community_previous",
+ # SNMP receivers - SNMP receiver #4
+ "snmp.receiver.4.enabled": "snmp_4_enabled_previous",
+ "snmp.receiver.4.name": "snmp_4_url_previous",
+ "snmp.receiver.4.port": "snmp_receiver_4_port_previous",
+ "snmp.receiver.4.community": "snmp_4_community_previous",
+ # Timeout settings
+ "client.timeout.normal": "timeout_normal_operations_previous",
+ "client.timeout.long": "timeout_long_operations_previous",
+ # Logging settings
+ "log.level": "logging_options_previous",
+ }
+
+ for key, value in common_name_value_map.items():
+ if key in exists_vcenter_config:
+                    result[value] = exists_vcenter_config[key]
+ else:
+ if self.properties:
+ for property in self.properties:
+ if property in exists_vcenter_config:
+ result[property] = exists_vcenter_config[property]
+ else:
+                        self.module.fail_json(msg="Property '%s' not found" % property)
+ else:
+ for property in exists_vcenter_config.keys():
+ result[property] = exists_vcenter_config[property]
+
+ self.module.exit_json(changed=False, vcenter_config_info=result)
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ schema=dict(type="str", choices=["summary", "vsphere"], default="summary"),
+ properties=dict(type="list", elements="str"),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ vmware_vcenter_settings_info = VmwareVcenterSettingsInfo(module)
+ vmware_vcenter_settings_info.ensure()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_vcenter_statistics.py b/ansible_collections/community/vmware/plugins/modules/vmware_vcenter_statistics.py
new file mode 100644
index 000000000..9b8415449
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_vcenter_statistics.py
@@ -0,0 +1,517 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_vcenter_statistics
+short_description: Configures statistics on a vCenter server
+description:
+- This module can be used to configure the vCenter server statistics.
+- The remaining settings can be configured with the module C(vmware_vcenter_settings).
+author:
+- Christian Kotte (@ckotte)
+options:
+ interval_past_day:
+ description:
+ - Settings for vCenter server past day statistic collection.
+ suboptions:
+ enabled:
+ type: bool
+ description: Past day statistics collection enabled.
+ default: true
+ interval_minutes:
+ type: int
+ description: Interval duration in minutes.
+ choices: [ 1, 2, 3, 4, 5 ]
+ default: 5
+ save_for_days:
+ type: int
+ description: Save for value in days.
+ choices: [ 1, 2, 3, 4, 5 ]
+ default: 1
+ level:
+ type: int
+ description: Statistics level.
+ choices: [ 1, 2, 3, 4 ]
+ default: 1
+ type: dict
+ interval_past_week:
+ description:
+ - Settings for vCenter server past week statistic collection.
+ suboptions:
+ enabled:
+ type: bool
+ description: Past week statistics collection enabled.
+ default: true
+ interval_minutes:
+ type: int
+ description: Interval duration in minutes.
+ choices: [ 30 ]
+ default: 30
+ save_for_weeks:
+ type: int
+ description: Save for value in weeks.
+ choices: [ 1 ]
+ default: 1
+ level:
+ type: int
+ description: Statistics level.
+ choices: [ 1, 2, 3, 4 ]
+ default: 1
+ type: dict
+ interval_past_month:
+ description:
+ - Settings for vCenter server past month statistic collection.
+ suboptions:
+ enabled:
+ type: bool
+ description: Past month statistics collection enabled.
+ default: true
+ interval_hours:
+ type: int
+ description: Interval duration in hours.
+ choices: [ 2 ]
+ default: 2
+ save_for_months:
+ type: int
+ description: Save for value in months.
+ choices: [ 1 ]
+ default: 1
+ level:
+ type: int
+ description: Statistics level.
+ choices: [ 1, 2, 3, 4 ]
+ default: 1
+ type: dict
+ interval_past_year:
+ description:
+    - Settings for vCenter server past year statistic collection.
+ suboptions:
+ enabled:
+ type: bool
+        description: Past year statistics collection enabled.
+ default: true
+ interval_days:
+ type: int
+ description: Interval duration in days.
+ choices: [ 1 ]
+ default: 1
+ save_for_years:
+ type: int
+ description: Save for value in years.
+ choices: [ 1, 2, 3, 4, 5 ]
+ default: 1
+ level:
+ type: int
+ description: Statistics level.
+ choices: [ 1, 2, 3, 4 ]
+ default: 1
+ type: dict
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Configure vCenter statistics
+ community.vmware.vmware_vcenter_statistics:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ interval_past_day:
+ enabled: true
+ interval_minutes: 5
+ save_for_days: 1
+ level: 1
+ interval_past_week:
+ enabled: true
+ level: 1
+ interval_past_month:
+ enabled: true
+ level: 1
+ interval_past_year:
+ enabled: true
+ save_for_years: 1
+ level: 1
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+results:
+ description: metadata about vCenter statistics settings
+ returned: always
+ type: dict
+ sample: {
+ "changed": false,
+ "msg": "vCenter statistics already configured properly",
+ "past_day_enabled": true,
+ "past_day_interval": 5,
+ "past_day_level": 1,
+ "past_day_save_for": 1,
+ "past_month_enabled": true,
+ "past_month_interval": 2,
+ "past_month_level": 1,
+ "past_month_save_for": 1,
+ "past_week_enabled": true,
+ "past_week_interval": 30,
+ "past_week_level": 1,
+ "past_week_save_for": 1,
+ "past_year_enabled": true,
+ "past_year_interval": 1,
+ "past_year_level": 1,
+ "past_year_save_for": 1
+ }
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+from ansible.module_utils._text import to_native
+
+
+# This is a helper class to sort the changes in a valid order
+# "Greater than" means a change has to happen after another one.
+# As an example, let's say self is daily (key == 1) and other is weekly (key == 2)
+class ChangeHelper:
+ def __init__(self, old, new):
+ self.key = new.key
+ self.old = old
+ self.new = new
+
+ def __eq__(self, other):
+ return ((self.key, self.new.enabled, self.new.level)
+ == (other.key, other.new.enabled, other.new.level))
+
+ def __gt__(self, other):
+ if self.key < other.key:
+ # You cannot disable daily if weekly is enabled, so later
+ if self.new.enabled < other.old.enabled:
+ return True
+ # Enabling daily is OK if weekly is disabled
+ elif self.new.enabled > other.old.enabled:
+ return False
+ # Otherwise, decreasing the daily level below the current weekly level has to be done later
+ else:
+ return self.new.level < other.old.level
+ else:
+ return not (other > self)
+
+ def __ge__(self, other):
+ return (self > other) or (self == other)
+
+ def __lt__(self, other):
+ return not (self >= other)
+
+ def __le__(self, other):
+ return not (self > other)
+
+
+class VmwareVcenterStatistics(PyVmomi):
+ """Manage statistics for a vCenter server"""
+
+ def __init__(self, module):
+ super(VmwareVcenterStatistics, self).__init__(module)
+
+ if not self.is_vcenter():
+ self.module.fail_json(msg="You have to connect to a vCenter server!")
+
+ def ensure(self):
+ """Manage statistics for a vCenter server"""
+
+ DAILY_COUNTER = 1
+ WEEKLY_COUNTER = 2
+ MONTHLY_COUNTER = 3
+ YEARLY_COUNTER = 4
+
+ result = dict(changed=False, msg='')
+ message = ''
+
+ past_day_enabled = self.params['interval_past_day'].get('enabled', True)
+ past_day_seconds = self.params['interval_past_day'].get('interval_minutes', 5) * 60
+ past_day_save_for_seconds = self.params['interval_past_day'].get('save_for_days', 1) * 86400
+ past_day_level = self.params['interval_past_day'].get('level', 1)
+ past_week_enabled = self.params['interval_past_week'].get('enabled', True)
+ past_week_seconds = self.params['interval_past_week'].get('interval_minutes', 30) * 60
+ past_week_save_for_seconds = self.params['interval_past_week'].get('save_for_weeks', 1) * 604800
+ past_week_level = self.params['interval_past_week'].get('level', 1)
+ past_month_enabled = self.params['interval_past_month'].get('enabled', True)
+ past_month_seconds = self.params['interval_past_month'].get('interval_hours', 2) * 3600
+ past_month_save_for_seconds = self.params['interval_past_month'].get('save_for_months', 1) * 2592000
+ past_month_level = self.params['interval_past_month'].get('level', 1)
+ past_year_enabled = self.params['interval_past_year'].get('enabled', True)
+ past_year_seconds = self.params['interval_past_year'].get('interval_days', 1) * 86400
+ past_year_save_for_seconds = self.params['interval_past_year'].get('save_for_years', 1) * 31536000
+ past_year_level = self.params['interval_past_year'].get('level', 1)
+
+ # Check if level options are valid
+ if past_year_level > past_month_level:
+ self.module.fail_json(msg="The statistics level for past year can't be higher than past month!")
+ if past_month_level > past_week_level:
+ self.module.fail_json(msg="The statistics level for past month can't be higher than past week!")
+ if past_week_level > past_day_level:
+ self.module.fail_json(msg="The statistics level for past week can't be higher than past day!")
+
+ # Check if state options are valid
+ if not past_day_enabled and (past_week_enabled or past_month_enabled or past_year_enabled):
+ self.module.fail_json(msg="The intervals past week, month, and year need to be disabled as well!")
+ if not past_week_enabled and (past_month_enabled or past_year_enabled):
+ self.module.fail_json(msg="The intervals past month, and year need to be disabled as well!")
+ if not past_month_enabled and past_year_enabled:
+ self.module.fail_json(msg="The interval past year need to be disabled as well!")
+ if past_year_enabled and (not past_day_enabled or not past_week_enabled or not past_month_enabled):
+ self.module.fail_json(msg="The intervals past day, week, and month need to be enabled as well!")
+ if past_month_enabled and (not past_day_enabled or not past_week_enabled):
+ self.module.fail_json(msg="The intervals past day, and week need to be enabled as well!")
+ if past_week_enabled and (not past_day_enabled):
+ self.module.fail_json(msg="The intervals past day need to be enabled as well!")
+
+ changed = False
+ changed_list = []
+
+ # Check statistics
+ result['past_day_enabled'] = past_day_enabled
+ result['past_day_interval'] = int(past_day_seconds / 60)
+ result['past_day_save_for'] = int(past_day_save_for_seconds / 86400)
+ result['past_day_level'] = past_day_level
+ result['past_week_enabled'] = past_week_enabled
+ result['past_week_interval'] = int(past_week_seconds / 60)
+ result['past_week_save_for'] = int(past_week_save_for_seconds / 604800)
+ result['past_week_level'] = past_week_level
+ result['past_month_enabled'] = past_month_enabled
+ result['past_month_interval'] = int(past_month_seconds / 3600)
+ result['past_month_save_for'] = int(past_month_save_for_seconds / 2592000)
+ result['past_month_level'] = past_month_level
+ result['past_year_enabled'] = past_year_enabled
+ result['past_year_interval'] = int(past_year_seconds / 86400)
+ result['past_year_save_for'] = int(past_year_save_for_seconds / 31536000)
+ result['past_year_level'] = past_year_level
+ change_statistics_list = []
+ perf_manager = self.content.perfManager
+ for historical_interval in perf_manager.historicalInterval:
+ # Statistics for past day
+ if historical_interval.name == 'Past day' and (
+ historical_interval.samplingPeriod != past_day_seconds
+ or historical_interval.length != past_day_save_for_seconds
+ or historical_interval.level != past_day_level
+ or historical_interval.enabled != past_day_enabled
+ ):
+ changed = True
+ changed_list.append("Past day interval")
+ if historical_interval.enabled != past_day_enabled:
+ result['past_day_enabled_previous'] = historical_interval.enabled
+ if historical_interval.samplingPeriod != past_day_seconds:
+ result['past_day_interval_previous'] = int(historical_interval.samplingPeriod / 60)
+ if historical_interval.length != past_day_save_for_seconds:
+ result['past_day_save_for_previous'] = int(historical_interval.length / 86400)
+ if historical_interval.level != past_day_level:
+ result['past_day_level_previous'] = historical_interval.level
+
+ change_statistics_list.append(
+ ChangeHelper(
+ historical_interval,
+ vim.HistoricalInterval(
+ key=DAILY_COUNTER,
+ samplingPeriod=past_day_seconds,
+ name='Past day',
+ length=past_day_save_for_seconds,
+ level=past_day_level,
+ enabled=past_day_enabled
+ )
+ )
+ )
+
+ # Statistics for past week
+ if historical_interval.name == 'Past week' and (
+ historical_interval.samplingPeriod != past_week_seconds
+ or historical_interval.length != past_week_save_for_seconds
+ or historical_interval.level != past_week_level
+ or historical_interval.enabled != past_week_enabled
+ ):
+ changed = True
+ changed_list.append("Past week interval")
+ if historical_interval.enabled != past_week_enabled:
+ result['past_week_enabled_previous'] = historical_interval.enabled
+ if historical_interval.samplingPeriod != past_week_seconds:
+ result['past_week_interval_previous'] = int(historical_interval.samplingPeriod / 60)
+ if historical_interval.length != past_week_save_for_seconds:
+ result['past_week_save_for_previous'] = int(historical_interval.length / 604800)
+ if historical_interval.level != past_week_level:
+ result['past_week_level_previous'] = historical_interval.level
+
+ change_statistics_list.append(
+ ChangeHelper(
+ historical_interval,
+ vim.HistoricalInterval(
+ key=WEEKLY_COUNTER,
+ samplingPeriod=past_week_seconds,
+ name='Past week',
+ length=past_week_save_for_seconds,
+ level=past_week_level,
+ enabled=past_week_enabled
+ )
+ )
+ )
+
+ # Statistics for past month
+ if historical_interval.name == 'Past month' and (
+ historical_interval.samplingPeriod != past_month_seconds
+ or historical_interval.length != past_month_save_for_seconds
+ or historical_interval.level != past_month_level
+ or historical_interval.enabled != past_month_enabled
+ ):
+ changed = True
+ changed_list.append("Past month interval")
+ if historical_interval.enabled != past_month_enabled:
+ result['past_month_enabled_previous'] = historical_interval.enabled
+ if historical_interval.samplingPeriod != past_month_seconds:
+ result['past_month_interval_previous'] = int(historical_interval.samplingPeriod / 3600)
+ if historical_interval.length != past_month_save_for_seconds:
+ result['past_month_save_for_previous'] = int(historical_interval.length / 2592000)
+ if historical_interval.level != past_month_level:
+ result['past_month_level_previous'] = historical_interval.level
+
+ change_statistics_list.append(
+ ChangeHelper(
+ historical_interval,
+ vim.HistoricalInterval(
+ key=MONTHLY_COUNTER,
+ samplingPeriod=past_month_seconds,
+ name='Past month',
+ length=past_month_save_for_seconds,
+ level=past_month_level,
+ enabled=past_month_enabled
+ )
+ )
+ )
+
+ # Statistics for past year
+ if historical_interval.name == 'Past year' and (
+ historical_interval.samplingPeriod != past_year_seconds
+ or historical_interval.length != past_year_save_for_seconds
+ or historical_interval.level != past_year_level
+ or historical_interval.enabled != past_year_enabled
+ ):
+ changed = True
+ changed_list.append("Past year interval")
+ if historical_interval.enabled != past_year_enabled:
+ result['past_year_enabled_previous'] = historical_interval.enabled
+ if historical_interval.samplingPeriod != past_year_seconds:
+ result['past_year_interval_previous'] = int(historical_interval.samplingPeriod / 86400)
+ if historical_interval.length != past_year_save_for_seconds:
+ result['past_year_save_for_previous'] = int(historical_interval.length / 31536000)
+ if historical_interval.level != past_year_level:
+ result['past_year_level_previous'] = historical_interval.level
+
+ change_statistics_list.append(
+ ChangeHelper(
+ historical_interval,
+ vim.HistoricalInterval(
+ key=YEARLY_COUNTER,
+ samplingPeriod=past_year_seconds,
+ name='Past year',
+ length=past_year_save_for_seconds,
+ level=past_year_level,
+ enabled=past_year_enabled
+ )
+ )
+ )
+
+ if changed:
+ if self.module.check_mode:
+ changed_suffix = ' would be changed'
+ else:
+ changed_suffix = ' changed'
+ if len(changed_list) > 2:
+ message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1])
+ elif len(changed_list) == 2:
+ message = ' and '.join(changed_list)
+ elif len(changed_list) == 1:
+ message = changed_list[0]
+ message += changed_suffix
+ if not self.module.check_mode:
+ change_statistics_list.sort()
+ for statistic in change_statistics_list:
+ self.update_perf_interval(perf_manager, statistic.new)
+ else:
+ message = "vCenter statistics already configured properly"
+ result['changed'] = changed
+ result['msg'] = message
+
+ self.module.exit_json(**result)
+
+ def update_perf_interval(self, perf_manager, statistic):
+ """Update statistics interval"""
+ try:
+ perf_manager.UpdatePerfInterval(statistic)
+ except vmodl.fault.InvalidArgument as invalid_argument:
+ self.module.fail_json(
+ msg="The set of arguments passed to the function is not specified correctly or "
+ "the update does not conform to the rules: %s" % to_native(invalid_argument.msg)
+ )
+
+
+def main():
+ """Main"""
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ interval_past_day=dict(
+ type='dict',
+ options=dict(
+ enabled=dict(type='bool', default=True),
+ interval_minutes=dict(type='int', choices=[1, 2, 3, 4, 5], default=5),
+ save_for_days=dict(type='int', choices=[1, 2, 3, 4, 5], default=1),
+ level=dict(type='int', choices=[1, 2, 3, 4], default=1),
+ ),
+ ),
+ interval_past_week=dict(
+ type='dict',
+ options=dict(
+ enabled=dict(type='bool', default=True),
+ interval_minutes=dict(type='int', choices=[30], default=30),
+ save_for_weeks=dict(type='int', choices=[1], default=1),
+ level=dict(type='int', choices=[1, 2, 3, 4], default=1),
+ ),
+ ),
+ interval_past_month=dict(
+ type='dict',
+ options=dict(
+ enabled=dict(type='bool', default=True),
+ interval_hours=dict(type='int', choices=[2], default=2),
+ save_for_months=dict(type='int', choices=[1], default=1),
+ level=dict(type='int', choices=[1, 2, 3, 4], default=1),
+ ),
+ ),
+ interval_past_year=dict(
+ type='dict',
+ options=dict(
+ enabled=dict(type='bool', default=True),
+ interval_days=dict(type='int', choices=[1], default=1),
+ save_for_years=dict(type='int', choices=[1, 2, 3, 4, 5], default=1),
+ level=dict(type='int', choices=[1, 2, 3, 4], default=1),
+ ),
+ ),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ host_snmp = VmwareVcenterStatistics(module)
+ host_snmp.ensure()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_vm_config_option.py b/ansible_collections/community/vmware/plugins/modules/vmware_vm_config_option.py
new file mode 100644
index 000000000..171fd476e
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_vm_config_option.py
@@ -0,0 +1,316 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2021, Ansible Project
+# Copyright: (c) 2021, VMware, Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_vm_config_option
+short_description: Return supported guest ID list and VM recommended config option for specific guest OS
+description: >
+ This module is used for getting the hardware versions supported for creation, the guest ID list supported by ESXi
+ host for the most recent virtual hardware supported or specified hardware version, the VM recommended config options
+ for specified guest OS ID.
+author:
+- Diane Wang (@Tomorrow9) <dianew@vmware.com>
+notes:
+- Known issue on vSphere 7.0 (https://github.com/vmware/pyvmomi/issues/915)
+options:
+ datacenter:
+ description:
+ - The datacenter name used to get specified cluster or host.
+ - This parameter is case sensitive.
+ default: ha-datacenter
+ type: str
+ cluster_name:
+ description:
+ - Name of the cluster.
+ - If C(esxi_hostname) is not given, this parameter is required.
+ type: str
+ esxi_hostname:
+ description:
+ - ESXi hostname.
+ - Obtain VM configure options on this ESXi host.
+ - If C(cluster_name) is not given, this parameter is required.
+ type: str
+ get_hardware_versions:
+ description:
+ - Return the list of VM hardware versions supported for creation and the default hardware version on the
+ specified entity.
+ type: bool
+ default: false
+ get_guest_os_ids:
+ description:
+ - Return the list of guest OS IDs supported on the specified entity.
+ - If C(hardware_version) is set, will return the corresponding guest OS ID list supported, or will return the
+ guest OS ID list for the default hardware version.
+ type: bool
+ default: false
+ get_config_options:
+ description:
+ - Return the dict of VM recommended config options for guest ID specified by C(guest_id) with hardware version
+ specified by C(hardware_version) or the default hardware version.
+ - When set to True, C(guest_id) must be set.
+ type: bool
+ default: false
+ guest_id:
+ description:
+ - The guest OS ID from the returned list when C(get_guest_os_ids) is set to C(true), e.g., 'rhel8_64Guest'.
+ - This parameter must be set when C(get_config_options) is set to C(true).
+ type: str
+ hardware_version:
+ description:
+ - The hardware version from the returned list when C(get_hardware_versions) is set to C(true), e.g., 'vmx-19'.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Get supported guest ID list on given ESXi host with default hardware version
+ community.vmware.vmware_vm_config_option:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ esxi_hostname }}"
+ get_guest_os_ids: true
+ delegate_to: localhost
+
+- name: Get VM recommended config option for Windows 10 guest OS on given ESXi host
+ community.vmware.vmware_vm_config_option:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: "{{ esxi_hostname }}"
+ get_config_options: true
+ guest_id: "windows9_64Guest"
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+instance:
+ description: metadata about the VM recommended configuration
+ returned: always
+ type: dict
+ sample: None
+'''
+
+HAS_PYVMOMI = False
+try:
+ from pyVmomi import vim
+ HAS_PYVMOMI = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import find_obj, vmware_argument_spec, PyVmomi
+from ansible_collections.community.vmware.plugins.module_utils.vm_device_helper import PyVmomiDeviceHelper
+
+
class VmConfigOption(PyVmomi):
    """Query an ESXi host's or cluster's EnvironmentBrowser for supported
    hardware versions, supported guest OS IDs and the recommended config
    options for a specific guest OS."""

    def __init__(self, module):
        super(VmConfigOption, self).__init__(module)
        self.device_helper = PyVmomiDeviceHelper(self.module)
        # Map of friendly controller/NIC names -> pyVmomi device types; used
        # to translate supported/recommended device classes in the results.
        self.ctl_device_type = self.device_helper.scsi_device_type.copy()
        self.ctl_device_type.update({'sata': self.device_helper.sata_device_type,
                                     'nvme': self.device_helper.nvme_device_type}
                                    )
        self.ctl_device_type.update(self.device_helper.usb_device_type)
        self.ctl_device_type.update(self.device_helper.nic_device_type)
        # Set later when 'esxi_hostname' is given; forwarded to the config
        # option query when connected to vCenter (see get_config_option_by_spec).
        self.target_host = None

    def get_hardware_versions(self, env_browser):
        """Return (hardware version keys supported for creation, default key).

        :param env_browser: vim.EnvironmentBrowser of the ComputeResource
        :return: tuple of (list of str, str); default key is '' if none flagged
        """
        support_create = []
        default_config = ''
        try:
            desc = env_browser.QueryConfigOptionDescriptor()
        except Exception as e:
            self.module.fail_json(msg="Failed to obtain VM config option descriptor due to fault: %s" % to_native(e))
        if desc:
            for option_desc in desc:
                if option_desc.createSupported:
                    support_create = support_create + [option_desc.key]
                if option_desc.defaultConfigOption:
                    default_config = option_desc.key

        return support_create, default_config

    def get_config_option_by_spec(self, env_browser, guest_id=None, key=''):
        """Query config options filtered by guest IDs and hardware version key.

        :param env_browser: vim.EnvironmentBrowser of the ComputeResource
        :param guest_id: list of guest OS IDs to filter on (no filter if empty)
        :param key: hardware version key, e.g. 'vmx-19' (default version if '')
        :return: vim.vm.ConfigOption, or None if the query raised and fail_json
                 did not terminate (normally fail_json exits the module)
        """
        vm_config_option = None
        if guest_id is None:
            guest_id = []
        # Against vCenter the query is pinned to the resolved target host;
        # against a standalone ESXi the host argument must stay unset.
        if self.is_vcenter():
            host = self.target_host
        else:
            host = None
        config_query_spec = vim.EnvironmentBrowser.ConfigOptionQuerySpec(guestId=guest_id, host=host, key=key)
        try:
            vm_config_option = env_browser.QueryConfigOptionEx(spec=config_query_spec)
        except Exception as e:
            self.module.fail_json(msg="Failed to obtain VM config options due to fault: %s" % to_native(e))

        return vm_config_option

    def get_config_option_recommended(self, guest_os_desc, hwv_version=''):
        """Build the result dict of recommended settings for a guest OS.

        Only the first descriptor in guest_os_desc is evaluated.

        :param guest_os_desc: list of vim.vm.GuestOsDescriptor
        :param hwv_version: hardware version string echoed into the result
        :return: dict of recommended/supported settings ({} if no descriptor)
        """
        guest_os_option_dict = {}
        support_usb_controller = []
        support_disk_controller = []
        support_ethernet_card = []
        if guest_os_desc and len(guest_os_desc) != 0:
            default_disk_ctl = default_ethernet = default_cdrom_ctl = default_usb_ctl = ''
            # Translate pyVmomi device types back to the friendly names
            # registered in self.ctl_device_type.
            for name, dev_type in self.ctl_device_type.items():
                for supported_type in guest_os_desc[0].supportedUSBControllerList:
                    if supported_type == dev_type:
                        support_usb_controller = support_usb_controller + [name]
                if dev_type == guest_os_desc[0].recommendedUSBController:
                    default_usb_ctl = name
                for supported_type in guest_os_desc[0].supportedEthernetCard:
                    if supported_type == dev_type:
                        support_ethernet_card = support_ethernet_card + [name]
                if dev_type == guest_os_desc[0].recommendedEthernetCard:
                    default_ethernet = name
                for supported_type in guest_os_desc[0].supportedDiskControllerList:
                    if supported_type == dev_type:
                        support_disk_controller = support_disk_controller + [name]
                if dev_type == guest_os_desc[0].recommendedDiskController:
                    default_disk_ctl = name
                if dev_type == guest_os_desc[0].recommendedCdromController:
                    default_cdrom_ctl = name
            guest_os_option_dict = {
                'hardware_version': hwv_version,
                'guest_id': guest_os_desc[0].id,
                'guest_fullname': guest_os_desc[0].fullName,
                'rec_cpu_cores_per_socket': guest_os_desc[0].numRecommendedCoresPerSocket,
                'rec_cpu_socket': guest_os_desc[0].numRecommendedPhysicalSockets,
                'rec_memory_mb': guest_os_desc[0].recommendedMemMB,
                'rec_firmware': guest_os_desc[0].recommendedFirmware,
                'default_secure_boot': guest_os_desc[0].defaultSecureBoot,
                'support_secure_boot': guest_os_desc[0].supportsSecureBoot,
                'default_disk_controller': default_disk_ctl,
                'rec_disk_mb': guest_os_desc[0].recommendedDiskSizeMB,
                'default_ethernet': default_ethernet,
                'default_cdrom_controller': default_cdrom_ctl,
                'default_usb_controller': default_usb_ctl,
                'support_tpm_20': guest_os_desc[0].supportsTPM20,
                'support_persistent_memory': guest_os_desc[0].persistentMemorySupported,
                'rec_persistent_memory': guest_os_desc[0].recommendedPersistentMemoryMB,
                'support_min_persistent_mem_mb': guest_os_desc[0].supportedMinPersistentMemoryMB,
                'rec_vram_kb': guest_os_desc[0].vRAMSizeInKB.defaultValue,
                'support_usb_controller': support_usb_controller,
                'support_disk_controller': support_disk_controller,
                'support_ethernet_card': support_ethernet_card
            }

        return guest_os_option_dict

    def get_guest_id_list(self, guest_os_desc):
        """Return the list of guest OS IDs from a vim.vm.ConfigOption result."""
        gos_id_list = []
        if guest_os_desc:
            for gos_desc in guest_os_desc.guestOSDescriptor:
                gos_id_list = gos_id_list + [gos_desc.id]

        return gos_id_list

    def get_config_option_for_guest(self):
        """Gather the requested info and exit the module with the results.

        Always exits via module.exit_json/fail_json; never returns.
        """
        results = {}
        guest_id = []
        datacenter_name = self.params.get('datacenter')
        cluster_name = self.params.get('cluster_name')
        esxi_host_name = self.params.get('esxi_hostname')
        if self.params.get('guest_id'):
            guest_id = [self.params.get('guest_id')]

        # Nothing requested: exit (not fail) with a hint to the user.
        if not self.params.get('get_hardware_versions') and not self.params.get('get_guest_os_ids') \
                and not self.params.get('get_config_options'):
            self.module.exit_json(msg="Please set at least one of these parameters 'get_hardware_versions',"
                                      " 'get_guest_os_ids', 'get_config_options' to True to get the desired info.")
        if self.params.get('get_config_options') and len(guest_id) == 0:
            self.module.fail_json(msg="Please set 'guest_id' when 'get_config_options' is set to True,"
                                      " to get the VM recommended config option for specific guest OS.")

        # Get the datacenter object
        datacenter = find_obj(self.content, [vim.Datacenter], datacenter_name)
        if not datacenter:
            self.module.fail_json(msg='Unable to find datacenter "%s"' % datacenter_name)
        # Get the cluster object
        if cluster_name:
            cluster = find_obj(self.content, [vim.ComputeResource], cluster_name, folder=datacenter)
            if not cluster:
                self.module.fail_json(msg='Unable to find cluster "%s"' % cluster_name)
        # If host is given, get the cluster object using the host
        elif esxi_host_name:
            host = find_obj(self.content, [vim.HostSystem], esxi_host_name, folder=datacenter)
            if not host:
                self.module.fail_json(msg='Unable to find host "%s"' % esxi_host_name)
            self.target_host = host
            cluster = host.parent
        # Define the environment browser object the ComputeResource presents
        env_browser = cluster.environmentBrowser
        if env_browser is None:
            self.module.fail_json(msg="The environmentBrowser of the ComputeResource is None, so can not get the"
                                      " desired config option info, please check your vSphere environment.")
        # Get supported hardware versions list
        support_create_list, default_config = self.get_hardware_versions(env_browser=env_browser)
        if self.params.get('get_hardware_versions'):
            results.update({'supported_hardware_versions': support_create_list,
                            'default_hardware_version': default_config})

        if self.params.get('get_guest_os_ids') or self.params.get('get_config_options'):
            # Get supported guest ID list
            # NOTE(review): AnsibleModule populates all declared params, so
            # the '' default here never applies and this is None when the
            # option is unset -- confirm QueryConfigOptionEx accepts key=None.
            hardware_version = self.params.get('hardware_version', '')
            if hardware_version and len(support_create_list) != 0 and hardware_version not in support_create_list:
                self.module.fail_json(msg="Specified hardware version '%s' is not in the supported create list: %s"
                                          % (hardware_version, support_create_list))
            vm_config_option_all = self.get_config_option_by_spec(env_browser=env_browser, key=hardware_version)
            supported_gos_list = self.get_guest_id_list(guest_os_desc=vm_config_option_all)
            if self.params.get('get_guest_os_ids'):
                results.update({vm_config_option_all.version: supported_gos_list})

            if self.params.get('get_config_options') and len(guest_id) != 0:
                if supported_gos_list and guest_id[0] not in supported_gos_list:
                    self.module.fail_json(msg="Specified guest ID '%s' is not in the supported guest ID list: '%s'"
                                              % (guest_id[0], supported_gos_list))
                vm_config_option_guest = self.get_config_option_by_spec(env_browser=env_browser, guest_id=guest_id,
                                                                        key=hardware_version)
                guest_os_options = vm_config_option_guest.guestOSDescriptor
                guest_os_option_dict = self.get_config_option_recommended(guest_os_desc=guest_os_options,
                                                                          hwv_version=vm_config_option_guest.version)
                results.update({'recommended_config_options': guest_os_option_dict})

        self.module.exit_json(changed=False, failed=False, instance=results)
+
+
def main():
    """Module entry point: declare arguments and run the config-option gatherer."""
    spec = vmware_argument_spec()
    spec.update(dict(
        datacenter=dict(type='str', default='ha-datacenter'),
        cluster_name=dict(type='str'),
        esxi_hostname=dict(type='str'),
        get_hardware_versions=dict(type='bool', default=False),
        get_guest_os_ids=dict(type='bool', default=False),
        get_config_options=dict(type='bool', default=False),
        guest_id=dict(type='str'),
        hardware_version=dict(type='str'),
    ))

    # One of cluster_name/esxi_hostname is required to locate the
    # EnvironmentBrowser; check mode is safe (module is read-only).
    module = AnsibleModule(
        argument_spec=spec,
        supports_check_mode=True,
        required_one_of=[['cluster_name', 'esxi_hostname']],
    )

    VmConfigOption(module).get_config_option_for_guest()


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_vm_host_drs_rule.py b/ansible_collections/community/vmware/plugins/modules/vmware_vm_host_drs_rule.py
new file mode 100644
index 000000000..68f1dc629
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_vm_host_drs_rule.py
@@ -0,0 +1,427 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Karsten Kaj Jakobsen <kj@patientsky.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author:
+ - "Karsten Kaj Jakobsen (@karstenjakobsen)"
+description:
+ - "This module can be used to create VM-Host rules in a given cluster."
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+module: vmware_vm_host_drs_rule
+options:
+ affinity_rule:
+ default: true
+ description:
+ - "If set to C(true), the DRS rule will be an Affinity rule."
+ - "If set to C(false), the DRS rule will be an Anti-Affinity rule."
+ - "Effective only if C(state) is set to C(present)."
+ type: bool
+ datacenter:
+ aliases:
+ - datacenter_name
+ description:
+ - "Datacenter to search for given cluster. If not set, we use first cluster we encounter with C(cluster_name)."
+ required: false
+ type: str
+ cluster_name:
+ description:
+ - "Cluster to create VM-Host rule."
+ required: true
+ type: str
+ drs_rule_name:
+ description:
+ - "Name of rule to create or remove."
+ required: true
+ type: str
+ enabled:
+ default: false
+ description:
+ - "If set to C(true), the DRS rule will be enabled."
+ - "Effective only if C(state) is set to C(present)."
+ type: bool
+ host_group_name:
+ description:
+ - "Name of Host group to use with rule."
+ - "Effective only if C(state) is set to C(present)."
+ type: str
+ mandatory:
+ default: false
+ description:
+ - "If set to C(true), the DRS rule will be mandatory."
+ - "Effective only if C(state) is set to C(present)."
+ type: bool
+ state:
+ choices:
+ - present
+ - absent
+ default: present
+ description:
+ - "If set to C(present) and the rule does not exist then the rule will be created."
+ - "If set to C(absent) and the rule exists then the rule will be deleted."
+ type: str
+ vm_group_name:
+ description:
+ - "Name of VM group to use with rule."
+ - "Effective only if C(state) is set to C(present)."
+ type: str
+short_description: "Creates a VM-Host DRS rule in a given cluster"
+
+'''
+
+EXAMPLES = r'''
+---
+- name: "Create mandatory DRS Affinity rule for VM/Host"
+ community.vmware.vmware_vm_host_drs_rule:
+ hostname: "{{ vcenter_hostname }}"
+ password: "{{ vcenter_password }}"
+ username: "{{ vcenter_username }}"
+ cluster_name: DC0_C0
+ drs_rule_name: drs_rule_host_aff_0001
+ host_group_name: DC0_C0_HOST_GR1
+ vm_group_name: DC0_C0_VM_GR1
+ mandatory: true
+ enabled: true
+ affinity_rule: true
+'''
+
+RETURN = r'''
+
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi, vmware_argument_spec, wait_for_task, find_cluster_by_name,
+ find_datacenter_by_name)
+
+
class VmwareVmHostRuleDrs(PyVmomi):
    """
    Manage VM-Host DRS rules in a cluster: create, edit and delete.
    """

    def __init__(self, module):
        """
        Read module parameters and resolve the optional datacenter and the
        mandatory cluster objects; lookup failures raise Exception except in
        check mode, where missing objects are tolerated.
        """

        super(VmwareVmHostRuleDrs, self).__init__(module)

        self.__datacenter_name = module.params.get('datacenter', None)
        self.__datacenter_obj = None
        self.__cluster_name = module.params['cluster_name']
        self.__cluster_obj = None
        self.__vm_group_name = module.params.get('vm_group_name', None)
        self.__host_group_name = module.params.get('host_group_name', None)
        self.__rule_name = module.params['drs_rule_name']
        self.__enabled = module.params['enabled']
        self.__mandatory = module.params['mandatory']
        self.__affinity_rule = module.params['affinity_rule']
        self.__msg = 'Nothing to see here...'
        self.__result = dict()
        self.__changed = False

        if self.__datacenter_name is not None:

            self.__datacenter_obj = find_datacenter_by_name(self.content, self.__datacenter_name)

            if self.__datacenter_obj is None and module.check_mode is False:
                raise Exception("Datacenter '%s' not found" % self.__datacenter_name)

        self.__cluster_obj = find_cluster_by_name(content=self.content,
                                                  cluster_name=self.__cluster_name,
                                                  datacenter=self.__datacenter_obj)

        # Throw error if cluster does not exist
        if self.__cluster_obj is None and module.check_mode is False:
            raise Exception("Cluster '%s' not found" % self.__cluster_name)

    def get_msg(self):
        """
        Return the human-readable message for the Ansible result.

        Returns: string
        """
        return self.__msg

    def get_result(self):
        """
        Return the normalized rule spec for the Ansible result.

        Returns: dict
        """
        return self.__result

    def get_changed(self):
        """
        Return whether create()/delete() changed anything.

        Returns: boolean
        """
        return self.__changed

    def __get_rule_key_by_name(self, cluster_obj=None, rule_name=None):
        """
        Look up a VM-Host DRS rule by name in the cluster configuration.
        Args:
            rule_name: Name of rule (defaults to the module's drs_rule_name)
            cluster_obj: Cluster managed object (defaults to resolved cluster)

        Returns: Rule object if found or None

        """

        if cluster_obj is None:
            cluster_obj = self.__cluster_obj

        if rule_name is None:
            rule_name = self.__rule_name

        if rule_name:
            rules_list = [rule for rule in cluster_obj.configuration.rule if rule.name == rule_name]
            if rules_list:
                return rules_list[0]

        # No rule found
        return None

    def __normalize_vm_host_rule_spec(self, rule_obj, cluster_obj=None):
        """
        Convert a rule managed object into a human-readable dict, resolving
        the member VM/host names of the referenced groups.
        Args:
            rule_obj: Rule managed object
            cluster_obj: Cluster managed object (defaults to resolved cluster)

        Returns: Dictionary with VM-Host DRS Rule info ({} if either arg missing)

        """
        if cluster_obj is None:
            cluster_obj = self.__cluster_obj

        if not all([rule_obj, cluster_obj]):
            return {}

        return dict(rule_key=rule_obj.key,
                    rule_enabled=rule_obj.enabled,
                    rule_name=rule_obj.name,
                    rule_mandatory=rule_obj.mandatory,
                    rule_uuid=rule_obj.ruleUuid,
                    rule_vm_group_name=rule_obj.vmGroupName,
                    rule_affine_host_group_name=rule_obj.affineHostGroupName,
                    rule_anti_affine_host_group_name=rule_obj.antiAffineHostGroupName,
                    rule_vms=self.__get_all_from_group(group_name=rule_obj.vmGroupName,
                                                       cluster_obj=cluster_obj),
                    rule_affine_hosts=self.__get_all_from_group(group_name=rule_obj.affineHostGroupName,
                                                                cluster_obj=cluster_obj,
                                                                host_group=True),
                    rule_anti_affine_hosts=self.__get_all_from_group(group_name=rule_obj.antiAffineHostGroupName,
                                                                     cluster_obj=cluster_obj,
                                                                     host_group=True),
                    rule_type="vm_host_rule"
                    )

    def __get_all_from_group(self, group_name=None, cluster_obj=None, host_group=False):
        """
        Return all VM / host names that belong to the given cluster group.
        Args:
            group_name: Group name to resolve
            cluster_obj: Cluster managed object (defaults to resolved cluster)
            host_group: True to resolve a HostGroup, False for a VmGroup

        Returns: List of VM/host names belonging to the group ([] if not found)

        """
        obj_name_list = []

        if not all([group_name, cluster_obj]):
            return obj_name_list

        for group in cluster_obj.configurationEx.group:
            if group.name != group_name:
                continue
            if not host_group and isinstance(group, vim.cluster.VmGroup):
                obj_name_list = [vm.name for vm in group.vm]
                break
            if host_group and isinstance(group, vim.cluster.HostGroup):
                obj_name_list = [host.name for host in group.host]
                break

        return obj_name_list

    def __check_rule_has_changed(self, rule_obj, cluster_obj=None):
        """
        Return True when the existing rule differs from the requested state
        (enabled flag, mandatory flag, VM group, or host group membership).
        """

        if cluster_obj is None:
            cluster_obj = self.__cluster_obj

        existing_rule = self.__normalize_vm_host_rule_spec(rule_obj=rule_obj, cluster_obj=cluster_obj)

        # Check if anything has changed; the host group may be referenced as
        # either the affine or the anti-affine group of the existing rule.
        if (
            (existing_rule["rule_enabled"] == self.__enabled)
            and (existing_rule["rule_mandatory"] == self.__mandatory)
            and (existing_rule["rule_vm_group_name"] == self.__vm_group_name)
            and (
                existing_rule["rule_affine_host_group_name"] == self.__host_group_name
                or existing_rule["rule_anti_affine_host_group_name"]
                == self.__host_group_name
            )
        ):

            return False
        else:
            return True

    def create(self):
        """
        Create the VM-Host DRS rule, or edit it in place when a rule with the
        same name already exists and its settings differ.
        """
        rule_obj = self.__get_rule_key_by_name(rule_name=self.__rule_name)

        # Check if rule exists
        if rule_obj:

            operation = 'edit'
            rule_changed = self.__check_rule_has_changed(rule_obj)

        else:
            operation = 'add'

        # Check if anything has changed when editing
        if operation == 'add' or (operation == 'edit' and rule_changed is True):

            rule = vim.cluster.VmHostRuleInfo()

            # Check if already rule exists
            if rule_obj:
                # This need to be set in order to edit a existing rule
                rule.key = rule_obj.key

            rule.enabled = self.__enabled
            rule.mandatory = self.__mandatory
            rule.name = self.__rule_name

            # affinity_rule decides which side of the rule the host group
            # is attached to.
            if self.__affinity_rule:
                rule.affineHostGroupName = self.__host_group_name
            else:
                rule.antiAffineHostGroupName = self.__host_group_name

            rule.vmGroupName = self.__vm_group_name

            rule_spec = vim.cluster.RuleSpec(info=rule, operation=operation)
            config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])

            # Skip the actual reconfigure in check mode, but still report
            # changed and the (pre-existing) rule state below.
            if not self.module.check_mode:

                task = self.__cluster_obj.ReconfigureEx(config_spec, modify=True)
                wait_for_task(task)

            self.__changed = True

        rule_obj = self.__get_rule_key_by_name(rule_name=self.__rule_name)
        self.__result = self.__normalize_vm_host_rule_spec(rule_obj)

        if operation == 'edit':
            self.__msg = "Updated DRS rule `%s` successfully" % (self.__rule_name)
        else:
            self.__msg = "Created DRS rule `%s` successfully" % (self.__rule_name)

    # Delete an existing rule by name
    def delete(self, rule_name=None):
        """
        Delete the VM-Host DRS rule with the given name (defaults to the
        module's drs_rule_name); no-op when the rule does not exist.
        """
        if rule_name is None:
            rule_name = self.__rule_name

        rule_obj = self.__get_rule_key_by_name(rule_name=rule_name)

        if rule_obj is not None:

            rule_key = int(rule_obj.key)
            rule_spec = vim.cluster.RuleSpec(removeKey=rule_key, operation='remove')
            config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])

            if not self.module.check_mode:

                task = self.__cluster_obj.ReconfigureEx(config_spec, modify=True)
                wait_for_task(task)

            self.__changed = True

        if self.__changed:
            self.__msg = "Deleted DRS rule `%s` successfully" % (self.__rule_name)
        else:
            self.__msg = "DRS Rule `%s` does not exists or already deleted" % (self.__rule_name)
+
+
def main():
    """Module entry point for managing VM-Host DRS rules."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        state=dict(type='str', default='present', choices=['absent', 'present']),
        vm_group_name=dict(type='str'),
        host_group_name=dict(type='str'),
        cluster_name=dict(type='str', required=True),
        datacenter=dict(type='str', required=False, aliases=['datacenter_name']),
        drs_rule_name=dict(type='str', required=True),
        enabled=dict(type='bool', default=False),
        mandatory=dict(type='bool', default=False),
        affinity_rule=dict(type='bool', default=True),
    )

    # Group names are only needed when creating/editing a rule.
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_if=[['state', 'present', ['vm_group_name', 'host_group_name']]],
        supports_check_mode=True,
    )

    try:
        # Create instance of VmwareVmHostRuleDrs
        vm_host_drs = VmwareVmHostRuleDrs(module=module)

        state = module.params['state']
        if state == 'present':
            vm_host_drs.create()
        elif state == 'absent':
            vm_host_drs.delete()

        # Set results
        results = dict(
            msg=vm_host_drs.get_msg(),
            failed=False,
            changed=vm_host_drs.get_changed(),
            result=vm_host_drs.get_result(),
        )

    except Exception as error:
        results = dict(failed=True, msg="Error: `%s`" % error)

    if results['failed']:
        module.fail_json(**results)
    else:
        module.exit_json(**results)


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_vm_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_vm_info.py
new file mode 100644
index 000000000..142cd3b0b
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_vm_info.py
@@ -0,0 +1,479 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Fedor Vompe <f.vompe () comptek.ru>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_vm_info
+short_description: Return basic info pertaining to a VMware machine guest
+description:
+- Return basic information pertaining to a vSphere or ESXi virtual machine guest.
+- Cluster name as fact is added in version 2.7.
+author:
+- Joseph Callen (@jcpowermac)
+- Abhijeet Kasurde (@Akasurde)
+- Fedor Vompe (@sumkincpp)
+notes:
+- Fact about C(moid) added in VMware collection 1.4.0.
+- Fact about C(datastore_url) is added in VMware collection 1.18.0.
+options:
+ vm_type:
+ description:
+ - If set to C(vm), then information is gathered for virtual machines only.
+ - If set to C(template), then information is gathered for virtual machine templates only.
+ - If set to C(all), then information is gathered for all virtual machines and virtual machine templates.
+ required: false
+ default: 'all'
+ choices: [ all, vm, template ]
+ type: str
+ show_attribute:
+ description:
+ - Attributes related to the VM guest are shown only when this is set to C(true).
+ default: false
+ type: bool
+ folder:
+ description:
+ - Specify a folder location of VMs to gather information from.
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ type: str
+ show_cluster:
+ description:
+ - The virtual machine's cluster is shown if set to C(true).
+ version_added: '3.5.0'
+ default: true
+ type: bool
+ show_datacenter:
+ description:
+ - The virtual machine's datacenter is shown if set to C(true).
+ version_added: '3.5.0'
+ default: true
+ type: bool
+ show_datastore:
+ description:
+ - The virtual machine's datastore is shown if set to C(true).
+ version_added: '3.5.0'
+ default: true
+ type: bool
+ show_esxi_hostname:
+ description:
+ - The virtual machine's ESXi host is shown if set to C(true).
+ version_added: '3.5.0'
+ default: true
+ type: bool
+ show_folder:
+ description:
+ - Show folders
+ version_added: '3.7.0'
+ default: true
+ type: bool
+ show_mac_address:
+ description:
+ - The virtual machine's mac address is shown if set to C(true).
+ version_added: '3.5.0'
+ default: true
+ type: bool
+ show_net:
+ description:
+ - The virtual machine's network is shown if set to C(true).
+ version_added: '3.5.0'
+ default: true
+ type: bool
+ show_resource_pool:
+ description:
+ - The virtual machine's resource pool is shown if set to C(true).
+ version_added: '3.5.0'
+ default: true
+ type: bool
+ show_tag:
+ description:
+ - Tags related to virtual machine are shown if set to C(true).
+ default: false
+ type: bool
+ show_allocated:
+ version_added: '2.5.0'
+ description:
+ - Allocated storage in bytes and memory in MB are shown if set to C(true).
+ default: false
+ type: bool
+ vm_name:
+ description:
+ - Name of the virtual machine to get related configurations information from.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: Gather all registered virtual machines
+ community.vmware.vmware_vm_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ delegate_to: localhost
+ register: vminfo
+
+- debug:
+ var: vminfo.virtual_machines
+
+- name: Gather one specific VM
+ community.vmware.vmware_vm_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ vm_name: 'vm_name_as_per_vcenter'
+ delegate_to: localhost
+ register: vm_info
+
+- debug:
+ var: vminfo.virtual_machines
+
+- name: Gather only registered virtual machine templates
+ community.vmware.vmware_vm_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ vm_type: template
+ delegate_to: localhost
+ register: template_info
+
+- debug:
+ var: template_info.virtual_machines
+
+- name: Gather only registered virtual machines
+ community.vmware.vmware_vm_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ vm_type: vm
+ delegate_to: localhost
+ register: vm_info
+
+- debug:
+ var: vm_info.virtual_machines
+
+- name: Get UUID from given VM Name
+ block:
+ - name: Get virtual machine info
+ community.vmware.vmware_vm_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ folder: "/datacenter/vm/folder"
+ delegate_to: localhost
+ register: vm_info
+
+ - debug:
+ msg: "{{ item.uuid }}"
+ with_items:
+ - "{{ vm_info.virtual_machines | community.general.json_query(query) }}"
+ vars:
+ query: "[?guest_name=='DC0_H0_VM0']"
+
+- name: Get Tags from given VM Name
+ block:
+ - name: Get virtual machine info
+ community.vmware.vmware_vm_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ folder: "/datacenter/vm/folder"
+ delegate_to: localhost
+ register: vm_info
+
+ - debug:
+ msg: "{{ item.tags }}"
+ with_items:
+ - "{{ vm_info.virtual_machines | community.general.json_query(query) }}"
+ vars:
+ query: "[?guest_name=='DC0_H0_VM0']"
+
+- name: Gather all VMs from a specific folder
+ community.vmware.vmware_vm_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ folder: "/Asia-Datacenter1/vm/prod"
+ delegate_to: localhost
+ register: vm_info
+
+- name: Get datastore_url from given VM name
+ block:
+ - name: Get virtual machine info
+ community.vmware.vmware_vm_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ delegate_to: localhost
+ register: vm_info
+
+ - debug:
+ msg: "{{ item.datastore_url }}"
+ with_items:
+ - "{{ vm_info.virtual_machines | community.general.json_query(query) }}"
+ vars:
+ query: "[?guest_name=='DC0_H0_VM0']"
+'''
+
+RETURN = r'''
+virtual_machines:
+ description: list of dictionary of virtual machines and their information
+ returned: success
+ type: list
+ sample: [
+ {
+ "guest_name": "ubuntu_t",
+ "datacenter": "Datacenter-1",
+ "cluster": null,
+ "esxi_hostname": "10.76.33.226",
+ "folder": "/Datacenter-1/vm",
+ "guest_fullname": "Ubuntu Linux (64-bit)",
+ "ip_address": "",
+ "mac_address": [
+ "00:50:56:87:a5:9a"
+ ],
+ "power_state": "poweredOff",
+ "uuid": "4207072c-edd8-3bd5-64dc-903fd3a0db04",
+ "vm_network": {
+ "00:50:56:87:a5:9a": {
+ "ipv4": [
+ "10.76.33.228"
+ ],
+ "ipv6": []
+ }
+ },
+ "attributes": {
+ "job": "backup-prepare"
+ },
+ "datastore_url": [
+ {
+ "name": "t880-o2g",
+ "url": "/vmfs/volumes/e074264a-e5c82a58"
+ }
+ ],
+ "tags": [
+ {
+ "category_id": "urn:vmomi:InventoryServiceCategory:b316cc45-f1a9-4277-811d-56c7e7975203:GLOBAL",
+ "category_name": "cat_0001",
+ "description": "",
+ "id": "urn:vmomi:InventoryServiceTag:43737ec0-b832-4abf-abb1-fd2448ce3b26:GLOBAL",
+ "name": "tag_0001"
+ }
+ ],
+ "moid": "vm-24",
+ "allocated": {
+ "storage": 500000000,
+ "cpu": 2,
+ "memory": 16
+ },
+ }
+ ]
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, \
+ get_all_objs, vmware_argument_spec, _get_vm_prop, get_parent_datacenter, find_vm_by_name
+from ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient
+
+
class VmwareVmInfo(PyVmomi):
    """Gather configuration and runtime information about virtual machines."""

    def __init__(self, module):
        super(VmwareVmInfo, self).__init__(module)
        # Tags are only reachable through the vCenter REST API, so the REST
        # client is created solely when 'show_tag' was requested.
        if self.module.params.get('show_tag'):
            self.vmware_client = VmwareRestClient(self.module)

    def get_tag_info(self, vm_dynamic_obj):
        """Return the tags attached to the given VM, keyed by its managed object id."""
        return self.vmware_client.get_tags_for_vm(vm_mid=vm_dynamic_obj._moId)

    def get_vm_attributes(self, vm):
        """Return the VM's custom attributes as a name -> value dict."""
        # custom_field_mgr holds the vCenter-wide custom field definitions;
        # vm.customValue holds the per-VM values, joined on the field key.
        return dict((x.name, v.value) for x in self.custom_field_mgr
                    for v in vm.customValue if x.key == v.key)

    # https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getallvms.py
    def get_virtual_machines(self):
        """
        Get one/all virtual machines and related configurations information.

        Returns a list of dicts (one per VM), filtered by the 'vm_type'
        parameter ('vm', 'template' or 'all'); most fields are only
        populated when the corresponding 'show_*' option is enabled.
        """
        folder = self.params.get('folder')
        folder_obj = None
        if folder:
            # Restrict the search scope when an inventory folder path is given.
            folder_obj = self.content.searchIndex.FindByInventoryPath(folder)
            if not folder_obj:
                self.module.fail_json(msg="Failed to find folder specified by %(folder)s" % self.params)

        vm_name = self.params.get('vm_name')
        if vm_name:
            # Single-VM mode: look up exactly one VM by name (within the folder, if any).
            virtual_machine = find_vm_by_name(self.content, vm_name=vm_name, folder=folder_obj)
            if not virtual_machine:
                self.module.fail_json(msg="Failed to find virtual machine %s" % vm_name)
            else:
                virtual_machines = [virtual_machine]
        else:
            virtual_machines = get_all_objs(self.content, [vim.VirtualMachine], folder=folder_obj)
        _virtual_machines = []

        for vm in virtual_machines:
            # Guest IP may be unset (tools not running / powered off); normalize to "".
            _ip_address = ""
            summary = vm.summary
            if summary.guest is not None:
                _ip_address = summary.guest.ipAddress
                if _ip_address is None:
                    _ip_address = ""
            _mac_address = []
            if self.module.params.get('show_mac_address'):
                all_devices = _get_vm_prop(vm, ('config', 'hardware', 'device'))
                if all_devices:
                    for dev in all_devices:
                        # Only NICs (VirtualEthernetCard subclasses) carry a MAC.
                        if isinstance(dev, vim.vm.device.VirtualEthernetCard):
                            _mac_address.append(dev.macAddress)

            net_dict = {}
            if self.module.params.get('show_net'):
                vmnet = _get_vm_prop(vm, ('guest', 'net'))
                if vmnet:
                    for device in vmnet:
                        net_dict[device.macAddress] = dict()
                        net_dict[device.macAddress]['ipv4'] = []
                        net_dict[device.macAddress]['ipv6'] = []
                        for ip_addr in device.ipAddress:
                            # NOTE(review): family detection by the "::" marker —
                            # a full-form IPv6 address without "::" would be
                            # misclassified as IPv4; confirm guest-reported
                            # addresses are always compressed.
                            if "::" in ip_addr:
                                net_dict[device.macAddress]['ipv6'].append(ip_addr)
                            else:
                                net_dict[device.macAddress]['ipv4'].append(ip_addr)

            esxi_hostname = None
            esxi_parent = None

            # Host lookup is shared by show_esxi_hostname and show_cluster.
            if self.module.params.get('show_esxi_hostname') or self.module.params.get('show_cluster'):
                if summary.runtime.host:
                    esxi_hostname = summary.runtime.host.summary.config.name
                    esxi_parent = summary.runtime.host.parent

            cluster_name = None
            if self.module.params.get('show_cluster'):
                # The host's parent is only a cluster when the host is clustered.
                if esxi_parent and isinstance(esxi_parent, vim.ClusterComputeResource):
                    cluster_name = summary.runtime.host.parent.name

            resource_pool = None
            if self.module.params.get('show_resource_pool'):
                # Skip the owner's implicit root pool; only report explicit pools.
                if vm.resourcePool and vm.resourcePool != vm.resourcePool.owner.resourcePool:
                    resource_pool = vm.resourcePool.name

            vm_attributes = dict()
            if self.module.params.get('show_attribute'):
                vm_attributes = self.get_vm_attributes(vm)

            vm_tags = list()
            if self.module.params.get('show_tag'):
                vm_tags = self.get_tag_info(vm)

            allocated = {}
            if self.module.params.get('show_allocated'):
                # Storage allocation = sum of all virtual disk capacities (bytes).
                storage_allocated = 0
                for device in vm.config.hardware.device:
                    if isinstance(device, vim.vm.device.VirtualDisk):
                        storage_allocated += device.capacityInBytes
                allocated = {
                    "storage": storage_allocated,
                    "cpu": vm.config.hardware.numCPU,
                    "memory": vm.config.hardware.memoryMB}

            vm_folder = None
            if self.module.params.get('show_folder'):
                vm_folder = PyVmomi.get_vm_path(content=self.content, vm_name=vm)

            datacenter = None
            if self.module.params.get('show_datacenter'):
                datacenter = get_parent_datacenter(vm)
            datastore_url = list()
            if self.module.params.get('show_datastore'):
                # Only expose the 'name' and 'url' attributes of each datastore entry.
                datastore_attributes = ('name', 'url')
                vm_datastore_urls = _get_vm_prop(vm, ('config', 'datastoreUrl'))
                if vm_datastore_urls:
                    for entry in vm_datastore_urls:
                        datastore_url.append({key: getattr(entry, key) for key in dir(entry) if key in datastore_attributes})
            virtual_machine = {
                "guest_name": summary.config.name,
                "guest_fullname": summary.config.guestFullName,
                "power_state": summary.runtime.powerState,
                "ip_address": _ip_address,  # Kept for backward compatibility
                "mac_address": _mac_address,  # Kept for backward compatibility
                "uuid": summary.config.uuid,
                "vm_network": net_dict,
                "esxi_hostname": esxi_hostname,
                "datacenter": None if datacenter is None else datacenter.name,
                "cluster": cluster_name,
                "resource_pool": resource_pool,
                "attributes": vm_attributes,
                "tags": vm_tags,
                "folder": vm_folder,
                "moid": vm._moId,
                "datastore_url": datastore_url,
                "allocated": allocated
            }

            # Filter by VM vs. template according to the 'vm_type' parameter.
            vm_type = self.module.params.get('vm_type')
            is_template = _get_vm_prop(vm, ('config', 'template'))
            if vm_type == 'vm' and not is_template:
                _virtual_machines.append(virtual_machine)
            elif vm_type == 'template' and is_template:
                _virtual_machines.append(virtual_machine)
            elif vm_type == 'all':
                _virtual_machines.append(virtual_machine)
        return _virtual_machines
+
+
def main():
    """Module entry point: gather virtual machine facts and exit.

    A pure information module: it never changes vCenter state, supports
    check mode trivially, and always reports ``changed=False``.
    """
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        vm_type=dict(type='str', choices=['vm', 'all', 'template'], default='all'),
        # Use a real boolean default for consistency with the other show_*
        # options; the previous default='no' relied on Ansible's implicit
        # string-to-bool coercion and coerces to the same value (False).
        show_attribute=dict(type='bool', default=False),
        show_cluster=dict(type='bool', default=True),
        show_datacenter=dict(type='bool', default=True),
        show_datastore=dict(type='bool', default=True),
        show_folder=dict(type='bool', default=True),
        show_esxi_hostname=dict(type='bool', default=True),
        show_mac_address=dict(type='bool', default=True),
        show_net=dict(type='bool', default=True),
        show_resource_pool=dict(type='bool', default=True),
        show_tag=dict(type='bool', default=False),
        show_allocated=dict(type='bool', default=False),
        folder=dict(type='str'),
        vm_name=dict(type='str')
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    vmware_vm_info = VmwareVmInfo(module)
    _virtual_machines = vmware_vm_info.get_virtual_machines()

    module.exit_json(changed=False, virtual_machines=_virtual_machines)


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_vm_shell.py b/ansible_collections/community/vmware/plugins/modules/vmware_vm_shell.py
new file mode 100644
index 000000000..c3ae86a2a
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_vm_shell.py
@@ -0,0 +1,365 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015-16, Ritesh Khadgaray <khadgaray () gmail.com>
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_vm_shell
+short_description: Run commands in a VMware guest operating system
+description:
+ - Module allows user to run common system administration commands in the guest operating system.
+author:
+ - Ritesh Khadgaray (@ritzk)
+ - Abhijeet Kasurde (@Akasurde)
+notes:
+ - Only the first match against vm_id is used, even if there are multiple matches.
+options:
+ datacenter:
+ description:
+ - The datacenter hosting the virtual machine.
+ - If set, it will help to speed up virtual machine search.
+ type: str
+ cluster:
+ description:
+ - The cluster hosting the virtual machine.
+ - If set, it will help to speed up virtual machine search.
+ type: str
+ folder:
+ description:
+ - Destination folder, absolute or relative path to find an existing guest or create the new guest.
+ - The folder should include the datacenter. ESX's datacenter is ha-datacenter.
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ type: str
+ vm_id:
+ description:
+ - Name of the virtual machine to work with.
+ required: true
+ type: str
+ vm_id_type:
+ description:
+ - The VMware identification method by which the virtual machine will be identified.
+ default: vm_name
+ choices: ['uuid', 'instance_uuid', 'dns_name', 'inventory_path', 'vm_name']
+ type: str
+ vm_username:
+ description:
+ - The user to login-in to the virtual machine.
+ required: true
+ type: str
+ vm_password:
+ description:
+ - The password used to login-in to the virtual machine.
+ required: true
+ type: str
+ vm_shell:
+ description:
+ - The absolute path to the program to start.
+ - On Linux, shell is executed via bash.
+ required: true
+ type: str
+ vm_shell_args:
+ description:
+ - The argument to the program.
+ - Characters which must be escaped to the shell must also be escaped on the command line provided.
+ default: " "
+ type: str
+ vm_shell_env:
+ description:
+ - Comma separated list of environment variable, specified in the guest OS notation.
+ type: list
+ elements: str
+ vm_shell_cwd:
+ description:
+ - The current working directory of the application from which it will be run.
+ type: str
+ wait_for_process:
+ description:
+ - If set to C(true), module will wait for process to complete in the given virtual machine.
+ default: false
+ type: bool
+ timeout:
+ description:
+ - Timeout in seconds.
+ - If set to positive integers, then C(wait_for_process) will honor this parameter and will exit after this timeout.
+ default: 3600
+ type: int
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Run command inside a virtual machine
+ community.vmware.vmware_vm_shell:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter }}"
+ folder: "/{{datacenter}}/vm"
+ vm_id: "{{ vm_name }}"
+ vm_username: root
+ vm_password: superSecret
+ vm_shell: /bin/echo
+ vm_shell_args: " $var >> myFile "
+ vm_shell_env:
+ - "PATH=/bin"
+ - "VAR=test"
+ vm_shell_cwd: "/tmp"
+ delegate_to: localhost
+ register: shell_command_output
+
+- name: Run command inside a virtual machine with wait and timeout
+ community.vmware.vmware_vm_shell:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter }}"
+ folder: "/{{datacenter}}/vm"
+ vm_id: NameOfVM
+ vm_username: root
+ vm_password: superSecret
+ vm_shell: /bin/sleep
+ vm_shell_args: 100
+ wait_for_process: true
+ timeout: 2000
+ delegate_to: localhost
+ register: shell_command_with_wait_timeout
+
+- name: Change user password in the guest machine
+ community.vmware.vmware_vm_shell:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter }}"
+ folder: "/{{datacenter}}/vm"
+ vm_id: "{{ vm_name }}"
+ vm_username: sample
+ vm_password: old_password
+ vm_shell: "/bin/echo"
+ vm_shell_args: "-e 'old_password\nnew_password\nnew_password' | passwd sample > /tmp/$$.txt 2>&1"
+ delegate_to: localhost
+
+- name: Change hostname of guest machine
+ community.vmware.vmware_vm_shell:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ datacenter }}"
+ folder: "/{{datacenter}}/vm"
+ vm_id: "{{ vm_name }}"
+ vm_username: testUser
+ vm_password: SuperSecretPassword
+ vm_shell: "/usr/bin/hostnamectl"
+ vm_shell_args: "set-hostname new_hostname > /tmp/$$.txt 2>&1"
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+results:
+ description: metadata about the new process after completion with wait_for_process
+ returned: on success
+ type: dict
+ sample:
+ {
+ "cmd_line": "\"/bin/sleep\" 1",
+ "end_time": "2018-04-26T05:03:21+00:00",
+ "exit_code": 0,
+ "name": "sleep",
+ "owner": "dev1",
+ "start_time": "2018-04-26T05:03:19+00:00",
+ "uuid": "564db1e2-a3ff-3b0e-8b77-49c25570bb66",
+ }
+'''
+
+import time
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi, find_cluster_by_name, find_datacenter_by_name, find_vm_by_id,
+ vmware_argument_spec)
+
+
class VMwareShellManager(PyVmomi):
    """Locate a virtual machine and run a program inside its guest OS.

    All work happens in ``__init__``: the VM is resolved from the module
    parameters, VMware Tools availability is checked, and the command is
    executed; every path ends in ``exit_json``/``fail_json``.
    """

    def __init__(self, module):
        super(VMwareShellManager, self).__init__(module)
        datacenter_name = module.params['datacenter']
        cluster_name = module.params['cluster']
        folder = module.params['folder']
        try:
            # Guest-operations process manager used to start/list guest processes.
            self.pm = self.content.guestOperationsManager.processManager
        except vmodl.fault.ManagedObjectNotFound:
            # NOTE(review): the swallowed fault leaves self.pm unset, so a
            # later StartProgramInGuest call would raise AttributeError —
            # confirm this best-effort behavior is intended.
            pass
        self.timeout = self.params.get('timeout', 3600)
        self.wait_for_pid = self.params.get('wait_for_process', False)

        # Optional datacenter/cluster parameters narrow the VM search scope.
        datacenter = None
        if datacenter_name:
            datacenter = find_datacenter_by_name(self.content, datacenter_name)
            if not datacenter:
                module.fail_json(changed=False, msg="Unable to find %(datacenter)s datacenter" % module.params)

        cluster = None
        if cluster_name:
            cluster = find_cluster_by_name(self.content, cluster_name, datacenter)
            if not cluster:
                module.fail_json(changed=False, msg="Unable to find %(cluster)s cluster" % module.params)

        # inventory_path lookups need the folder; every other id type searches
        # within the (optional) datacenter/cluster scope.
        if module.params['vm_id_type'] == 'inventory_path':
            vm = find_vm_by_id(self.content,
                               vm_id=module.params['vm_id'],
                               vm_id_type="inventory_path",
                               folder=folder)
        else:
            vm = find_vm_by_id(self.content,
                               vm_id=module.params['vm_id'],
                               vm_id_type=module.params['vm_id_type'],
                               datacenter=datacenter,
                               cluster=cluster)

        if not vm:
            module.fail_json(msg='Unable to find virtual machine.')

        # Guest operations require VMware Tools running inside the guest.
        tools_status = vm.guest.toolsStatus
        if tools_status in ['toolsNotInstalled', 'toolsNotRunning']:
            self.module.fail_json(msg="VMwareTools is not installed or is not running in the guest."
                                      " VMware Tools are necessary to run this module.")

        try:
            self.execute_command(vm, module.params)
        except vmodl.RuntimeFault as runtime_fault:
            module.fail_json(changed=False, msg=to_native(runtime_fault.msg))
        except vmodl.MethodFault as method_fault:
            module.fail_json(changed=False, msg=to_native(method_fault.msg))
        except Exception as e:
            module.fail_json(changed=False, msg=to_native(e))

    def execute_command(self, vm, params):
        """Start the program in the guest; optionally wait for it and exit the module.

        Exits via ``exit_json`` on success (or immediately with the PID when
        not waiting) and via ``fail_json`` when the awaited process returns a
        non-zero exit code.
        """
        # https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/execute_program_in_vm.py
        vm_username = params['vm_username']
        vm_password = params['vm_password']
        program_path = params['vm_shell']
        args = params['vm_shell_args']
        env = params['vm_shell_env']
        cwd = params['vm_shell_cwd']

        # Guest-local credentials (not the vCenter ones).
        credentials = vim.vm.guest.NamePasswordAuthentication(username=vm_username,
                                                              password=vm_password)
        cmd_spec = vim.vm.guest.ProcessManager.ProgramSpec(arguments=args,
                                                           envVariables=env,
                                                           programPath=program_path,
                                                           workingDirectory=cwd)

        # StartProgramInGuest returns the PID of the started process.
        res = self.pm.StartProgramInGuest(vm=vm, auth=credentials, spec=cmd_spec)
        if self.wait_for_pid:
            res_data = self.wait_for_process(vm, res, credentials)
            results = dict(uuid=vm.summary.config.uuid,
                           owner=res_data.owner,
                           start_time=res_data.startTime.isoformat(),
                           end_time=res_data.endTime.isoformat(),
                           exit_code=res_data.exitCode,
                           name=res_data.name,
                           cmd_line=res_data.cmdLine)

            if res_data.exitCode != 0:
                results['msg'] = "Failed to execute command"
                results['changed'] = False
                results['failed'] = True
                self.module.fail_json(**results)
            else:
                results['changed'] = True
                results['failed'] = False
                self.module.exit_json(**results)
        else:
            # Fire-and-forget: report the PID as 'msg'.
            self.module.exit_json(changed=True, uuid=vm.summary.config.uuid, msg=res)

    def process_exists_in_guest(self, vm, pid, creds):
        """Return (still_running, process_info) for the given guest PID.

        ``process_info`` is None while the process is running; once finished,
        it carries the ListProcessesInGuest record (with exitCode set).
        """
        res = self.pm.ListProcessesInGuest(vm, creds, pids=[pid])
        if not res:
            self.module.fail_json(
                changed=False, msg='ListProcessesInGuest: None (unexpected)')
        res = res[0]
        # exitCode stays None until the process terminates.
        if res.exitCode is None:
            return True, None
        else:
            return False, res

    def wait_for_process(self, vm, pid, creds):
        """Poll the guest every 5 seconds until the process ends or timeout elapses.

        Returns the finished process record; fails the module on timeout.
        """
        start_time = time.time()
        while True:
            current_time = time.time()
            process_status, res_data = self.process_exists_in_guest(vm, pid, creds)
            if not process_status:
                return res_data
            elif current_time - start_time >= self.timeout:
                self.module.fail_json(
                    msg="Timeout waiting for process to complete.",
                    vm=vm._moId,
                    pid=pid,
                    start_time=start_time,
                    current_time=current_time,
                    timeout=self.timeout)
            else:
                time.sleep(5)
+
+
def main():
    """Entry point: build the argument spec and hand control to VMwareShellManager."""
    spec = vmware_argument_spec()
    spec.update(
        datacenter=dict(type='str'),
        cluster=dict(type='str'),
        folder=dict(type='str'),
        vm_id=dict(type='str', required=True),
        vm_id_type=dict(
            default='vm_name',
            type='str',
            choices=['inventory_path', 'uuid', 'instance_uuid', 'dns_name', 'vm_name'],
        ),
        vm_username=dict(type='str', required=True),
        vm_password=dict(type='str', no_log=True, required=True),
        vm_shell=dict(type='str', required=True),
        vm_shell_args=dict(default=" ", type='str'),
        vm_shell_env=dict(type='list', elements='str'),
        vm_shell_cwd=dict(type='str'),
        wait_for_process=dict(type='bool', default=False),
        timeout=dict(type='int', default=3600),
    )

    # Check mode is unsupported: running a command in the guest cannot be simulated.
    module = AnsibleModule(
        argument_spec=spec,
        supports_check_mode=False,
        # A folder is mandatory when the VM is addressed by inventory path.
        required_if=[['vm_id_type', 'inventory_path', ['folder']]],
    )

    # The manager performs all work (and exits the module) in its constructor.
    VMwareShellManager(module)


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_vm_storage_policy.py b/ansible_collections/community/vmware/plugins/modules/vmware_vm_storage_policy.py
new file mode 100644
index 000000000..e1cf859bf
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_vm_storage_policy.py
@@ -0,0 +1,361 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Ansible Project
+# Copyright: (c) 2020, Dustin Scott <sdustin@vmware.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: vmware_vm_storage_policy
+short_description: Create vSphere storage policies
+description:
+- A vSphere storage policy defines metadata that describes storage requirements
+ for virtual machines and storage capabilities of storage providers.
+- Currently, only tag-based storage policy creation is supported.
+author:
+- Dustin Scott (@scottd018)
+options:
+ name:
+ description:
+ - Name of the storage policy to create, update, or delete.
+ required: true
+ type: str
+ description:
+ description:
+ - Description of the storage policy to create or update.
+ - This parameter is ignored when C(state=absent).
+ type: str
+ required: false
+ tag_category:
+ description:
+ - Name of the pre-existing tag category to assign to the storage policy.
+ - This parameter is ignored when C(state=absent).
+ - This parameter is required when C(state=present).
+ required: false
+ type: str
+ tag_name:
+ description:
+ - Name of the pre-existing tag to assign to the storage policy.
+ - This parameter is ignored when C(state=absent).
+ - This parameter is required when C(state=present).
+ required: false
+ type: str
+ tag_affinity:
+ description:
+ - If set to C(true), the storage policy enforces that virtual machines require the existence of a tag for datastore placement.
+ - If set to C(false), the storage policy enforces that virtual machines require the absence of a tag for datastore placement.
+ - This parameter is ignored when C(state=absent).
+ required: false
+ type: bool
+ default: true
+ state:
+ description:
+ - State of storage policy.
+ - If set to C(present), the storage policy is created.
+ - If set to C(absent), the storage policy is deleted.
+ default: present
+ choices: [ absent, present ]
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: Create or update a vSphere tag-based storage policy
+ community.vmware.vmware_vm_storage_policy:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ name: "vSphere storage policy"
+ description: "vSphere storage performance policy"
+ tag_category: "performance_tier"
+ tag_name: "gold"
+ tag_affinity: true
+ state: "present"
+ delegate_to: localhost
+
+- name: Remove a vSphere tag-based storage policy
+ community.vmware.vmware_vm_storage_policy:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ name: "vSphere storage policy"
+ state: "absent"
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+vmware_vm_storage_policy:
+ description: dictionary of information for the storage policy
+ returned: success
+ type: dict
+ sample: {
+ "vmware_vm_storage_policy": {
+ "description": "Storage policy for gold-tier storage",
+ "id": "aa6d5a82-1c88-45da-85d3-3d74b91a5bad",
+ "name": "gold"
+ }
+ }
+'''
+
+try:
+ from pyVmomi import pbm
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware_spbm import SPBM
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec
+from ansible_collections.community.vmware.plugins.module_utils.vmware_rest_client import VmwareRestClient
+
+
class VmwareStoragePolicyManager(SPBM):
    """Create, update, and delete tag-based SPBM storage policies in vCenter.

    The SPBM managed-object (MOB) builder helpers assemble the nested
    pyVmomi ``pbm`` structures; ``ensure_state`` drives the module and
    always terminates via ``exit_json``/``fail_json``.
    """

    def __init__(self, module):
        super(VmwareStoragePolicyManager, self).__init__(module)
        # REST client is used to verify that the tag and tag category exist.
        self.rest_client = VmwareRestClient(module)

    #
    # MOB METHODS
    #
    # These will generate the individual items with the following expected structure (see
    # https://github.com/vmware/pyvmomi/blob/master/pyVmomi/PbmObjects.py):
    #
    # PbmProfile: array
    # - name: string
    #   description: string
    #   constraints: PbmCapabilityConstraints
    #     subProfiles: ArrayOfPbmCapabilitySubProfile
    #     - name: string
    #       capability: ArrayOfPbmCapabilityInstance
    #       - constraint: ArrayOfCapabilityConstraintInstance
    #         - id: string
    #           value: anyType
    #           values: arrayOfStrings
    #           - tags
    #
    #
    def create_mob_tag_values(self, tags):
        """Wrap the list of tag names in a PBM discrete-set value."""
        return pbm.capability.types.DiscreteSet(values=tags)

    def create_mob_capability_property_instance(self, tag_id, tag_operator, tags):
        """Build a property instance binding tag values to an operator (None or 'NOT')."""
        return pbm.capability.PropertyInstance(
            id=tag_id,
            operator=tag_operator,
            value=self.create_mob_tag_values(tags)
        )

    def create_mob_capability_constraint_instance(self, tag_id, tag_operator, tags):
        """Build a single-property constraint instance."""
        return pbm.capability.ConstraintInstance(
            propertyInstance=[self.create_mob_capability_property_instance(tag_id, tag_operator, tags)]
        )

    def create_mob_capability_metadata_uniqueid(self, tag_category):
        """Build the capability id under the tag-placement namespace."""
        return pbm.capability.CapabilityMetadata.UniqueId(
            namespace="http://www.vmware.com/storage/tag",
            id=tag_category
        )

    def create_mob_capability_instance(self, tag_id, tag_operator, tags, tag_category):
        """Build a capability instance for the given tag category and constraint."""
        return pbm.capability.CapabilityInstance(
            id=self.create_mob_capability_metadata_uniqueid(tag_category),
            constraint=[self.create_mob_capability_constraint_instance(tag_id, tag_operator, tags)]
        )

    def create_mob_capability_constraints_subprofile(self, tag_id, tag_operator, tags, tag_category):
        """Build the 'Tag based placement' rule-set sub-profile."""
        return pbm.profile.SubProfileCapabilityConstraints.SubProfile(
            name="Tag based placement",
            capability=[self.create_mob_capability_instance(tag_id, tag_operator, tags, tag_category)]
        )

    def create_mob_capability_subprofile(self, tag_id, tag_operator, tags, tag_category):
        """Build the top-level constraints object holding the sub-profile."""
        return pbm.profile.SubProfileCapabilityConstraints(
            subProfiles=[self.create_mob_capability_constraints_subprofile(tag_id, tag_operator, tags, tag_category)]
        )

    def create_mob_pbm_update_spec(self, tag_id, tag_operator, tags, tag_category, description):
        """Build an update spec for PbmUpdate (name cannot be changed here)."""
        return pbm.profile.CapabilityBasedProfileUpdateSpec(
            description=description,
            constraints=self.create_mob_capability_subprofile(tag_id, tag_operator, tags, tag_category)
        )

    def create_mob_pbm_create_spec(self, tag_id, tag_operator, tags, tag_category, description, name):
        """Build a create spec for PbmCreate (STORAGE resource, REQUIREMENT category)."""
        return pbm.profile.CapabilityBasedProfileCreateSpec(
            name=name,
            description=description,
            resourceType=pbm.profile.ResourceType(resourceType="STORAGE"),
            category="REQUIREMENT",
            constraints=self.create_mob_capability_subprofile(tag_id, tag_operator, tags, tag_category)
        )

    def get_tag_constraints(self, capabilities):
        """
        Return tag constraints for a profile given its capabilities.

        Returns an empty dict when no tag-based property instance is found.
        """
        tag_constraints = {}
        for capability in capabilities:
            for constraint in capability.constraint:
                if hasattr(constraint, 'propertyInstance'):
                    for propertyInstance in constraint.propertyInstance:
                        if hasattr(propertyInstance.value, 'values'):
                            tag_constraints['id'] = propertyInstance.id
                            tag_constraints['values'] = propertyInstance.value.values
                            tag_constraints['operator'] = propertyInstance.operator

        return tag_constraints

    def get_profile_manager(self):
        """Connect to the SPBM endpoint and return its profile manager."""
        self.get_spbm_connection()

        return self.spbm_content.profileManager

    def get_storage_policies(self, profile_manager):
        """Return all REQUIREMENT-category storage profiles known to vCenter."""
        profile_ids = profile_manager.PbmQueryProfile(
            resourceType=pbm.profile.ResourceType(resourceType="STORAGE"),
            profileCategory="REQUIREMENT"
        )
        profiles = []
        if profile_ids:
            profiles = profile_manager.PbmRetrieveContent(profileIds=profile_ids)

        return profiles

    def format_profile(self, profile):
        """Reduce a pbm profile object to the dict returned to the user."""
        formatted_profile = {
            'name': profile.name,
            'id': profile.profileId.uniqueId,
            'description': profile.description
        }

        return formatted_profile

    def format_tag_mob_id(self, tag_category):
        """Return the MOB property id used for tag placement of a category."""
        return "com.vmware.storage.tag." + tag_category + ".property"

    def format_results_and_exit(self, results, policy, changed):
        """Fill in the result dict for the given policy and exit the module."""
        results['vmware_vm_storage_policy'] = self.format_profile(policy)
        results['changed'] = changed

        self.module.exit_json(**results)

    def update_storage_policy(self, policy, pbm_client, results):
        """Diff an existing policy against the requested state; update if needed and exit."""
        expected_description = self.params.get('description')
        expected_tags = [self.params.get('tag_name')]
        expected_tag_category = self.params.get('tag_category')
        expected_tag_id = self.format_tag_mob_id(expected_tag_category)
        # tag_affinity=True means "require the tag" (no operator);
        # tag_affinity=False means "require the absence of the tag" ("NOT").
        expected_operator = "NOT"
        if self.params.get('tag_affinity'):
            expected_operator = None

        needs_change = False

        if policy.description != expected_description:
            needs_change = True

        if hasattr(policy.constraints, 'subProfiles'):
            for subprofile in policy.constraints.subProfiles:
                tag_constraints = self.get_tag_constraints(subprofile.capability)
                # Use .get(): a sub-profile without tag-based property
                # instances yields an empty dict, which previously raised
                # KeyError here; treat it as a mismatch instead.
                if tag_constraints.get('id') == expected_tag_id:
                    if tag_constraints.get('values') != expected_tags:
                        needs_change = True
                else:
                    needs_change = True

                if tag_constraints.get('operator') != expected_operator:
                    needs_change = True
        else:
            needs_change = True

        if needs_change:
            pbm_client.PbmUpdate(
                profileId=policy.profileId,
                updateSpec=self.create_mob_pbm_update_spec(expected_tag_id, expected_operator, expected_tags, expected_tag_category, expected_description)
            )

        self.format_results_and_exit(results, policy, needs_change)

    def remove_storage_policy(self, policy, pbm_client, results):
        """Delete the given policy and exit the module reporting a change."""
        pbm_client.PbmDelete(profileId=[policy.profileId])

        self.format_results_and_exit(results, policy, True)

    def create_storage_policy(self, policy, pbm_client, results):
        """Create a new policy from the module parameters and exit.

        The ``policy`` argument is unused (kept for signature compatibility);
        the policy object is re-read from the PBM service after creation.
        """
        profile_ids = pbm_client.PbmCreate(
            createSpec=self.create_mob_pbm_create_spec(
                self.format_tag_mob_id(self.params.get('tag_category')),
                None,
                [self.params.get('tag_name')],
                self.params.get('tag_category'),
                self.params.get('description'),
                self.params.get('name')
            )
        )

        policy = pbm_client.PbmRetrieveContent(profileIds=[profile_ids])

        self.format_results_and_exit(results, policy[0], True)

    def ensure_state(self):
        """Apply the requested state: create/update on 'present', delete on 'absent'."""
        client = self.get_profile_manager()
        policies = self.get_storage_policies(client)
        policy_name = self.params.get('name')
        results = dict(changed=False, vmware_vm_storage_policy={})

        if self.params.get('state') == 'present':
            if self.params.get('tag_category') is None:
                self.module.fail_json(msg="tag_category is required when 'state' is 'present'")

            if self.params.get('tag_name') is None:
                self.module.fail_json(msg="tag_name is required when 'state' is 'present'")

            # ensure if the category exists
            category_result = self.rest_client.get_category_by_name(self.params.get('tag_category'))
            if category_result is None:
                self.module.fail_json(msg="%s is not found in vCenter Server tag categories" % self.params.get('tag_category'))

            # ensure if the tag exists
            tag_result = self.rest_client.get_tag_by_category_name(self.params.get('tag_name'), self.params.get('tag_category'))
            if tag_result is None:
                self.module.fail_json(msg="%s is not found in vCenter Server tags" % self.params.get('tag_name'))

            # loop through and update the first match (update exits the module)
            for policy in policies:
                if policy.name == policy_name:
                    self.update_storage_policy(policy, client, results)

            # No existing policy matched: create one. Pass None explicitly —
            # the loop variable 'policy' is undefined when 'policies' is
            # empty, which previously raised NameError here; the argument is
            # ignored by create_storage_policy anyway.
            self.create_storage_policy(None, client, results)

        if self.params.get('state') == 'absent':
            # loop through and delete the first match (remove exits the module)
            for policy in policies:
                if policy.name == policy_name:
                    self.remove_storage_policy(policy, client, results)

            # if we didn't exit by now exit without changing anything
            self.module.exit_json(**results)
+
+
def main():
    """Module entry point: parse arguments and enforce the requested policy state."""
    spec = vmware_argument_spec()
    spec.update(
        dict(
            name=dict(type='str', required=True),
            description=dict(type='str', required=False),
            tag_name=dict(type='str', required=False),
            tag_category=dict(type='str', required=False),
            tag_affinity=dict(type='bool', default=True),
            state=dict(type='str', choices=['absent', 'present'], default='present'),
        )
    )

    # Policy creation/deletion cannot be simulated, hence no check mode.
    module = AnsibleModule(argument_spec=spec, supports_check_mode=False)

    VmwareStoragePolicyManager(module).ensure_state()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_vm_storage_policy_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_vm_storage_policy_info.py
new file mode 100644
index 000000000..7f2873998
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_vm_storage_policy_info.py
@@ -0,0 +1,161 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# Copyright: (c) 2019, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_vm_storage_policy_info
+short_description: Gather information about vSphere storage profile defined storage policy information.
+description:
+- Returns basic information on vSphere storage profiles.
+- A vSphere storage profile defines storage policy information that describes storage requirements
+ for virtual machines and storage capabilities of storage providers.
+author:
+- Abhijeet Kasurde (@Akasurde)
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Get SPBM info
+ community.vmware.vmware_vm_storage_policy_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ delegate_to: localhost
+ register: profiles
+'''
+
+RETURN = r'''
+spbm_profiles:
+ description: list of dictionary of SPBM info
+ returned: success
+ type: list
+ sample: [
+ {
+ "constraints_sub_profiles": [
+ {
+ "rule_set_info": [
+ {
+ "id": "hostFailuresToTolerate",
+ "value": 1
+ },
+ {
+ "id": "stripeWidth",
+ "value": 1
+ },
+ {
+ "id": "forceProvisioning",
+ "value": false
+ },
+ {
+ "id": "proportionalCapacity",
+ "value": 0
+ },
+ {
+ "id": "cacheReservation",
+ "value": 0
+ }
+ ],
+ "rule_set_name": "VSAN sub-profile"
+ }
+ ],
+ "description": "Storage policy used as default for vSAN datastores",
+ "id": "aa6d5a82-1c88-45da-85d3-3d74b91a5bad",
+ "name": "vSAN Default Storage Policy"
+ },
+ ]
+'''
+
+try:
+ from pyVmomi import pbm
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware_spbm import SPBM
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec
+
+
+class SPBMClient(SPBM):
+    """Read-only client that gathers SPBM storage profile information."""
+
+    def __init__(self, module):
+        super(SPBMClient, self).__init__(module)
+
+    def show_capabilities(self, capabilities):
+        """
+        Flatten the property instances of the given capabilities.
+
+        Args:
+            capabilities: iterable of SPBM capability objects
+
+        Returns: list of dicts with 'id' and 'value' for every property
+                 instance found on the capabilities' constraints.
+        """
+        capabilities_info = []
+        for capability in capabilities:
+            for constraint in capability.constraint:
+                # Not every constraint carries property instances; skip those.
+                if hasattr(constraint, 'propertyInstance'):
+                    for propertyInstance in constraint.propertyInstance:
+                        capabilities_info.append(
+                            {
+                                'id': propertyInstance.id,
+                                'value': propertyInstance.value
+                            }
+                        )
+        return capabilities_info
+
+    def get_storage_policy_info(self):
+        """Collect all REQUIREMENT storage profiles and exit the module.
+
+        Calls module.exit_json() with 'spbm_profiles' (never returns).
+        """
+        self.get_spbm_connection()
+
+        results = dict(changed=False, spbm_profiles=[])
+        profile_manager = self.spbm_content.profileManager
+        profile_ids = profile_manager.PbmQueryProfile(
+            resourceType=pbm.profile.ResourceType(resourceType="STORAGE"),
+            profileCategory="REQUIREMENT"
+        )
+        profiles = []
+        if profile_ids:
+            profiles = profile_manager.PbmRetrieveContent(profileIds=profile_ids)
+
+        for profile in profiles:
+            temp_profile_info = {
+                'name': profile.name,
+                'id': profile.profileId.uniqueId,
+                'description': profile.description,
+                'constraints_sub_profiles': []
+            }
+            if hasattr(profile.constraints, 'subProfiles'):
+                subprofiles = profile.constraints.subProfiles
+                temp_sub_profiles = []
+                for subprofile in subprofiles:
+                    rule_set_info = self.show_capabilities(subprofile.capability)
+                    # If a storage policy defines tag-based placement rules, the
+                    # tags arrive as a DiscreteSet; unwrap to its plain values.
+                    # https://github.com/ansible-collections/community.vmware/issues/742
+                    for _rule_set_info in rule_set_info:
+                        if isinstance(_rule_set_info['value'], pbm.capability.types.DiscreteSet):
+                            _rule_set_info['value'] = _rule_set_info['value'].values
+                    temp_sub_profiles.append({
+                        'rule_set_name': subprofile.name,
+                        'rule_set_info': rule_set_info,
+                    })
+                temp_profile_info['constraints_sub_profiles'] = temp_sub_profiles
+
+            results['spbm_profiles'].append(temp_profile_info)
+
+        self.module.exit_json(**results)
+
+
+def main():
+    """Entry point for the vmware_vm_storage_policy_info module."""
+    argument_spec = vmware_argument_spec()
+
+    # Read-only module: check mode is safe because nothing is modified.
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+    spbm_client = SPBMClient(module)
+    spbm_client.get_storage_policy_info()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_vm_vm_drs_rule.py b/ansible_collections/community/vmware/plugins/modules/vmware_vm_vm_drs_rule.py
new file mode 100644
index 000000000..178cd1423
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_vm_vm_drs_rule.py
@@ -0,0 +1,409 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_vm_vm_drs_rule
+short_description: Configure VMware DRS Affinity rule for virtual machines in the given cluster
+description:
+- This module can be used to configure VMware DRS Affinity rule for virtual machines in the given cluster.
+author:
+- Abhijeet Kasurde (@Akasurde)
+options:
+ cluster_name:
+ description:
+ - Desired cluster name where virtual machines are present for the DRS rule.
+ required: true
+ type: str
+ vms:
+ description:
+ - List of virtual machines name for which DRS rule needs to be applied.
+ - Required if C(state) is set to C(present).
+ type: list
+ elements: str
+ drs_rule_name:
+ description:
+ - The name of the DRS rule to manage.
+ required: true
+ type: str
+ enabled:
+ description:
+ - If set to C(true), the DRS rule will be enabled.
+ - Effective only if C(state) is set to C(present).
+ default: false
+ type: bool
+ mandatory:
+ description:
+ - If set to C(true), the DRS rule will be mandatory.
+ - Effective only if C(state) is set to C(present).
+ default: false
+ type: bool
+ affinity_rule:
+ description:
+ - If set to C(true), the DRS rule will be an Affinity rule.
+ - If set to C(false), the DRS rule will be an Anti-Affinity rule.
+ - Effective only if C(state) is set to C(present).
+ default: true
+ type: bool
+ state:
+ description:
+ - If set to C(present), then the DRS rule is created if not present.
+ - If set to C(present), then the DRS rule is already present, it updates to the given configurations.
+ - If set to C(absent), then the DRS rule is deleted if present.
+ required: false
+ default: present
+ choices: [ present, absent ]
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Create DRS Affinity Rule for VM-VM
+ community.vmware.vmware_vm_vm_drs_rule:
+ hostname: "{{ esxi_server }}"
+ username: "{{ esxi_username }}"
+ password: "{{ esxi_password }}"
+ cluster_name: "{{ cluster_name }}"
+ vms:
+ - vm1
+ - vm2
+ drs_rule_name: vm1-vm2-affinity-rule-001
+ enabled: true
+ mandatory: true
+ affinity_rule: true
+ delegate_to: localhost
+
+- name: Create DRS Anti-Affinity Rule for VM-VM
+ community.vmware.vmware_vm_vm_drs_rule:
+ hostname: "{{ esxi_server }}"
+ username: "{{ esxi_username }}"
+ password: "{{ esxi_password }}"
+ cluster_name: "{{ cluster_name }}"
+ enabled: true
+ vms:
+ - vm1
+ - vm2
+ drs_rule_name: vm1-vm2-affinity-rule-001
+ mandatory: true
+ affinity_rule: false
+ delegate_to: localhost
+
+- name: Delete DRS Affinity Rule for VM-VM
+ community.vmware.vmware_vm_vm_drs_rule:
+ hostname: "{{ esxi_server }}"
+ username: "{{ esxi_username }}"
+ password: "{{ esxi_password }}"
+ cluster_name: "{{ cluster_name }}"
+ drs_rule_name: vm1-vm2-affinity-rule-001
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+result:
+ description: metadata about DRS VM and VM rule
+ returned: when state is present
+ type: dict
+ sample: {
+ "rule_enabled": false,
+ "rule_key": 20,
+ "rule_mandatory": true,
+ "rule_name": "drs_rule_0014",
+ "rule_uuid": "525f3bc0-253f-825a-418e-2ec93bffc9ae",
+ "rule_vms": [
+ "VM_65",
+ "VM_146"
+ ]
+ }
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi, vmware_argument_spec, wait_for_task,
+ find_vm_by_id, find_cluster_by_name)
+
+
+class VmwareDrs(PyVmomi):
+    """Manage a VM-VM DRS (anti-)affinity rule on a vSphere cluster."""
+
+    def __init__(self, module):
+        super(VmwareDrs, self).__init__(module)
+        self.vm_list = module.params['vms']
+        self.cluster_name = module.params['cluster_name']
+        self.rule_name = module.params['drs_rule_name']
+        self.enabled = module.params['enabled']
+        self.mandatory = module.params['mandatory']
+        self.affinity_rule = module.params['affinity_rule']
+        self.state = module.params['state']
+
+        # Sanity check for cluster
+        self.cluster_obj = find_cluster_by_name(content=self.content,
+                                                cluster_name=self.cluster_name)
+        if self.cluster_obj is None:
+            self.module.fail_json(msg="Failed to find the cluster %s" % self.cluster_name)
+        # Sanity check for virtual machines
+        self.vm_obj_list = []
+        if self.state == 'present':
+            # Get list of VMs only if state is present
+            self.vm_obj_list = self.get_all_vms_info()
+
+    # Getter
+    def get_all_vms_info(self, vms_list=None):
+        """
+        Get all VM objects using name from given cluster
+        Args:
+            vms_list: List of VM names (defaults to the module's 'vms' param)
+
+        Returns: List of VM managed objects; fails the module if any name
+                 cannot be resolved inside the cluster.
+
+        """
+        vm_obj_list = []
+        if vms_list is None:
+            vms_list = self.vm_list
+
+        for vm_name in vms_list:
+            vm_obj = find_vm_by_id(content=self.content, vm_id=vm_name,
+                                   vm_id_type='vm_name', cluster=self.cluster_obj)
+            if vm_obj is None:
+                self.module.fail_json(msg="Failed to find the virtual machine %s "
+                                          "in the given cluster %s" % (vm_name,
+                                                                       self.cluster_name))
+            vm_obj_list.append(vm_obj)
+        return vm_obj_list
+
+    def get_rule_key_by_name(self, cluster_obj=None, rule_name=None):
+        """
+        Get a specific DRS rule key by name
+        Args:
+            rule_name: Name of rule
+            cluster_obj: Cluster managed object (defaults to self.cluster_obj)
+
+        Returns: Rule Object if found or None
+
+        """
+        if cluster_obj is None:
+            cluster_obj = self.cluster_obj
+
+        if rule_name:
+            rules_list = [rule for rule in cluster_obj.configuration.rule if rule.name == rule_name]
+            if rules_list:
+                return rules_list[0]
+        # No rule found
+        return None
+
+    @staticmethod
+    def normalize_rule_spec(rule_obj=None):
+        """
+        Return human readable rule spec
+        Args:
+            rule_obj: Rule managed object
+
+        Returns: Dictionary with Rule info (empty dict if rule_obj is None)
+
+        """
+        if rule_obj is None:
+            return {}
+        return dict(rule_key=rule_obj.key,
+                    rule_enabled=rule_obj.enabled,
+                    rule_name=rule_obj.name,
+                    rule_mandatory=rule_obj.mandatory,
+                    rule_uuid=rule_obj.ruleUuid,
+                    rule_vms=[vm.name for vm in rule_obj.vm],
+                    # Affinity rules are AffinityRuleSpec objects; anything else
+                    # (e.g. AntiAffinityRuleSpec) is reported as non-affinity.
+                    rule_affinity=True if isinstance(rule_obj, vim.cluster.AffinityRuleSpec) else False,
+                    )
+
+    # Create
+    def create(self):
+        """
+        Create a DRS rule if rule does not exist
+
+        If a rule with the same name already exists:
+        - identical configuration -> exit_json(changed=False) immediately,
+        - different configuration -> update it in place.
+        Returns: (changed, result) tuple from the create/update path.
+        """
+        rule_obj = self.get_rule_key_by_name(rule_name=self.rule_name)
+        if rule_obj is not None:
+            existing_rule = self.normalize_rule_spec(rule_obj=rule_obj)
+            if ((sorted(existing_rule['rule_vms']) == sorted(self.vm_list))
+                    and (existing_rule['rule_enabled'] == self.enabled)
+                    and (existing_rule['rule_mandatory'] == self.mandatory)
+                    and (existing_rule['rule_affinity'] == self.affinity_rule)):
+                self.module.exit_json(changed=False, result=existing_rule, msg="Rule already exists with the same configuration")
+            return self.update_rule_spec(rule_obj)
+        return self.create_rule_spec()
+
+    def create_rule_spec(self):
+        """
+        Create DRS rule
+
+        Returns: (changed, result); on failure changed stays False and
+        result holds the error text from the raised exception.
+        """
+        changed = False
+        result = None
+        if self.affinity_rule:
+            rule = vim.cluster.AffinityRuleSpec()
+        else:
+            rule = vim.cluster.AntiAffinityRuleSpec()
+
+        rule.vm = self.vm_obj_list
+        rule.enabled = self.enabled
+        rule.mandatory = self.mandatory
+        rule.name = self.rule_name
+
+        rule_spec = vim.cluster.RuleSpec(info=rule, operation='add')
+        config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])
+
+        try:
+            if not self.module.check_mode:
+                task = self.cluster_obj.ReconfigureEx(config_spec, modify=True)
+                changed, result = wait_for_task(task)
+        except vmodl.fault.InvalidRequest as e:
+            result = to_native(e.msg)
+        except Exception as e:
+            result = to_native(e)
+
+        if changed:
+            # Re-read the rule so the result reflects server-assigned fields
+            # (key, uuid) rather than the local spec.
+            rule_obj = self.get_rule_key_by_name(rule_name=self.rule_name)
+            result = self.normalize_rule_spec(rule_obj)
+
+        if self.module.check_mode:
+            # Nothing was sent to vCenter; report the would-be rule with
+            # placeholder key/uuid values.
+            changed = True
+            result = dict(
+                rule_key='',
+                rule_enabled=rule.enabled,
+                rule_name=self.rule_name,
+                rule_mandatory=rule.mandatory,
+                rule_uuid='',
+                rule_vms=[vm.name for vm in rule.vm],
+                rule_affinity=self.affinity_rule,
+            )
+        return changed, result
+
+    def update_rule_spec(self, rule_obj=None):
+        """
+        Update DRS rule
+
+        Mutates the existing rule object (VM list, mandatory, enabled) and
+        submits it with operation='edit'.
+        Returns: (changed, result) in the same convention as create_rule_spec.
+        """
+        changed = False
+        result = None
+        rule_obj.vm = self.vm_obj_list
+
+        if (rule_obj.mandatory != self.mandatory):
+            rule_obj.mandatory = self.mandatory
+
+        if (rule_obj.enabled != self.enabled):
+            rule_obj.enabled = self.enabled
+
+        rule_spec = vim.cluster.RuleSpec(info=rule_obj, operation='edit')
+        config_spec = vim.cluster.ConfigSpec(rulesSpec=[rule_spec])
+
+        try:
+            if not self.module.check_mode:
+                task = self.cluster_obj.ReconfigureCluster_Task(config_spec, modify=True)
+                changed, result = wait_for_task(task)
+            else:
+                changed = True
+        except vmodl.fault.InvalidRequest as e:
+            result = to_native(e.msg)
+        except Exception as e:
+            result = to_native(e)
+
+        if changed:
+            # Re-read the rule to report its post-update server-side state.
+            # NOTE(review): in check mode this re-reads the unmodified rule.
+            rule_obj = self.get_rule_key_by_name(rule_name=self.rule_name)
+            result = self.normalize_rule_spec(rule_obj)
+
+        return changed, result
+
+    # Delete
+    def delete(self, rule_name=None):
+        """
+        Delete DRS rule using name
+
+        Returns: (changed, result); result is a message string when the rule
+        does not exist or when running in check mode.
+        """
+        changed = False
+        if rule_name is None:
+            rule_name = self.rule_name
+
+        rule = self.get_rule_key_by_name(rule_name=rule_name)
+        if rule is not None:
+            rule_key = int(rule.key)
+            rule_spec = vim.cluster.RuleSpec(removeKey=rule_key, operation='remove')
+            config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])
+            try:
+                if not self.module.check_mode:
+                    task = self.cluster_obj.ReconfigureEx(config_spec, modify=True)
+                    changed, result = wait_for_task(task)
+                else:
+                    changed = True
+                    result = 'Rule %s will be deleted' % self.rule_name
+            except vmodl.fault.InvalidRequest as e:
+                result = to_native(e.msg)
+            except Exception as e:
+                result = to_native(e)
+        else:
+            result = 'No rule named %s exists' % self.rule_name
+        return changed, result
+
+
+def main():
+    """Entry point for the vmware_vm_vm_drs_rule module.
+
+    Dispatches on 'state': present creates/updates the rule, absent
+    deletes it. Exits via exit_json on success and fail_json on failure.
+    """
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(dict(
+        state=dict(type='str', default='present', choices=['absent', 'present']),
+        vms=dict(type='list', elements='str'),
+        cluster_name=dict(type='str', required=True),
+        drs_rule_name=dict(type='str', required=True),
+        enabled=dict(type='bool', default=False),
+        mandatory=dict(type='bool', default=False),
+        affinity_rule=dict(type='bool', default=True),
+    )
+    )
+
+    # 'vms' only matters when creating/updating a rule.
+    required_if = [
+        ['state', 'present', ['vms']]
+    ]
+    module = AnsibleModule(argument_spec=argument_spec,
+                           required_if=required_if,
+                           supports_check_mode=True)
+
+    results = dict(failed=False, changed=False)
+    state = module.params['state']
+    vm_drs = VmwareDrs(module)
+
+    if state == 'present':
+        # Add Rule
+        changed, result = vm_drs.create()
+        if changed:
+            results['changed'] = changed
+        else:
+            # create() returns changed=False with an error string in result.
+            results['failed'] = True
+            results['msg'] = "Failed to create DRS rule %s" % vm_drs.rule_name
+        results['result'] = result
+    elif state == 'absent':
+        # Delete Rule
+        changed, result = vm_drs.delete()
+        if changed:
+            results['changed'] = changed
+            results['msg'] = "DRS rule %s deleted successfully." % vm_drs.rule_name
+        else:
+            # A missing rule is not a failure for state=absent (idempotent).
+            if "No rule named" in result:
+                results['msg'] = result
+                module.exit_json(**results)
+
+            results['failed'] = True
+            results['msg'] = "Failed to delete DRS rule %s" % vm_drs.rule_name
+            results['result'] = result
+
+    if results['changed']:
+        module.exit_json(**results)
+    if results['failed']:
+        module.fail_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_vm_vss_dvs_migrate.py b/ansible_collections/community/vmware/plugins/modules/vmware_vm_vss_dvs_migrate.py
new file mode 100644
index 000000000..8fd62553e
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_vm_vss_dvs_migrate.py
@@ -0,0 +1,151 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_vm_vss_dvs_migrate
+short_description: Migrates a virtual machine from a standard vswitch to distributed
+description:
+ - Migrates a virtual machine from a standard vswitch to distributed
+author:
+- Joseph Callen (@jcpowermac)
+options:
+ vm_name:
+ description:
+ - Name of the virtual machine to migrate to a dvSwitch
+ required: true
+ type: str
+ dvportgroup_name:
+ description:
+ - Name of the portgroup to migrate to the virtual machine to
+ required: true
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Migrate VCSA to vDS
+ community.vmware.vmware_vm_vss_dvs_migrate:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ vm_name: '{{ vm_name }}'
+ dvportgroup_name: '{{ distributed_portgroup_name }}'
+ delegate_to: localhost
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ HAS_PYVMOMI, connect_to_api, get_all_objs,
+ vmware_argument_spec, wait_for_task)
+
+
+class VMwareVmVssDvsMigrate(object):
+ def __init__(self, module):
+ self.module = module
+ self.content = connect_to_api(module)
+ self.vm = None
+ self.vm_name = module.params['vm_name']
+ self.dvportgroup_name = module.params['dvportgroup_name']
+
+ def process_state(self):
+ vm_nic_states = {
+ 'absent': self.migrate_network_adapter_vds,
+ 'present': self.state_exit_unchanged,
+ }
+
+ vm_nic_states[self.check_vm_network_state()]()
+
+ def find_dvspg_by_name(self):
+ vmware_distributed_port_group = get_all_objs(self.content, [vim.dvs.DistributedVirtualPortgroup])
+ for dvspg in vmware_distributed_port_group:
+ if dvspg.name == self.dvportgroup_name:
+ return dvspg
+ return None
+
+ def find_vm_by_name(self):
+ virtual_machines = get_all_objs(self.content, [vim.VirtualMachine])
+ for vm in virtual_machines:
+ if vm.name == self.vm_name:
+ return vm
+ return None
+
+ def migrate_network_adapter_vds(self):
+ vm_configspec = vim.vm.ConfigSpec()
+ nic = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
+ port = vim.dvs.PortConnection()
+ devicespec = vim.vm.device.VirtualDeviceSpec()
+
+ pg = self.find_dvspg_by_name()
+
+ if pg is None:
+ self.module.fail_json(msg="The standard portgroup was not found")
+
+ dvswitch = pg.config.distributedVirtualSwitch
+ port.switchUuid = dvswitch.uuid
+ port.portgroupKey = pg.key
+ nic.port = port
+
+ for device in self.vm.config.hardware.device:
+ if isinstance(device, vim.vm.device.VirtualEthernetCard):
+ devicespec.device = device
+ devicespec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
+ devicespec.device.backing = nic
+ vm_configspec.deviceChange.append(devicespec)
+
+ task = self.vm.ReconfigVM_Task(vm_configspec)
+ changed, result = wait_for_task(task)
+ self.module.exit_json(changed=changed, result=result)
+
+ def state_exit_unchanged(self):
+ self.module.exit_json(changed=False)
+
+ def check_vm_network_state(self):
+ try:
+ self.vm = self.find_vm_by_name()
+
+ if self.vm is None:
+ self.module.fail_json(msg="A virtual machine with name %s does not exist" % self.vm_name)
+ for device in self.vm.config.hardware.device:
+ if isinstance(device, vim.vm.device.VirtualEthernetCard):
+ if isinstance(device.backing, vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo):
+ return 'present'
+ return 'absent'
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=method_fault.msg)
+
+
+def main():
+    """Entry point for the vmware_vm_vss_dvs_migrate module."""
+
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(dict(vm_name=dict(required=True, type='str'),
+                              dvportgroup_name=dict(required=True, type='str')))
+
+    # No check mode: process_state() reconfigures the VM directly.
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+    if not HAS_PYVMOMI:
+        module.fail_json(msg='pyvmomi is required for this module')
+
+    vmware_vmnic_migrate = VMwareVmVssDvsMigrate(module)
+    vmware_vmnic_migrate.process_state()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_vmkernel.py b/ansible_collections/community/vmware/plugins/modules/vmware_vmkernel.py
new file mode 100644
index 000000000..a42d5bbb4
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_vmkernel.py
@@ -0,0 +1,1121 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
+# Copyright: (c) 2017-18, Ansible Project
+# Copyright: (c) 2017-18, Abhijeet Kasurde <akasurde@redhat.com>
+# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_vmkernel
+short_description: Manages a VMware VMkernel Adapter of an ESXi host.
+description:
+ - This module can be used to manage the VMKernel adapters / VMKernel network interfaces of an ESXi host.
+ - The module assumes that the host is already configured with the Port Group in case of a vSphere Standard Switch (vSS).
+ - The module assumes that the host is already configured with the Distributed Port Group in case of a vSphere Distributed Switch (vDS).
+ - The module automatically migrates the VMKernel adapter from vSS to vDS or vice versa if present.
+author:
+- Joseph Callen (@jcpowermac)
+- Russell Teague (@mtnbikenc)
+- Abhijeet Kasurde (@Akasurde)
+- Christian Kotte (@ckotte)
+notes:
+ - The option C(device) need to be used with DHCP because otherwise it's not possible to check if a VMkernel device is already present
+ - You can only change from DHCP to static, and vSS to vDS, or vice versa, in one step, without creating a new device, with C(device) specified.
+ - You can only create the VMKernel adapter on a vDS if authenticated to vCenter and not if authenticated to ESXi.
+options:
+ vswitch_name:
+ description:
+ - The name of the vSwitch where to add the VMKernel interface.
+ - Required parameter only if C(state) is set to C(present).
+ - Optional parameter from version 2.5 and onwards.
+ type: str
+ aliases: ['vswitch']
+ dvswitch_name:
+ description:
+ - The name of the vSphere Distributed Switch (vDS) where to add the VMKernel interface.
+ - Required parameter only if C(state) is set to C(present).
+ - Optional parameter from version 2.8 and onwards.
+ type: str
+ aliases: ['dvswitch']
+ portgroup_name:
+ description:
+ - The name of the port group for the VMKernel interface.
+ required: true
+ aliases: ['portgroup']
+ type: str
+ network:
+ description:
+ - A dictionary of network details.
+ suboptions:
+ type:
+ type: str
+ description:
+ - Type of IP assignment.
+ choices: [ 'static', 'dhcp' ]
+ default: 'static'
+ ip_address:
+ type: str
+ description:
+ - Static IP address.
+ - Required if C(type) is set to C(static).
+ subnet_mask:
+ type: str
+ description:
+ - Static netmask required.
+ - Required if C(type) is set to C(static).
+ default_gateway:
+ type: str
+ description: Default gateway (Override default gateway for this adapter).
+ tcpip_stack:
+ type: str
+ description:
+ - The TCP/IP stack for the VMKernel interface.
+ choices: [ 'default', 'provisioning', 'vmotion', 'vxlan' ]
+ default: 'default'
+ type: dict
+ default: {
+ type: 'static',
+ tcpip_stack: 'default',
+ }
+ mtu:
+ description:
+ - The MTU for the VMKernel interface.
+ - The default value of 1500 is valid from version 2.5 and onwards.
+ default: 1500
+ type: int
+ device:
+ description:
+ - Search VMkernel adapter by device name.
+ - The parameter is required only in case of C(type) is set to C(dhcp).
+ type: str
+ enable_vsan:
+ description:
+ - Enable VSAN traffic on the VMKernel adapter.
+ - This option is only allowed if the default TCP/IP stack is used.
+ type: bool
+ default: false
+ enable_vmotion:
+ description:
+ - Enable vMotion traffic on the VMKernel adapter.
+ - This option is only allowed if the default TCP/IP stack is used.
+ - You cannot enable vMotion on an additional adapter if you already have an adapter with the vMotion TCP/IP stack configured.
+ type: bool
+ default: false
+ enable_mgmt:
+ description:
+ - Enable Management traffic on the VMKernel adapter.
+ - This option is only allowed if the default TCP/IP stack is used.
+ type: bool
+ default: false
+ enable_ft:
+ description:
+ - Enable Fault Tolerance traffic on the VMKernel adapter.
+ - This option is only allowed if the default TCP/IP stack is used.
+ type: bool
+ default: false
+ enable_provisioning:
+ description:
+ - Enable Provisioning traffic on the VMKernel adapter.
+ - This option is only allowed if the default TCP/IP stack is used.
+ type: bool
+ default: false
+ enable_replication:
+ description:
+ - Enable vSphere Replication traffic on the VMKernel adapter.
+ - This option is only allowed if the default TCP/IP stack is used.
+ type: bool
+ default: false
+ enable_replication_nfc:
+ description:
+ - Enable vSphere Replication NFC traffic on the VMKernel adapter.
+ - This option is only allowed if the default TCP/IP stack is used.
+ type: bool
+ default: false
+ state:
+ description:
+ - If set to C(present), the VMKernel adapter will be created with the given specifications.
+ - If set to C(absent), the VMKernel adapter will be removed.
+ - If set to C(present) and VMKernel adapter exists, the configurations will be updated.
+ choices: [ present, absent ]
+ default: present
+ type: str
+ esxi_hostname:
+ description:
+ - Name of ESXi host to which VMKernel is to be managed.
+ - "From version 2.5 onwards, this parameter is required."
+ required: true
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Add Management vmkernel port using static network type
+ community.vmware.vmware_vmkernel:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ vswitch_name: vSwitch0
+ portgroup_name: PG_0001
+ network:
+ type: 'static'
+ ip_address: 192.168.127.10
+ subnet_mask: 255.255.255.0
+ state: present
+ enable_mgmt: true
+ delegate_to: localhost
+
+- name: Add Management vmkernel port using DHCP network type
+ community.vmware.vmware_vmkernel:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ vswitch_name: vSwitch0
+ portgroup_name: PG_0002
+ state: present
+ network:
+ type: 'dhcp'
+ enable_mgmt: true
+ delegate_to: localhost
+
+- name: Change IP allocation from static to dhcp
+ community.vmware.vmware_vmkernel:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ vswitch_name: vSwitch0
+ portgroup_name: PG_0002
+ state: present
+ device: vmk1
+ network:
+ type: 'dhcp'
+ enable_mgmt: true
+ delegate_to: localhost
+
+- name: Delete VMkernel port
+ community.vmware.vmware_vmkernel:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ vswitch_name: vSwitch0
+ portgroup_name: PG_0002
+ state: absent
+ delegate_to: localhost
+
+- name: Add Management vmkernel port to Distributed Switch
+ community.vmware.vmware_vmkernel:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ dvswitch_name: dvSwitch1
+ portgroup_name: dvPG_0001
+ network:
+ type: 'static'
+ ip_address: 192.168.127.10
+ subnet_mask: 255.255.255.0
+ state: present
+ enable_mgmt: true
+ delegate_to: localhost
+
+- name: Add vMotion vmkernel port with vMotion TCP/IP stack
+ community.vmware.vmware_vmkernel:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ dvswitch_name: dvSwitch1
+ portgroup_name: dvPG_0001
+ network:
+ type: 'static'
+ ip_address: 192.168.127.10
+ subnet_mask: 255.255.255.0
+ tcpip_stack: vmotion
+ state: present
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+result:
+ description: metadata about VMKernel name
+ returned: always
+ type: dict
+ sample: {
+ "changed": false,
+ "msg": "VMkernel Adapter already configured properly",
+ "device": "vmk1",
+ "ipv4": "static",
+ "ipv4_gw": "No override",
+ "ipv4_ip": "192.168.1.15",
+ "ipv4_sm": "255.255.255.0",
+ "mtu": 9000,
+ "services": "vMotion",
+ "switch": "vDS"
+ }
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi, TaskError, vmware_argument_spec, wait_for_task,
+ find_dvspg_by_name, find_dvs_by_name, get_all_objs
+)
+from ansible.module_utils._text import to_native
+
+
+class PyVmomiHelper(PyVmomi):
+ """Class to manage VMkernel configuration of an ESXi host system"""
+
+    def __init__(self, module):
+        """Validate parameters and resolve the host, port group and existing
+        VMkernel adapter (if any) up front, failing fast on bad input.
+        """
+        super(PyVmomiHelper, self).__init__(module)
+        # The 'network' option has a module-level default dict, so this branch
+        # is normally taken; if it were ever empty, self.network_type etc.
+        # would be left unset — NOTE(review): relies on the argument default.
+        if self.params['network']:
+            self.network_type = self.params['network'].get('type')
+            self.ip_address = self.params['network'].get('ip_address', None)
+            self.subnet_mask = self.params['network'].get('subnet_mask', None)
+            self.default_gateway = self.params['network'].get('default_gateway', None)
+            self.tcpip_stack = self.params['network'].get('tcpip_stack')
+        self.device = self.params['device']
+        # DHCP adapters cannot be matched by IP, so 'device' is mandatory then.
+        if self.network_type == 'dhcp' and not self.device:
+            module.fail_json(msg="device is a required parameter when network type is set to 'dhcp'")
+        self.mtu = self.params['mtu']
+        self.enable_vsan = self.params['enable_vsan']
+        self.enable_vmotion = self.params['enable_vmotion']
+        self.enable_mgmt = self.params['enable_mgmt']
+        self.enable_ft = self.params['enable_ft']
+        self.enable_provisioning = self.params['enable_provisioning']
+        self.enable_replication = self.params['enable_replication']
+        self.enable_replication_nfc = self.params['enable_replication_nfc']
+
+        self.vswitch_name = self.params['vswitch_name']
+        self.vds_name = self.params['dvswitch_name']
+        self.port_group_name = self.params['portgroup_name']
+
+        self.esxi_host_name = self.params['esxi_hostname']
+        hosts = self.get_all_host_objs(esxi_host_name=self.esxi_host_name)
+        if hosts:
+            # Exactly one host is expected; use the first match.
+            self.esxi_host_obj = hosts[0]
+        else:
+            self.module.fail_json(
+                msg="Failed to get details of ESXi server. Please specify esxi_hostname."
+            )
+
+        # Static IP/netmask are only required when creating/updating (present).
+        if self.network_type == 'static':
+            if self.module.params['state'] == 'absent':
+                pass
+            elif not self.ip_address:
+                module.fail_json(msg="ip_address is a required parameter when network type is set to 'static'")
+            elif not self.subnet_mask:
+                module.fail_json(msg="subnet_mask is a required parameter when network type is set to 'static'")
+
+        # find Port Group: standard vSwitch takes precedence over vDS.
+        if self.vswitch_name:
+            self.port_group_obj = self.get_port_group_by_name(
+                host_system=self.esxi_host_obj,
+                portgroup_name=self.port_group_name,
+                vswitch_name=self.vswitch_name
+            )
+            if not self.port_group_obj:
+                module.fail_json(msg="Portgroup '%s' not found on vSS '%s'" % (self.port_group_name, self.vswitch_name))
+        elif self.vds_name:
+            self.dv_switch_obj = find_dvs_by_name(self.content, self.vds_name)
+            if not self.dv_switch_obj:
+                module.fail_json(msg="vDS '%s' not found" % self.vds_name)
+            self.port_group_obj = find_dvspg_by_name(self.dv_switch_obj, self.port_group_name)
+            if not self.port_group_obj:
+                module.fail_json(msg="Portgroup '%s' not found on vDS '%s'" % (self.port_group_name, self.vds_name))
+
+        # find VMkernel Adapter: by explicit device name first, then by
+        # portgroup, and finally (for static IPs) by IP address to support
+        # vSS<->vDS migrations where the portgroup changed.
+        if self.device:
+            self.vnic = self.get_vmkernel_by_device(device_name=self.device)
+        else:
+            # config change (e.g. DHCP to static, or vice versa); doesn't work with virtual port change
+            self.vnic = self.get_vmkernel_by_portgroup_new(port_group_name=self.port_group_name)
+            if not self.vnic and self.network_type == 'static':
+                # vDS to vSS or vSS to vSS (static IP)
+                self.vnic = self.get_vmkernel_by_ip(ip_address=self.ip_address)
+
+ def get_port_group_by_name(self, host_system, portgroup_name, vswitch_name):
+ """
+ Get specific port group by given name
+ Args:
+ host_system: Name of Host System
+ portgroup_name: Name of Port Group
+ vswitch_name: Name of the vSwitch
+
+ Returns: List of port groups by given specifications
+
+ """
+ portgroups = self.get_all_port_groups_by_host(host_system=host_system)
+
+ for portgroup in portgroups:
+ if portgroup.spec.vswitchName == vswitch_name and portgroup.spec.name == portgroup_name:
+ return portgroup
+ return None
+
    def ensure(self):
        """Drive the module run: dispatch on desired state x current state.

        The dispatch table maps the requested 'state' and the adapter's
        current presence to one handler; each handler terminates the module
        itself via exit_json()/fail_json(). vSphere runtime/method faults
        are translated into module failures here.

        Returns: NA (the selected handler exits the process)
        """
        host_vmk_states = {
            'absent': {
                'present': self.host_vmk_delete,
                'absent': self.host_vmk_unchange,
            },
            'present': {
                'present': self.host_vmk_update,
                'absent': self.host_vmk_create,
            }
        }

        try:
            # desired state (outer key) x observed state (inner key) -> handler
            host_vmk_states[self.module.params['state']][self.check_state()]()
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=to_native(runtime_fault.msg))
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=to_native(method_fault.msg))
+
+ def get_vmkernel_by_portgroup_new(self, port_group_name=None):
+ """
+ Check if vmkernel available or not
+ Args:
+ port_group_name: name of port group
+
+ Returns: vmkernel managed object if vmkernel found, false if not
+
+ """
+ for vnic in self.esxi_host_obj.config.network.vnic:
+ # check if it's a vSS Port Group
+ if vnic.spec.portgroup == port_group_name:
+ return vnic
+ # check if it's a vDS Port Group
+ try:
+ if vnic.spec.distributedVirtualPort.portgroupKey == self.port_group_obj.key:
+ return vnic
+ except AttributeError:
+ pass
+ return False
+
+ def get_vmkernel_by_ip(self, ip_address):
+ """
+ Check if vmkernel available or not
+ Args:
+ ip_address: IP address of vmkernel device
+
+ Returns: vmkernel managed object if vmkernel found, false if not
+
+ """
+ for vnic in self.esxi_host_obj.config.network.vnic:
+ if vnic.spec.ip.ipAddress == ip_address:
+ return vnic
+ return None
+
+ def get_vmkernel_by_device(self, device_name):
+ """
+ Check if vmkernel available or not
+ Args:
+ device_name: name of vmkernel device
+
+ Returns: vmkernel managed object if vmkernel found, false if not
+
+ """
+ for vnic in self.esxi_host_obj.config.network.vnic:
+ if vnic.device == device_name:
+ return vnic
+ return None
+
+ def check_state(self):
+ """
+ Check internal state management
+ Returns: Present if found and absent if not found
+
+ """
+ return 'present' if self.vnic else 'absent'
+
    def host_vmk_delete(self):
        """Delete the existing VMkernel adapter (honours check mode).

        Exits the module via exit_json() with the removed device name, or
        fail_json() on vSphere faults. Returns: NA
        """
        results = dict(changed=False, msg='')
        # Capture the device name before removal so it can be reported back.
        vmk_device = self.vnic.device
        try:
            if self.module.check_mode:
                results['msg'] = "VMkernel Adapter would be deleted"
            else:
                self.esxi_host_obj.configManager.networkSystem.RemoveVirtualNic(vmk_device)
                results['msg'] = "VMkernel Adapter deleted"
                results['changed'] = True
            results['device'] = vmk_device
        except vim.fault.NotFound as not_found:
            self.module.fail_json(
                msg="Failed to find vmk to delete due to %s" %
                to_native(not_found.msg)
            )
        except vim.fault.HostConfigFault as host_config_fault:
            self.module.fail_json(
                msg="Failed to delete vmk due host config issues : %s" %
                to_native(host_config_fault.msg)
            )

        self.module.exit_json(**results)
+
    def host_vmk_unchange(self):
        """No-op handler: state 'absent' requested and no adapter exists.

        Exits the module immediately reporting no change. Returns: NA
        """
        self.module.exit_json(changed=False)
+
    def host_vmk_update(self):
        """Reconcile an existing VMkernel adapter with the requested settings.

        Three independent change groups are detected first (settings: MTU and
        IPv4; virtual port: vSS/vDS migration; service types), then applied
        in one UpdateVirtualNic() call plus per-service select/deselect calls.
        Honours check mode throughout. Exits via exit_json(). Returns: NA
        """
        # One flag per change group / service so the apply phase below only
        # touches what actually differs.
        changed = changed_settings = changed_vds = changed_services = \
            changed_service_vmotion = changed_service_mgmt = changed_service_ft = \
            changed_service_vsan = changed_service_prov = changed_service_rep = changed_service_rep_nfc = False
        changed_list = []
        results = dict(changed=False, msg='')

        results['tcpip_stack'] = self.tcpip_stack
        net_stack_instance_key = self.get_api_net_stack_instance(self.tcpip_stack)
        # The vSphere API does not allow changing the stack after creation.
        if self.vnic.spec.netStackInstanceKey != net_stack_instance_key:
            self.module.fail_json(msg="The TCP/IP stack cannot be changed on an existing VMkernel adapter!")

        # Check MTU
        results['mtu'] = self.mtu
        if self.vnic.spec.mtu != self.mtu:
            changed_settings = True
            changed_list.append("MTU")
            results['mtu_previous'] = self.vnic.spec.mtu

        # Check IPv4 settings
        results['ipv4'] = self.network_type
        results['ipv4_ip'] = self.ip_address
        results['ipv4_sm'] = self.subnet_mask
        if self.default_gateway:
            results['ipv4_gw'] = self.default_gateway
        else:
            results['ipv4_gw'] = "No override"
        # Transition DHCP -> static
        if self.vnic.spec.ip.dhcp:
            if self.network_type == 'static':
                changed_settings = True
                changed_list.append("IPv4 settings")
                results['ipv4_previous'] = "DHCP"
        # Transition static -> DHCP, or static value changes
        if not self.vnic.spec.ip.dhcp:
            if self.network_type == 'dhcp':
                changed_settings = True
                changed_list.append("IPv4 settings")
                results['ipv4_previous'] = "static"
            elif self.network_type == 'static':
                if self.ip_address != self.vnic.spec.ip.ipAddress:
                    changed_settings = True
                    changed_list.append("IP")
                    results['ipv4_ip_previous'] = self.vnic.spec.ip.ipAddress
                if self.subnet_mask != self.vnic.spec.ip.subnetMask:
                    changed_settings = True
                    changed_list.append("SM")
                    results['ipv4_sm_previous'] = self.vnic.spec.ip.subnetMask
                if self.default_gateway:
                    # ipRouteSpec may be absent entirely -> AttributeError
                    # means "currently no override configured".
                    try:
                        if self.default_gateway != self.vnic.spec.ipRouteSpec.ipRouteConfig.defaultGateway:
                            changed_settings = True
                            changed_list.append("GW override")
                            results['ipv4_gw_previous'] = self.vnic.spec.ipRouteSpec.ipRouteConfig.defaultGateway
                    except AttributeError:
                        changed_settings = True
                        changed_list.append("GW override")
                        results['ipv4_gw_previous'] = "No override"
                else:
                    # No override requested: flag removal if one is configured.
                    try:
                        if self.vnic.spec.ipRouteSpec.ipRouteConfig.defaultGateway:
                            changed_settings = True
                            changed_list.append("GW override")
                            results['ipv4_gw_previous'] = self.vnic.spec.ipRouteSpec.ipRouteConfig.defaultGateway
                    except AttributeError:
                        pass

        # Check virtual port (vSS or vDS)
        results['portgroup'] = self.port_group_name
        dvs_uuid = None
        if self.vswitch_name:
            results['switch'] = self.vswitch_name
            # A distributedVirtualPort on the vnic means it currently lives
            # on a vDS and must be migrated to the requested vSS.
            try:
                if self.vnic.spec.distributedVirtualPort.switchUuid:
                    changed_vds = True
                    changed_list.append("Virtual Port")
                    dvs_uuid = self.vnic.spec.distributedVirtualPort.switchUuid
            except AttributeError:
                pass
            if changed_vds:
                results['switch_previous'] = self.find_dvs_by_uuid(dvs_uuid)
                self.dv_switch_obj = find_dvs_by_name(self.content, results['switch_previous'])
                results['portgroup_previous'] = self.find_dvspg_by_key(
                    self.dv_switch_obj, self.vnic.spec.distributedVirtualPort.portgroupKey
                )
        elif self.vds_name:
            results['switch'] = self.vds_name
            # AttributeError here means the vnic is on a vSS and must move to
            # the vDS; a differing switchUuid means it is on another vDS.
            try:
                if self.vnic.spec.distributedVirtualPort.switchUuid != self.dv_switch_obj.uuid:
                    changed_vds = True
                    changed_list.append("Virtual Port")
                    dvs_uuid = self.vnic.spec.distributedVirtualPort.switchUuid
            except AttributeError:
                changed_vds = True
                changed_list.append("Virtual Port")
            if changed_vds:
                results['switch_previous'] = self.find_dvs_by_uuid(dvs_uuid)
                results['portgroup_previous'] = self.vnic.spec.portgroup
                # If the old home was a vSS (dvs_uuid is None), derive the
                # previous switch name from the port group list instead.
                portgroups = self.get_all_port_groups_by_host(host_system=self.esxi_host_obj)
                for portgroup in portgroups:
                    if portgroup.spec.name == self.vnic.spec.portgroup:
                        results['switch_previous'] = portgroup.spec.vswitchName

        results['services'] = self.create_enabled_services_string()
        # Check configuration of service types (only if default TCP/IP stack is used)
        if self.vnic.spec.netStackInstanceKey == 'defaultTcpipStack':
            service_type_vmks = self.get_all_vmks_by_service_type()
            # Each pair below means: enable requested but not active, or
            # active but disable requested.
            if (self.enable_vmotion and self.vnic.device not in service_type_vmks['vmotion']) or \
                    (not self.enable_vmotion and self.vnic.device in service_type_vmks['vmotion']):
                changed_services = changed_service_vmotion = True

            if (self.enable_mgmt and self.vnic.device not in service_type_vmks['management']) or \
                    (not self.enable_mgmt and self.vnic.device in service_type_vmks['management']):
                changed_services = changed_service_mgmt = True

            if (self.enable_ft and self.vnic.device not in service_type_vmks['faultToleranceLogging']) or \
                    (not self.enable_ft and self.vnic.device in service_type_vmks['faultToleranceLogging']):
                changed_services = changed_service_ft = True

            if (self.enable_vsan and self.vnic.device not in service_type_vmks['vsan']) or \
                    (not self.enable_vsan and self.vnic.device in service_type_vmks['vsan']):
                changed_services = changed_service_vsan = True

            if (self.enable_provisioning and self.vnic.device not in service_type_vmks['vSphereProvisioning']) or \
                    (not self.enable_provisioning and self.vnic.device in service_type_vmks['vSphereProvisioning']):
                changed_services = changed_service_prov = True

            if (self.enable_replication and self.vnic.device not in service_type_vmks['vSphereReplication']) or \
                    (not self.enable_replication and self.vnic.device in service_type_vmks['vSphereReplication']):
                changed_services = changed_service_rep = True

            if (self.enable_replication_nfc and self.vnic.device not in service_type_vmks['vSphereReplicationNFC']) or \
                    (not self.enable_replication_nfc and self.vnic.device in service_type_vmks['vSphereReplicationNFC']):
                changed_services = changed_service_rep_nfc = True
            if changed_services:
                changed_list.append("services")

        if changed_settings or changed_vds or changed_services:
            changed = True
            if self.module.check_mode:
                changed_suffix = ' would be updated'
            else:
                changed_suffix = ' updated'
            # Build a human-readable "A, B, and C" summary of what changed.
            if len(changed_list) > 2:
                message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1])
            elif len(changed_list) == 2:
                message = ' and '.join(changed_list)
            elif len(changed_list) == 1:
                message = changed_list[0]
            message = "VMkernel Adapter " + message + changed_suffix
            if changed_settings or changed_vds:
                # Build the full replacement spec; UpdateVirtualNic() replaces
                # the configuration, so unchanged values must be re-supplied.
                vnic_config = vim.host.VirtualNic.Specification()
                ip_spec = vim.host.IpConfig()
                if self.network_type == 'dhcp':
                    ip_spec.dhcp = True
                else:
                    ip_spec.dhcp = False
                    ip_spec.ipAddress = self.ip_address
                    ip_spec.subnetMask = self.subnet_mask
                    if self.default_gateway:
                        vnic_config.ipRouteSpec = vim.host.VirtualNic.IpRouteSpec()
                        vnic_config.ipRouteSpec.ipRouteConfig = vim.host.IpRouteConfig()
                        vnic_config.ipRouteSpec.ipRouteConfig.defaultGateway = self.default_gateway
                    else:
                        # An empty IpRouteConfig clears any existing override.
                        vnic_config.ipRouteSpec = vim.host.VirtualNic.IpRouteSpec()
                        vnic_config.ipRouteSpec.ipRouteConfig = vim.host.IpRouteConfig()

                vnic_config.ip = ip_spec
                vnic_config.mtu = self.mtu

                if changed_vds:
                    if self.vswitch_name:
                        vnic_config.portgroup = self.port_group_name
                    elif self.vds_name:
                        vnic_config.distributedVirtualPort = vim.dvs.PortConnection()
                        vnic_config.distributedVirtualPort.switchUuid = self.dv_switch_obj.uuid
                        vnic_config.distributedVirtualPort.portgroupKey = self.port_group_obj.key

                try:
                    if not self.module.check_mode:
                        self.esxi_host_obj.configManager.networkSystem.UpdateVirtualNic(self.vnic.device, vnic_config)
                except vim.fault.NotFound as not_found:
                    self.module.fail_json(
                        msg="Failed to update vmk as virtual network adapter cannot be found %s" %
                        to_native(not_found.msg)
                    )
                except vim.fault.HostConfigFault as host_config_fault:
                    self.module.fail_json(
                        msg="Failed to update vmk due to host config issues : %s" %
                        to_native(host_config_fault.msg)
                    )
                except vim.fault.InvalidState as invalid_state:
                    self.module.fail_json(
                        msg="Failed to update vmk as ipv6 address is specified in an ipv4 only system : %s" %
                        to_native(invalid_state.msg)
                    )
                except vmodl.fault.InvalidArgument as invalid_arg:
                    self.module.fail_json(
                        msg="Failed to update vmk as IP address or Subnet Mask in the IP configuration"
                        "are invalid or PortGroup does not exist : %s" % to_native(invalid_arg.msg)
                    )

            if changed_services:
                # NOTE(review): 'changed_list' already got "services" above and
                # is not read again after 'message' was built, so this second
                # append is dead code — candidate for removal.
                changed_list.append("Services")
                services_previous = []
                # service_type_vmks is guaranteed to exist here: the
                # changed_service_* flags can only be set on the default stack.
                vnic_manager = self.esxi_host_obj.configManager.virtualNicManager

                if changed_service_mgmt:
                    if self.vnic.device in service_type_vmks['management']:
                        services_previous.append('Mgmt')
                    operation = 'select' if self.enable_mgmt else 'deselect'
                    self.set_service_type(
                        vnic_manager=vnic_manager, vmk=self.vnic, service_type='management', operation=operation
                    )

                if changed_service_vmotion:
                    if self.vnic.device in service_type_vmks['vmotion']:
                        services_previous.append('vMotion')
                    operation = 'select' if self.enable_vmotion else 'deselect'
                    self.set_service_type(
                        vnic_manager=vnic_manager, vmk=self.vnic, service_type='vmotion', operation=operation
                    )

                if changed_service_ft:
                    if self.vnic.device in service_type_vmks['faultToleranceLogging']:
                        services_previous.append('FT')
                    operation = 'select' if self.enable_ft else 'deselect'
                    self.set_service_type(
                        vnic_manager=vnic_manager, vmk=self.vnic, service_type='faultToleranceLogging', operation=operation
                    )

                if changed_service_prov:
                    if self.vnic.device in service_type_vmks['vSphereProvisioning']:
                        services_previous.append('Prov')
                    operation = 'select' if self.enable_provisioning else 'deselect'
                    self.set_service_type(
                        vnic_manager=vnic_manager, vmk=self.vnic, service_type='vSphereProvisioning', operation=operation
                    )

                if changed_service_rep:
                    if self.vnic.device in service_type_vmks['vSphereReplication']:
                        services_previous.append('Repl')
                    operation = 'select' if self.enable_replication else 'deselect'
                    self.set_service_type(
                        vnic_manager=vnic_manager, vmk=self.vnic, service_type='vSphereReplication', operation=operation
                    )

                if changed_service_rep_nfc:
                    if self.vnic.device in service_type_vmks['vSphereReplicationNFC']:
                        services_previous.append('Repl_NFC')
                    operation = 'select' if self.enable_replication_nfc else 'deselect'
                    self.set_service_type(
                        vnic_manager=vnic_manager, vmk=self.vnic, service_type='vSphereReplicationNFC', operation=operation
                    )

                if changed_service_vsan:
                    if self.vnic.device in service_type_vmks['vsan']:
                        services_previous.append('VSAN')
                    # vSAN is configured via the vsanSystem, not the vnic manager.
                    results['vsan'] = self.set_vsan_service_type(self.enable_vsan)

                results['services_previous'] = ', '.join(services_previous)
        else:
            message = "VMkernel Adapter already configured properly"

        results['changed'] = changed
        results['msg'] = message
        results['device'] = self.vnic.device
        self.module.exit_json(**results)
+
+ def find_dvs_by_uuid(self, uuid):
+ """
+ Find DVS by UUID
+ Returns: DVS name
+ """
+ dvs_list = get_all_objs(self.content, [vim.DistributedVirtualSwitch])
+ for dvs in dvs_list:
+ if dvs.uuid == uuid:
+ return dvs.summary.name
+ return None
+
+ def find_dvspg_by_key(self, dv_switch, portgroup_key):
+ """
+ Find dvPortgroup by key
+ Returns: dvPortgroup name
+ """
+
+ portgroups = dv_switch.portgroup
+
+ for portgroup in portgroups:
+ if portgroup.key == portgroup_key:
+ return portgroup.name
+
+ return None
+
    def set_vsan_service_type(self, enable_vsan):
        """Enable or disable vSAN traffic on this VMkernel adapter.

        Builds a new vSAN host config from the current one, adds/removes this
        adapter's port entry, and submits it via UpdateVsan_Task (skipped in
        check mode).

        :param enable_vsan: True to enable, False to disable vSAN on the vnic
        Returns: a human-readable status string describing the operation
        """
        result = None
        vsan_system = self.esxi_host_obj.configManager.vsanSystem

        vsan_system_config = vsan_system.config
        vsan_config = vim.vsan.host.ConfigInfo()

        # Start from the live network config and mutate it below.
        vsan_config.networkInfo = vsan_system_config.networkInfo
        # NOTE(review): this line dereferences networkInfo.port unconditionally,
        # yet the 'is None' check further down implies networkInfo can be None
        # — confirm whether a host without any vSAN config can reach this point.
        current_vsan_vnics = [portConfig.device for portConfig in vsan_system_config.networkInfo.port]
        changed = False
        result = "%s NIC %s (currently enabled NICs: %s) : " % ("Enable" if enable_vsan else "Disable", self.vnic.device, current_vsan_vnics)
        if not enable_vsan:
            # Remove this adapter's port entry, if present.
            if self.vnic.device in current_vsan_vnics:
                vsan_config.networkInfo.port = list(filter(lambda portConfig: portConfig.device != self.vnic.device, vsan_config.networkInfo.port))
                changed = True
        else:
            # Add a port entry for this adapter, if not already present.
            if self.vnic.device not in current_vsan_vnics:
                vsan_port_config = vim.vsan.host.ConfigInfo.NetworkInfo.PortConfig()
                vsan_port_config.device = self.vnic.device

                if vsan_config.networkInfo is None:
                    vsan_config.networkInfo = vim.vsan.host.ConfigInfo.NetworkInfo()
                    vsan_config.networkInfo.port = [vsan_port_config]
                else:
                    vsan_config.networkInfo.port.append(vsan_port_config)
                changed = True

        if not self.module.check_mode and changed:
            try:
                vsan_task = vsan_system.UpdateVsan_Task(vsan_config)
                # wait_for_task returns (success, result); only success is used.
                task_result = wait_for_task(vsan_task)
                if task_result[0]:
                    result += "Success"
                else:
                    result += "Failed"
            except TaskError as task_err:
                self.module.fail_json(
                    msg="Failed to set service type to vsan for %s : %s" % (self.vnic.device, to_native(task_err))
                )
        if self.module.check_mode:
            result += "Dry-run"
        return result
+
    def host_vmk_create(self):
        """Create a new VMkernel adapter with the requested configuration.

        Builds the VirtualNic specification (IP config, MTU, TCP/IP stack,
        vSS port group or vDS port connection), calls AddVirtualNic(), then
        enables the requested service types on the new adapter (default
        stack only). Honours check mode. Exits via exit_json(). Returns: NA
        """
        results = dict(changed=False, message='')
        if self.vswitch_name:
            results['switch'] = self.vswitch_name
        elif self.vds_name:
            results['switch'] = self.vds_name
        results['portgroup'] = self.port_group_name

        vnic_config = vim.host.VirtualNic.Specification()

        ip_spec = vim.host.IpConfig()
        results['ipv4'] = self.network_type
        if self.network_type == 'dhcp':
            ip_spec.dhcp = True
        else:
            ip_spec.dhcp = False
            results['ipv4_ip'] = self.ip_address
            results['ipv4_sm'] = self.subnet_mask
            ip_spec.ipAddress = self.ip_address
            ip_spec.subnetMask = self.subnet_mask
            if self.default_gateway:
                vnic_config.ipRouteSpec = vim.host.VirtualNic.IpRouteSpec()
                vnic_config.ipRouteSpec.ipRouteConfig = vim.host.IpRouteConfig()
                vnic_config.ipRouteSpec.ipRouteConfig.defaultGateway = self.default_gateway
        vnic_config.ip = ip_spec

        results['mtu'] = self.mtu
        vnic_config.mtu = self.mtu

        results['tcpip_stack'] = self.tcpip_stack
        vnic_config.netStackInstanceKey = self.get_api_net_stack_instance(self.tcpip_stack)

        vmk_device = None
        try:
            if self.module.check_mode:
                results['msg'] = "VMkernel Adapter would be created"
            else:
                if self.vswitch_name:
                    vmk_device = self.esxi_host_obj.configManager.networkSystem.AddVirtualNic(
                        self.port_group_name,
                        vnic_config
                    )
                elif self.vds_name:
                    # On a vDS the port group is passed via the PortConnection,
                    # not the positional portgroup argument (hence "").
                    vnic_config.distributedVirtualPort = vim.dvs.PortConnection()
                    vnic_config.distributedVirtualPort.switchUuid = self.dv_switch_obj.uuid
                    vnic_config.distributedVirtualPort.portgroupKey = self.port_group_obj.key
                    vmk_device = self.esxi_host_obj.configManager.networkSystem.AddVirtualNic(portgroup="", nic=vnic_config)
                results['msg'] = "VMkernel Adapter created"
                results['changed'] = True
                results['device'] = vmk_device
                if self.network_type != 'dhcp':
                    if self.default_gateway:
                        results['ipv4_gw'] = self.default_gateway
                    else:
                        results['ipv4_gw'] = "No override"
                results['services'] = self.create_enabled_services_string()
        except vim.fault.AlreadyExists as already_exists:
            self.module.fail_json(
                msg="Failed to add vmk as portgroup already has a virtual network adapter %s" %
                to_native(already_exists.msg)
            )
        except vim.fault.HostConfigFault as host_config_fault:
            self.module.fail_json(
                msg="Failed to add vmk due to host config issues : %s" %
                to_native(host_config_fault.msg)
            )
        except vim.fault.InvalidState as invalid_state:
            self.module.fail_json(
                msg="Failed to add vmk as ipv6 address is specified in an ipv4 only system : %s" %
                to_native(invalid_state.msg)
            )
        except vmodl.fault.InvalidArgument as invalid_arg:
            self.module.fail_json(
                msg="Failed to add vmk as IP address or Subnet Mask in the IP configuration "
                "are invalid or PortGroup does not exist : %s" % to_native(invalid_arg.msg)
            )

        # do service type configuration
        # NOTE(review): in check mode vmk_device stays None, so self.vnic
        # becomes None here; with enable_vsan set, set_vsan_service_type()
        # would then dereference self.vnic.device — confirm check-mode path.
        if self.tcpip_stack == 'default' and not all(
                option is False for option in [self.enable_vsan, self.enable_vmotion,
                                               self.enable_mgmt, self.enable_ft,
                                               self.enable_provisioning, self.enable_replication,
                                               self.enable_replication_nfc]):
            self.vnic = self.get_vmkernel_by_device(device_name=vmk_device)

            # VSAN
            if self.enable_vsan:
                results['vsan'] = self.set_vsan_service_type(self.enable_vsan)

            # Other service type
            host_vnic_manager = self.esxi_host_obj.configManager.virtualNicManager
            if self.enable_vmotion:
                self.set_service_type(host_vnic_manager, self.vnic, 'vmotion')

            if self.enable_mgmt:
                self.set_service_type(host_vnic_manager, self.vnic, 'management')

            if self.enable_ft:
                self.set_service_type(host_vnic_manager, self.vnic, 'faultToleranceLogging')

            if self.enable_provisioning:
                self.set_service_type(host_vnic_manager, self.vnic, 'vSphereProvisioning')

            if self.enable_replication:
                self.set_service_type(host_vnic_manager, self.vnic, 'vSphereReplication')

            if self.enable_replication_nfc:
                self.set_service_type(host_vnic_manager, self.vnic, 'vSphereReplicationNFC')

        self.module.exit_json(**results)
+
+ def set_service_type(self, vnic_manager, vmk, service_type, operation='select'):
+ """
+ Set service type to given VMKernel
+ Args:
+ vnic_manager: Virtual NIC manager object
+ vmk: VMkernel managed object
+ service_type: Name of service type
+ operation: Select to select service type, deselect to deselect service type
+
+ """
+ try:
+ if operation == 'select':
+ if not self.module.check_mode:
+ vnic_manager.SelectVnicForNicType(service_type, vmk.device)
+ elif operation == 'deselect':
+ if not self.module.check_mode:
+ vnic_manager.DeselectVnicForNicType(service_type, vmk.device)
+ except vmodl.fault.InvalidArgument as invalid_arg:
+ self.module.fail_json(
+ msg="Failed to %s VMK service type '%s' on '%s' due to : %s" %
+ (operation, service_type, vmk.device, to_native(invalid_arg.msg))
+ )
+
+ def get_all_vmks_by_service_type(self):
+ """
+ Return information about service types and VMKernel
+ Returns: Dictionary of service type as key and VMKernel list as value
+
+ """
+ service_type_vmk = dict(
+ vmotion=[],
+ vsan=[],
+ management=[],
+ faultToleranceLogging=[],
+ vSphereProvisioning=[],
+ vSphereReplication=[],
+ vSphereReplicationNFC=[],
+ )
+
+ for service_type in list(service_type_vmk):
+ vmks_list = self.query_service_type_for_vmks(service_type)
+ service_type_vmk[service_type] = vmks_list
+ return service_type_vmk
+
+ def query_service_type_for_vmks(self, service_type):
+ """
+ Return list of VMKernels
+ Args:
+ service_type: Name of service type
+
+ Returns: List of VMKernel which belongs to that service type
+
+ """
+ vmks_list = []
+ query = None
+ try:
+ query = self.esxi_host_obj.configManager.virtualNicManager.QueryNetConfig(service_type)
+ except vim.fault.HostConfigFault as config_fault:
+ self.module.fail_json(
+ msg="Failed to get all VMKs for service type %s due to host config fault : %s" %
+ (service_type, to_native(config_fault.msg))
+ )
+ except vmodl.fault.InvalidArgument as invalid_argument:
+ self.module.fail_json(
+ msg="Failed to get all VMKs for service type %s due to invalid arguments : %s" %
+ (service_type, to_native(invalid_argument.msg))
+ )
+
+ if not query.selectedVnic:
+ return vmks_list
+ vnics_with_service_type = [vnic.device for vnic in query.candidateVnic if vnic.key in query.selectedVnic]
+ return vnics_with_service_type
+
+ def create_enabled_services_string(self):
+ """Create services list"""
+ services = []
+ if self.enable_mgmt:
+ services.append('Mgmt')
+ if self.enable_vmotion:
+ services.append('vMotion')
+ if self.enable_ft:
+ services.append('FT')
+ if self.enable_vsan:
+ services.append('VSAN')
+ if self.enable_provisioning:
+ services.append('Prov')
+ if self.enable_replication:
+ services.append('Repl')
+ if self.enable_replication_nfc:
+ services.append('Repl_NFC')
+ return ', '.join(services)
+
+ @staticmethod
+ def get_api_net_stack_instance(tcpip_stack):
+ """Get TCP/IP stack instance name or key"""
+ net_stack_instance = None
+ if tcpip_stack == 'default':
+ net_stack_instance = 'defaultTcpipStack'
+ elif tcpip_stack == 'provisioning':
+ net_stack_instance = 'vSphereProvisioning'
+ # vmotion and vxlan stay the same
+ elif tcpip_stack == 'vmotion':
+ net_stack_instance = 'vmotion'
+ elif tcpip_stack == 'vxlan':
+ net_stack_instance = 'vxlan'
+ elif tcpip_stack == 'defaultTcpipStack':
+ net_stack_instance = 'default'
+ elif tcpip_stack == 'vSphereProvisioning':
+ net_stack_instance = 'provisioning'
+
+ return net_stack_instance
+
+
def main():
    """Module entry point: build the argument spec, validate parameter
    combinations, and hand control to PyVmomiHelper.ensure()."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(
        esxi_hostname=dict(required=True, type='str'),
        portgroup_name=dict(required=True, type='str', aliases=['portgroup']),
        mtu=dict(required=False, type='int', default=1500),
        device=dict(type='str'),
        enable_vsan=dict(required=False, type='bool', default=False),
        enable_vmotion=dict(required=False, type='bool', default=False),
        enable_mgmt=dict(required=False, type='bool', default=False),
        enable_ft=dict(required=False, type='bool', default=False),
        enable_provisioning=dict(type='bool', default=False),
        enable_replication=dict(type='bool', default=False),
        enable_replication_nfc=dict(type='bool', default=False),
        vswitch_name=dict(required=False, type='str', aliases=['vswitch']),
        dvswitch_name=dict(required=False, type='str', aliases=['dvswitch']),
        # Nested network settings; the default dict makes self.params['network']
        # always truthy in PyVmomiHelper.__init__.
        network=dict(
            type='dict',
            options=dict(
                type=dict(type='str', default='static', choices=['static', 'dhcp']),
                ip_address=dict(type='str'),
                subnet_mask=dict(type='str'),
                default_gateway=dict(type='str'),
                tcpip_stack=dict(type='str', default='default', choices=['default', 'provisioning', 'vmotion', 'vxlan']),
            ),
            default=dict(
                type='static',
                tcpip_stack='default',
            ),
        ),
        state=dict(
            type='str',
            default='present',
            choices=['absent', 'present']
        ),
    ))

    # The adapter lives on exactly one switch type; deletion requires the
    # device name while creation/update requires the port group.
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[
                               ['vswitch_name', 'dvswitch_name'],
                           ],
                           required_one_of=[
                               ['vswitch_name', 'dvswitch_name'],
                               ['portgroup_name', 'device'],
                           ],
                           required_if=[
                               ['state', 'present', ['portgroup_name']],
                               ['state', 'absent', ['device']]
                           ],
                           supports_check_mode=True)

    pyv = PyVmomiHelper(module)
    pyv.ensure()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_vmkernel_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_vmkernel_info.py
new file mode 100644
index 000000000..0ea95bb8d
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_vmkernel_info.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_vmkernel_info
+short_description: Gathers VMKernel info about an ESXi host
+description:
+- This module can be used to gather VMKernel information about an ESXi host from given ESXi hostname or cluster name.
+author:
+- Abhijeet Kasurde (@Akasurde)
+options:
+ cluster_name:
+ description:
+ - Name of the cluster.
+ - VMKernel information about each ESXi server will be returned for the given cluster.
+ - If C(esxi_hostname) is not given, this parameter is required.
+ type: str
+ esxi_hostname:
+ description:
+ - ESXi hostname.
+ - VMKernel information about this ESXi server will be returned.
+ - If C(cluster_name) is not given, this parameter is required.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather VMKernel info about all ESXi Host in given Cluster
+ community.vmware.vmware_vmkernel_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: cluster_name
+ delegate_to: localhost
+ register: cluster_host_vmks
+
+- name: Gather VMKernel info about ESXi Host
+ community.vmware.vmware_vmkernel_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ delegate_to: localhost
+ register: host_vmks
+'''
+
+RETURN = r'''
+host_vmk_info:
+ description: metadata about VMKernel present on given host system
+ returned: success
+ type: dict
+ sample:
+ {
+ "10.76.33.208": [
+ {
+ "device": "vmk0",
+ "dhcp": true,
+ "enable_ft": false,
+ "enable_management": true,
+ "enable_vmotion": false,
+ "enable_vsan": false,
+ "ipv4_address": "10.76.33.28",
+ "ipv4_subnet_mask": "255.255.255.0",
+ "key": "key-vim.host.VirtualNic-vmk0",
+ "mac": "52:54:00:12:50:ce",
+ "mtu": 1500,
+ "portgroup": "Management Network",
+ "stack": "defaultTcpipStack"
+ },
+ ]
+ }
+
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+from ansible.module_utils._text import to_native
+
+
class VmkernelInfoManager(PyVmomi):
    """Gather VMkernel adapter facts for one or more ESXi hosts.

    On construction, resolves the target hosts (by cluster or hostname) and
    pre-computes, per host, which VMkernel devices are enabled for each
    service type; gather_host_vmk_info() then reports per-adapter details.
    """

    def __init__(self, module):
        super(VmkernelInfoManager, self).__init__(module)
        cluster_name = self.params.get('cluster_name', None)
        esxi_host_name = self.params.get('esxi_hostname', None)
        # The argument spec guarantees at least one of the two is set.
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        self.service_type_vmks = dict()
        self.get_all_vmks_by_service_type()

    def get_all_vmks_by_service_type(self):
        """Populate self.service_type_vmks: host name -> service type ->
        list of enabled VMkernel device names."""
        for host in self.hosts:
            self.service_type_vmks[host.name] = dict(vmotion=[], vsan=[], management=[], faultToleranceLogging=[])
            for service_type in self.service_type_vmks[host.name].keys():
                vmks_list = self.query_service_type_for_vmks(host, service_type)
                self.service_type_vmks[host.name][service_type] = vmks_list

    def query_service_type_for_vmks(self, host_system, service_type):
        """Return the VMkernel device names enabled for a service type.

        Args:
            host_system: host system managed object
            service_type: name of the service type

        Returns: list of VMkernel device names which belong to that service
        type (empty list when none are selected)
        """
        try:
            query = host_system.configManager.virtualNicManager.QueryNetConfig(service_type)
        except vim.fault.HostConfigFault as config_fault:
            self.module.fail_json(msg="Failed to get all VMKs for service type %s due to"
                                      " host config fault : %s" % (service_type, to_native(config_fault.msg)))
        except vmodl.fault.InvalidArgument as invalid_argument:
            self.module.fail_json(msg="Failed to get all VMKs for service type %s due to"
                                      " invalid arguments : %s" % (service_type, to_native(invalid_argument.msg)))
        except Exception as e:
            # Catch-all so an unexpected pyVmomi error still yields a clean
            # module failure. Fixed: a space was missing before the error text
            # ("due to%s" -> "due to %s").
            self.module.fail_json(msg="Failed to get all VMKs for service type %s due to"
                                      " %s" % (service_type, to_native(e)))

        if not query or not query.selectedVnic:
            return []
        # selectedVnic holds keys; map them back to device names via candidateVnic.
        selected_vnics = list(query.selectedVnic)
        return [vnic.device for vnic in query.candidateVnic if vnic.key in selected_vnics]

    def gather_host_vmk_info(self):
        """Build the host_vmk_info result: host name -> list of per-adapter
        fact dicts (device, IP settings, MTU, stack, enabled services)."""
        hosts_info = {}

        for host in self.hosts:
            host_vmk_info = []
            host_network_system = host.config.network
            if host_network_system:
                # Use the local reference instead of re-reading host.config.network.
                for vmk in host_network_system.vnic:
                    host_vmk_info.append(dict(
                        device=vmk.device,
                        key=vmk.key,
                        portgroup=vmk.portgroup,
                        ipv4_address=vmk.spec.ip.ipAddress,
                        ipv4_subnet_mask=vmk.spec.ip.subnetMask,
                        dhcp=vmk.spec.ip.dhcp,
                        mac=vmk.spec.mac,
                        mtu=vmk.spec.mtu,
                        stack=vmk.spec.netStackInstanceKey,
                        enable_vsan=vmk.device in self.service_type_vmks[host.name]['vsan'],
                        enable_vmotion=vmk.device in self.service_type_vmks[host.name]['vmotion'],
                        enable_management=vmk.device in self.service_type_vmks[host.name]['management'],
                        enable_ft=vmk.device in self.service_type_vmks[host.name]['faultToleranceLogging'],
                    ))
            hosts_info[host.name] = host_vmk_info
        return hosts_info
+
+
def main():
    """Module entry point: build the argument spec and return the gathered
    VMkernel facts (read-only, so check mode is always supported)."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
    )

    # Facts can be gathered for a whole cluster or a single ESXi host.
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ],
        supports_check_mode=True
    )

    vmware_vmk_config = VmkernelInfoManager(module)
    module.exit_json(changed=False, host_vmk_info=vmware_vmk_config.gather_host_vmk_info())
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_vmotion.py b/ansible_collections/community/vmware/plugins/modules/vmware_vmotion.py
new file mode 100644
index 000000000..e9d39c920
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_vmotion.py
@@ -0,0 +1,557 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Bede Carroll <bc+github () bedecarroll.com>
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_vmotion
+short_description: Move a virtual machine using vMotion, and/or its vmdks using storage vMotion.
+description:
+ - Using VMware vCenter, move a virtual machine using vMotion to a different
+ host, and/or its vmdks to another datastore using storage vMotion.
+author:
+- Bede Carroll (@bedecarroll)
+- Olivier Boukili (@oboukili)
+options:
+ vm_name:
+ description:
+ - Name of the VM to perform a vMotion on.
+      - This is a required parameter if C(vm_uuid) is not set.
+ - Version 2.6 onwards, this parameter is not a required parameter, unlike the previous versions.
+ aliases: ['vm']
+ type: str
+ vm_uuid:
+ description:
+ - UUID of the virtual machine to perform a vMotion operation on.
+ - This is a required parameter, if C(vm_name) or C(moid) is not set.
+ aliases: ['uuid']
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(vm_name) or C(vm_uuid) is not supplied.
+ type: str
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: false
+ type: bool
+ destination_host:
+ description:
+ - Name of the destination host the virtual machine should be running on.
+ - Version 2.6 onwards, this parameter is not a required parameter, unlike the previous versions.
+ aliases: ['destination']
+ type: str
+ destination_cluster:
+ version_added: '2.5.0'
+ description:
+ - Name of the destination cluster the virtual machine should be running on.
+ - Only works if drs is enabled for this cluster.
+ type: str
+ destination_datastore_cluster:
+ version_added: '2.5.0'
+ description:
+ - Name of the destination datastore cluster (storage pod) the virtual machine's vmdk should be moved on.
+ - Only works if drs is enabled for the cluster the vm is running / should run.
+ type: str
+ destination_datastore:
+ description:
+ - Name of the destination datastore the virtual machine's vmdk should be moved on.
+ aliases: ['datastore']
+ type: str
+ destination_datacenter:
+ description:
+ - Name of the destination datacenter the datastore is located on.
+ - Optional, required only when datastores are shared across datacenters.
+ type: str
+ destination_resourcepool:
+ description:
+ - Name of the destination resource pool where the virtual machine should be running.
+ - Resource pool is required if vmotion is done between hosts which are part of different clusters or datacenters.
+      - If not passed, the resource pool object will be retrieved from the parent of host_obj.
+ aliases: ['resource_pool']
+ type: str
+ timeout:
+ description:
+ - The timeout in seconds. When the timeout is reached, the module will fail.
+ type: int
+ default: 3600
+ version_added: '3.4.0'
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Perform vMotion of virtual machine
+ community.vmware.vmware_vmotion:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ vm_name: 'vm_name_as_per_vcenter'
+ destination_host: 'destination_host_as_per_vcenter'
+ delegate_to: localhost
+
+- name: Perform vMotion of virtual machine
+ community.vmware.vmware_vmotion:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ moid: vm-42
+ destination_host: 'destination_host_as_per_vcenter'
+ delegate_to: localhost
+
+- name: Perform vMotion of virtual machine to resource_pool
+ community.vmware.vmware_vmotion:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ moid: vm-42
+ destination_host: 'destination_host_as_per_vcenter'
+ destination_resourcepool: 'destination_resourcepool_as_per_vcenter'
+ delegate_to: localhost
+
+- name: Perform storage vMotion of virtual machine
+ community.vmware.vmware_vmotion:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ vm_name: 'vm_name_as_per_vcenter'
+ destination_datastore: 'destination_datastore_as_per_vcenter'
+ delegate_to: localhost
+
+- name: Perform storage vMotion and host vMotion of virtual machine
+ community.vmware.vmware_vmotion:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ vm_name: 'vm_name_as_per_vcenter'
+ destination_host: 'destination_host_as_per_vcenter'
+ destination_datastore: 'destination_datastore_as_per_vcenter'
+ delegate_to: localhost
+
+- name: Perform storage vMotion to a Storage Cluster and vMotion to a Cluster of virtual machine
+ community.vmware.vmware_vmotion:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ vm_name: 'vm_name_as_per_vcenter'
+ destination_cluster: 'destination_cluster_as_per_vcenter'
+ destination_datastore_cluster: 'destination_datastore_cluster_as_per_vcenter'
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+running_host:
+ description:
+ - List the host the virtual machine is registered to.
+    - Only returned if a vMotion to a host or cluster was requested.
+ returned: changed or success
+ type: str
+ sample: 'host1.example.com'
+datastore:
+ description:
+ - List the datastore the virtual machine is on.
+    - Only returned if a storage vMotion to a datastore or datastore cluster was requested.
+ returned: changed or success
+ type: str
+ sample: 'datastore1'
+'''
+
+try:
+ from pyVmomi import vim, VmomiSupport
+except ImportError:
+ pass
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ PyVmomi, find_hostsystem_by_name,
+ find_vm_by_id, find_datastore_by_name,
+ find_resource_pool_by_name,
+ find_datacenter_by_name,
+ find_cluster_by_name, get_all_objs,
+ vmware_argument_spec, wait_for_task, TaskError)
+
+
+class VmotionManager(PyVmomi):
    def __init__(self, module):
        """Validate parameters, decide whether a migration is needed and run it.

        The whole module life cycle lives in this constructor: it resolves the
        VM and every destination object (host, cluster, datastore, datastore
        cluster, resource pool), decides whether a (storage) vMotion is
        actually required, and finishes via module.exit_json / fail_json.
        """
        super(VmotionManager, self).__init__(module)
        self.vm = None
        self.vm_uuid = self.params.get('vm_uuid', None)
        self.use_instance_uuid = self.params.get('use_instance_uuid', False)
        self.vm_name = self.params.get('vm_name', None)
        self.moid = self.params.get('moid') or None
        self.destination_datacenter = self.params.get('destination_datacenter', None)
        self.timeout = self.params.get('timeout')
        result = dict()

        self.get_vm()
        if self.vm is None:
            vm_id = self.vm_uuid or self.vm_name or self.moid
            self.module.fail_json(msg="Failed to find the virtual machine with %s" % vm_id)

        # Get Datacenter if specified by user
        dest_datacenter = self.destination_datacenter
        datacenter_object = None
        if dest_datacenter is not None:
            datacenter_object = find_datacenter_by_name(content=self.content, datacenter_name=dest_datacenter)
            if datacenter_object:
                dest_datacenter = datacenter_object

        # Get Destination Host System or Cluster if specified by user
        dest_host_name = self.params.get('destination_host', None)
        dest_cluster_name = self.params.get('destination_cluster', None)

        if dest_host_name and dest_cluster_name:
            self.module.fail_json(msg="Please only define one: destination_host or destination_cluster")

        self.host_object = None
        self.cluster_object = None
        self.cluster_hosts = None

        if dest_host_name is not None:
            self.host_object = find_hostsystem_by_name(content=self.content,
                                                       hostname=dest_host_name)
            if self.host_object is None:
                self.module.fail_json(msg="Unable to find destination host %s" % dest_host_name)
        if dest_cluster_name is not None:
            self.cluster_object = find_cluster_by_name(content=self.content,
                                                       cluster_name=dest_cluster_name, datacenter=datacenter_object)
            if self.cluster_object:
                self.cluster_hosts = []
                for host in self.cluster_object.host:
                    self.cluster_hosts.append(host)
            else:
                self.module.fail_json(msg="Unable to find destination cluster %s" % dest_cluster_name)

        # Get Destination Datastore or Datastore Cluster if specified by user
        dest_datastore = self.params.get('destination_datastore', None)
        dest_datastore_cluster = self.params.get('destination_datastore_cluster', None)

        if dest_datastore and dest_datastore_cluster:
            self.module.fail_json(msg="Please only define one: destination_datastore or destination_datastore_cluster")

        self.datastore_object = None
        self.datastore_cluster_object = None

        if dest_datastore is not None:
            self.datastore_object = find_datastore_by_name(content=self.content,
                                                           datastore_name=dest_datastore,
                                                           datacenter_name=dest_datacenter)
        if dest_datastore_cluster is not None:
            # unable to use find_datastore_cluster_by_name module
            data_store_clusters = get_all_objs(self.content, [vim.StoragePod], folder=self.content.rootFolder)
            for dsc in data_store_clusters:
                if dsc.name == dest_datastore_cluster:
                    self.datastore_cluster_object = dsc

        # At-least one of datastore, datastore cluster, host system or cluster is required to migrate
        if self.datastore_object is None and self.datastore_cluster_object is None and self.host_object is None and self.cluster_object is None:
            self.module.fail_json(msg="Unable to find destination datastore, destination datastore cluster,"
                                      " destination host system or destination cluster.")

        # Check if datastore is required, this check is required if destination
        # and source host system does not share same datastore.
        # NOTE(review): the first condition below has no matching else branch,
        # so only the cluster check contributes False entries -- confirm this
        # asymmetry is intended.
        host_datastore_required = []
        for vm_datastore in self.vm.datastore:
            if self.host_object and vm_datastore not in self.host_object.datastore:
                host_datastore_required.append(True)
            if self.cluster_object and vm_datastore not in self.cluster_object.datastore:
                host_datastore_required.append(True)
            else:
                host_datastore_required.append(False)

        if any(host_datastore_required) and (dest_datastore is None and dest_datastore_cluster is None):
            msg = "Destination host system or cluster does not share" \
                  " datastore ['%s'] with source host system ['%s'] on which" \
                  " virtual machine is located. Please specify destination_datastore or destination_datastore_cluster" \
                  " to rectify this problem." % ("', '".join([ds.name for ds in (self.host_object.datastore
                                                                                 or self.cluster_object.datastore)]),
                                                "', '".join([ds.name for ds in self.vm.datastore]))

            self.module.fail_json(msg=msg)

        # Check for changes
        storage_vmotion_needed = True
        change_required = True
        # Datastore name of the VM's .vmx path, e.g. "[ds1] vm/vm.vmx" -> "ds1".
        vm_ds_name = self.vm.config.files.vmPathName.split(' ', 1)[0].replace('[', '').replace(']', '')
        if self.host_object and self.datastore_object:
            # We have both host system and datastore object
            if not self.datastore_object.summary.accessible:
                # Datastore is not accessible
                self.module.fail_json(msg='Destination datastore %s is'
                                          ' not accessible.' % dest_datastore)

            if self.datastore_object not in self.host_object.datastore:
                # Datastore is not associated with host system
                self.module.fail_json(msg="Destination datastore %s provided"
                                          " is not associated with destination"
                                          " host system %s. Please specify"
                                          " datastore value ['%s'] associated with"
                                          " the given host system." % (dest_datastore,
                                                                       dest_host_name,
                                                                       "', '".join([ds.name for ds in self.host_object.datastore])))

            if self.vm.runtime.host.name == dest_host_name and dest_datastore in [ds.name for ds in self.vm.datastore]:
                change_required = False

        elif self.host_object and self.datastore_cluster_object:
            if not set(self.datastore_cluster_object.childEntity) <= set(self.host_object.datastore):
                self.module.fail_json(msg="Destination datastore cluster %s provided"
                                          " is not associated with destination"
                                          " host system %s. Please specify"
                                          " datastore value ['%s'] associated with"
                                          " the given host system." % (dest_datastore_cluster,
                                                                       dest_host_name,
                                                                       "', '".join([ds.name for ds in
                                                                                    self.host_object.datastore])))
            if self.vm.runtime.host.name == dest_host_name and vm_ds_name in [ds.name for ds in
                                                                              self.datastore_cluster_object.childEntity]:
                change_required = False

        elif self.cluster_object and self.datastore_object:
            if not self.datastore_object.summary.accessible:
                # Datastore is not accessible
                self.module.fail_json(msg='Destination datastore %s is'
                                          ' not accessible.' % dest_datastore)

            if self.datastore_object not in self.cluster_object.datastore:
                # Datastore is not associated with host system
                self.module.fail_json(msg="Destination datastore %s provided"
                                          " is not associated with destination"
                                          " cluster %s. Please specify"
                                          " datastore value ['%s'] associated with"
                                          " the given host system." % (dest_datastore,
                                                                       dest_cluster_name,
                                                                       "', '".join([ds.name for ds in
                                                                                    self.cluster_object.datastore])))

            if self.vm.runtime.host.name in [host.name for host in self.cluster_hosts] and \
                    dest_datastore in [ds.name for ds in self.vm.datastore]:
                change_required = False

        elif self.cluster_object and self.datastore_cluster_object:
            if not set(self.datastore_cluster_object.childEntity) <= set(self.cluster_object.datastore):
                self.module.fail_json(msg="Destination datastore cluster %s provided"
                                          " is not associated with destination"
                                          " cluster %s. Please specify"
                                          " datastore value ['%s'] associated with"
                                          " the given host system." % (dest_datastore_cluster,
                                                                       dest_cluster_name,
                                                                       "', '".join([ds.name for ds in
                                                                                    self.cluster_object.datastore])))
            if self.vm.runtime.host.name in [host.name for host in self.cluster_hosts] and \
                    vm_ds_name in [ds.name for ds in self.datastore_cluster_object.childEntity]:
                change_required = False

        elif (self.host_object and self.datastore_object is None) or (
                self.host_object and self.datastore_cluster_object is None):
            if self.vm.runtime.host.name == dest_host_name:
                # VM is already located on same host
                change_required = False

            storage_vmotion_needed = False

        elif (self.cluster_object and self.datastore_object is None) or (
                self.cluster_object and self.datastore_cluster_object is None):
            if self.vm.runtime.host.name in [host.name for host in self.cluster_hosts]:
                # VM is already located on this cluster
                change_required = False

            storage_vmotion_needed = False

        elif (self.datastore_object and self.host_object is None) or (
                self.datastore_object and self.cluster_object is None):
            if self.datastore_object in self.vm.datastore:
                # VM is already located on same datastore
                change_required = False

            if not self.datastore_object.summary.accessible:
                # Datastore is not accessible
                self.module.fail_json(msg='Destination datastore %s is'
                                          ' not accessible.' % dest_datastore)

        elif (self.datastore_cluster_object and self.host_object is None) or (
                self.datastore_cluster_object and self.cluster_object is None):
            if vm_ds_name in [ds.name for ds in self.datastore_cluster_object.childEntity]:
                # VM is already located on a datastore in the datastore cluster
                change_required = False

        # DRS placement resolves the concrete host/datastore when only a
        # cluster or a datastore cluster was given.
        if self.cluster_object or self.datastore_cluster_object:
            self.set_placement()

        # Get Destination resourcepool
        dest_resourcepool = self.params.get('destination_resourcepool', None)
        self.resourcepool_object = None
        if dest_resourcepool:
            self.resourcepool_object = find_resource_pool_by_name(content=self.content,
                                                                  resource_pool_name=dest_resourcepool)
            if self.resourcepool_object is None:
                self.module.fail_json(msg="Unable to find destination resource pool object for %s" % dest_resourcepool)
        elif not dest_resourcepool and self.host_object:
            # Default to the resource pool of the destination host's parent.
            self.resourcepool_object = self.host_object.parent.resourcePool

        if module.check_mode:
            if self.host_object:
                result['running_host'] = self.host_object.name
            if self.datastore_object:
                result['datastore'] = self.datastore_object.name
            result['changed'] = change_required
            module.exit_json(**result)

        if change_required:
            # Migrate VM and get Task object back
            task_object = self.migrate_vm()
            # Wait for task to complete
            try:
                wait_for_task(task_object, timeout=self.timeout)
            except TaskError as task_error:
                self.module.fail_json(msg=to_native(task_error))
            # If task was a success the VM has moved, update running_host and complete module
            if task_object.info.state == vim.TaskInfo.State.success:
                # The storage layout is not automatically refreshed, so we trigger it to get coherent module return values
                if storage_vmotion_needed:
                    self.vm.RefreshStorageInfo()
                if self.host_object:
                    result['running_host'] = self.host_object.name
                if self.datastore_object:
                    result['datastore'] = self.datastore_object.name
                result['changed'] = True
                module.exit_json(**result)
            else:
                msg = 'Unable to migrate virtual machine due to an error, please check vCenter'
                if task_object.info.error is not None:
                    msg += " : %s" % task_object.info.error
                module.fail_json(msg=msg)
        else:
            try:
                if self.host_object:
                    result['running_host'] = self.host_object.name
                if self.datastore_object:
                    result['datastore'] = self.datastore_object.name
            except vim.fault.NoPermission:
                result['running_host'] = 'NA'
                result['datastore'] = 'NA'
            result['changed'] = False
            module.exit_json(**result)
+
+ def migrate_vm(self):
+ """
+ Migrate virtual machine and return the task.
+ """
+ relocate_spec = vim.vm.RelocateSpec(host=self.host_object,
+ datastore=self.datastore_object,
+ pool=self.resourcepool_object)
+ task_object = self.vm.Relocate(relocate_spec)
+ return task_object
+
+ def set_placement(self):
+ """
+ Get the host from the cluster and/or the datastore from datastore cluster.
+ """
+ if self.cluster_object is None:
+ if self.host_object:
+ self.cluster_object = self.host_object.parent
+ else:
+ self.cluster_object = self.vm.runtime.host.parent
+
+ if not self.cluster_object.configuration.drsConfig.enabled:
+ self.module.fail_json(
+ msg='destination_cluster or destination_storage_cluster is only allowed for clusters with active drs.')
+
+ relocate_spec = vim.vm.RelocateSpec(host=self.host_object,
+ datastore=self.datastore_object)
+ if self.datastore_cluster_object:
+ storagePods = [self.datastore_cluster_object]
+ else:
+ storagePods = None
+ placement_spec = vim.cluster.PlacementSpec(storagePods=storagePods,
+ hosts=self.cluster_hosts,
+ vm=self.vm,
+ relocateSpec=relocate_spec)
+ placement = self.cluster_object.PlaceVm(placement_spec)
+
+ if self.host_object is None:
+ self.host_object = placement.recommendations[0].action[0].targetHost
+ if self.datastore_object is None:
+ self.datastore_object = placement.recommendations[0].action[0].relocateSpec.datastore
+
+ def get_vm(self):
+ """
+ Find unique virtual machine either by UUID or Name.
+ Returns: virtual machine object if found, else None.
+
+ """
+ vms = []
+ if self.vm_uuid:
+ if not self.use_instance_uuid:
+ vm_obj = find_vm_by_id(self.content, vm_id=self.params['vm_uuid'], vm_id_type="uuid")
+ elif self.use_instance_uuid:
+ vm_obj = find_vm_by_id(self.content, vm_id=self.params['vm_uuid'], vm_id_type="instance_uuid")
+ vms = [vm_obj]
+ elif self.vm_name:
+ objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
+ for temp_vm_object in objects:
+ if len(temp_vm_object.propSet) != 1:
+ continue
+ if temp_vm_object.obj.name == self.vm_name:
+ vms.append(temp_vm_object.obj)
+ break
+ elif self.moid:
+ vm_obj = VmomiSupport.templateOf('VirtualMachine')(self.moid, self.si._stub)
+ if vm_obj:
+ vms.append(vm_obj)
+
+ if len(vms) > 1:
+ self.module.fail_json(msg="Multiple virtual machines with same name %s found."
+ " Please specify vm_uuid instead of vm_name." % self.vm_name)
+
+ if vms:
+ self.vm = vms[0]
+
+
def main():
    """Module entry point: build the argument spec and run VmotionManager."""
    extra_options = dict(
        vm_name=dict(aliases=['vm']),
        vm_uuid=dict(aliases=['uuid']),
        moid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        destination_host=dict(aliases=['destination']),
        destination_resourcepool=dict(aliases=['resource_pool']),
        destination_datastore=dict(aliases=['datastore']),
        destination_datacenter=dict(type='str'),
        destination_cluster=dict(type='str'),
        destination_datastore_cluster=dict(type='str'),
        timeout=dict(type='int', default=3600),
    )
    argument_spec = vmware_argument_spec()
    argument_spec.update(extra_options)

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=[
            ['destination_host', 'destination_datastore', 'destination_cluster', 'destination_datastore_cluster'],
            ['vm_uuid', 'vm_name', 'moid'],
        ],
        mutually_exclusive=[
            ['vm_uuid', 'vm_name', 'moid'],
        ],
    )

    # All work (validation, migration, exit_json) happens in the constructor.
    VmotionManager(module)


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_vsan_cluster.py b/ansible_collections/community/vmware/plugins/modules/vmware_vsan_cluster.py
new file mode 100644
index 000000000..451f801d5
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_vsan_cluster.py
@@ -0,0 +1,120 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Russell Teague <rteague2 () csc.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_vsan_cluster
+short_description: Configure VSAN clustering on an ESXi host
+description:
+ - This module can be used to configure VSAN clustering on an ESXi host
+author:
+- Russell Teague (@mtnbikenc)
+options:
+ cluster_uuid:
+ description:
+ - Desired cluster UUID
+ required: false
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Configure VMware VSAN Cluster
+ hosts: deploy_node
+ tags:
+ - vsan
+ tasks:
+ - name: Configure VSAN on first host
+ community.vmware.vmware_vsan_cluster:
+ hostname: "{{ groups['esxi'][0] }}"
+ username: "{{ esxi_username }}"
+ password: "{{ site_password }}"
+ delegate_to: localhost
+ register: vsan_cluster
+
+ - name: Configure VSAN on remaining hosts
+ community.vmware.vmware_vsan_cluster:
+ hostname: "{{ item }}"
+ username: "{{ esxi_username }}"
+ password: "{{ site_password }}"
+ cluster_uuid: "{{ vsan_cluster.cluster_uuid }}"
+ delegate_to: localhost
+ loop: "{{ groups['esxi'][1:] }}"
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ HAS_PYVMOMI, connect_to_api, get_all_objs, vmware_argument_spec,
+ wait_for_task)
+
+
def create_vsan_cluster(host_system, new_cluster_uuid):
    """Enable VSAN on *host_system*, optionally joining an existing cluster.

    When *new_cluster_uuid* is not None the host joins that cluster instead
    of forming a new one. Auto-claiming of local storage is always enabled.

    Returns a (changed, task_result, cluster_uuid) tuple; cluster_uuid is the
    UUID the host reports after the update and can be passed to the next host
    that should join the same cluster.
    """
    vsan_system = host_system.configManager.vsanSystem

    config = vim.vsan.host.ConfigInfo()
    config.enabled = True
    if new_cluster_uuid is not None:
        # Join the given cluster rather than creating a new one.
        config.clusterInfo = vim.vsan.host.ConfigInfo.ClusterInfo()
        config.clusterInfo.uuid = new_cluster_uuid
    config.storageInfo = vim.vsan.host.ConfigInfo.StorageInfo()
    config.storageInfo.autoClaimStorage = True

    changed, result = wait_for_task(vsan_system.UpdateVsan_Task(config))
    cluster_uuid = vsan_system.QueryHostStatus().uuid
    return changed, result, cluster_uuid
+
+
def main():
    """Module entry point: enable VSAN clustering on the first located host."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(cluster_uuid=dict(required=False, type='str')))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    new_cluster_uuid = module.params['cluster_uuid']

    try:
        content = connect_to_api(module, False)
        hosts = get_all_objs(content, [vim.HostSystem])
        if not hosts:
            module.fail_json(msg="Unable to locate Physical Host.")
        # This module talks to a single ESXi host, so take the first match.
        host_system = list(hosts)[0]
        changed, result, cluster_uuid = create_vsan_cluster(host_system, new_cluster_uuid)
    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(msg=runtime_fault.msg)
    except vmodl.MethodFault as method_fault:
        module.fail_json(msg=method_fault.msg)
    except Exception as e:
        module.fail_json(msg=str(e))
    else:
        module.exit_json(changed=changed, result=result, cluster_uuid=cluster_uuid)


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_vsan_hcl_db.py b/ansible_collections/community/vmware/plugins/modules/vmware_vsan_hcl_db.py
new file mode 100644
index 000000000..6f0100330
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_vsan_hcl_db.py
@@ -0,0 +1,111 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2023, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_vsan_hcl_db
+version_added: '3.5.0'
+short_description: Manages the vSAN Hardware Compatibility List (HCL) database
+description:
+ - Manages vSAN HCL db on vSphere
+ - DB file can be downloaded from https://partnerweb.vmware.com/service/vsan/all.json
+author:
+ - Philipp Fruck (@p-fruck)
+requirements:
+ - vSAN Management SDK, which needs to be downloaded from VMware and installed manually.
+options:
+ source:
+ description:
+ - The path to the HCL db file
+ type: str
+ required: true
+extends_documentation_fragment:
+ - community.vmware.vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: Fetch HCL db file
+ ansible.builtin.get_url:
+ url: https://partnerweb.vmware.com/service/vsan/all.json
+ dest: hcl_db.json
+ force: true
+ delegate_to: localhost
+
+- name: Upload HCL db file to vCenter
+ community.vmware.vmware_vsan_hcl_db:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ source: hcl_db.json
+ delegate_to: localhost
+'''
+
+RETURN = r'''#
+'''
+
+import base64
+import gzip
+import traceback
+
+VSANPYTHONSDK_IMP_ERR = None
+try:
+ import vsanapiutils
+ HAS_VSANPYTHONSDK = True
+except ImportError:
+ VSANPYTHONSDK_IMP_ERR = traceback.format_exc()
+ HAS_VSANPYTHONSDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+
+
class VsanApi(PyVmomi):
    """Thin wrapper around the vSAN cluster health system managed object."""

    def __init__(self, module):
        super(VsanApi, self).__init__(module)
        # Reuse the SOAP stub (and its SSL context) of the established
        # vCenter session for the vSAN-specific managed objects.
        stub = self.si._GetStub()
        context = stub.schemeArgs.get('context')
        version = vsanapiutils.GetLatestVmodlVersion(module.params['hostname'])
        mos = vsanapiutils.GetVsanVcMos(stub, context=context, version=version)
        self.vsanClusterHealthSystem = mos['vsan-cluster-health-system']

    def upload_hcl_db(self, content):
        """Gzip and base64-encode *content*, then upload it as the HCL database."""
        encoded = base64.b64encode(gzip.compress(content)).decode('ascii')
        self.vsanClusterHealthSystem.VsanVcUploadHclDb(db=encoded)
+
+
def main():
    """Module entry point: read the HCL db file and upload it to the vCenter."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(
        source=dict(type='str', required=True)
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
    )

    api = VsanApi(module)
    try:
        with open(module.params['source'], 'rb') as handle:
            api.upload_hcl_db(handle.read())
    except Exception as e:
        module.fail_json(msg=str(e))

    # The upload unconditionally replaces the db, so always report a change.
    module.exit_json(changed=True)


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_vsan_health_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_vsan_health_info.py
new file mode 100644
index 000000000..89db649de
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_vsan_health_info.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2019, OVH SAS
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_vsan_health_info
+short_description: Gather information about a VMware vSAN cluster's health
+description:
+ - "Gather information about a VMware vSAN cluster's health."
+options:
+ datacenter:
+ description:
+ - Name of the Datacenter.
+ required: false
+ type: str
+ aliases: [ 'datacenter_name' ]
+ cluster_name:
+ description:
+ - Name of the vSAN cluster.
+ required: true
+ type: str
+ fetch_from_cache:
+ description:
+ - C(true) to return the result from cache directly instead of running the full health check.
+ required: false
+ default: false
+ type: bool
+requirements:
+ - VMware vSAN Python's SDK
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+author:
+ - Erwan Quelin (@equelin)
+'''
+
+EXAMPLES = r'''
+- name: Gather health info from a vSAN's cluster
+ community.vmware.vmware_vsan_health_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ cluster_name: 'vSAN01'
+ fetch_from_cache: false
+
+- name: Gather health info from a vSAN's cluster with datacenter
+ community.vmware.vmware_vsan_health_info:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ cluster_name: 'vSAN01'
+ datacenter: 'Datacenter_01'
+ fetch_from_cache: true
+'''
+
+RETURN = r'''
+vsan_health_info:
+ description: vSAN cluster health info
+ returned: on success
+ type: dict
+ sample: {
+ "_vimtype": "vim.cluster.VsanClusterHealthSummary",
+ "burnInTest": null,
+ "clusterStatus": {
+ "_vimtype": "vim.cluster.VsanClusterHealthSystemStatusResult",
+ "goalState": "installed",
+ "status": "green",
+ "trackedHostsStatus": [
+ {
+ "_vimtype": "vim.host.VsanHostHealthSystemStatusResult",
+ "hostname": "esxi01.example.com",
+ "issues": [],
+ "status": "green"
+ },
+ {
+ "_vimtype": "vim.host.VsanHostHealthSystemStatusResult",
+ "hostname": "esxi04.example.com",
+ "issues": [],
+ "status": "green"
+ },
+ {
+ "_vimtype": "vim.host.VsanHostHealthSystemStatusResult",
+ "hostname": "esxi02.example.com",
+ "issues": [],
+ "status": "green"
+ },
+ {
+ "_vimtype": "vim.host.VsanHostHealthSystemStatusResult",
+ "hostname": "esxi03.example.com",
+ "issues": [],
+ "status": "green"
+ }
+ ],
+ "untrackedHosts": []
+ }
+ }
+'''
+
+import json
+import traceback
+
+try:
+ from pyVmomi import vmodl, VmomiSupport
+ HAS_PYVMOMI = True
+ HAS_PYVMOMIJSON = hasattr(VmomiSupport, 'VmomiJSONEncoder')
+except ImportError:
+ PYVMOMI_IMP_ERR = traceback.format_exc()
+ HAS_PYVMOMI = False
+ HAS_PYVMOMIJSON = False
+
+VSANPYTHONSDK_IMP_ERR = None
+try:
+ import vsanapiutils
+ HAS_VSANPYTHONSDK = True
+except ImportError:
+ VSANPYTHONSDK_IMP_ERR = traceback.format_exc()
+ HAS_VSANPYTHONSDK = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+
+
class VSANInfoManager(PyVmomi):
    """Collects health information for a vSAN cluster via the vSAN SDK."""

    def __init__(self, module):
        super(VSANInfoManager, self).__init__(module)
        # Resolved lazily in gather_info().
        self.datacenter = None
        self.cluster = None

    def gather_info(self):
        """Run (or fetch from cache) the vSAN health summary and exit the module."""
        datacenter_name = self.module.params.get('datacenter')
        if datacenter_name:
            self.datacenter = self.find_datacenter_by_name(datacenter_name)
            if self.datacenter is None:
                self.module.fail_json(msg="Datacenter %s does not exist." % datacenter_name)

        cluster_name = self.module.params.get('cluster_name')
        self.cluster = self.find_cluster_by_name(cluster_name=cluster_name, datacenter_name=self.datacenter)
        if self.cluster is None:
            self.module.fail_json(msg="Cluster %s does not exist." % cluster_name)

        # Reuse the SOAP stub of the established vCenter session for the
        # vSAN-specific managed objects.
        client_stub = self.si._GetStub()
        ssl_context = client_stub.schemeArgs.get('context')
        api_version = vsanapiutils.GetLatestVmodlVersion(self.module.params['hostname'])
        vc_mos = vsanapiutils.GetVsanVcMos(client_stub, context=ssl_context, version=api_version)
        health_system = vc_mos['vsan-cluster-health-system']

        summary = {}
        try:
            summary = health_system.VsanQueryVcClusterHealthSummary(
                cluster=self.cluster,
                fetchFromCache=self.module.params.get('fetch_from_cache'),
            )
        except vmodl.fault.NotFound as not_found:
            self.module.fail_json(msg=not_found.msg)
        except vmodl.fault.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)

        # Round-trip through the pyVmomi JSON encoder to turn managed-object
        # structures into plain, serializable data.
        serialized = json.dumps(summary, cls=VmomiSupport.VmomiJSONEncoder, sort_keys=True, strip_dynamic=True)
        self.module.exit_json(changed=False, vsan_health_info=json.loads(serialized))
+
+
def main():
    """Module entry point: validate SDK availability and gather vSAN health info."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        datacenter=dict(required=False, type='str', aliases=['datacenter_name']),
        cluster_name=dict(required=True, type='str'),
        fetch_from_cache=dict(required=False, type='bool', default=False),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    # The vSAN SDK and a JSON-capable pyvmomi are hard requirements.
    if not HAS_VSANPYTHONSDK:
        module.fail_json(msg=missing_required_lib('vSAN Management SDK for Python'), exception=VSANPYTHONSDK_IMP_ERR)
    if not HAS_PYVMOMIJSON:
        module.fail_json(msg='The installed version of pyvmomi lacks JSON output support; need pyvmomi>6.7.1')

    VSANInfoManager(module).gather_info()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_vsan_release_catalog.py b/ansible_collections/community/vmware/plugins/modules/vmware_vsan_release_catalog.py
new file mode 100644
index 000000000..14a464f2e
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_vsan_release_catalog.py
@@ -0,0 +1,100 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2023, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_vsan_release_catalog
+version_added: '3.7.0'
+short_description: Uploads the vSAN Release Catalog
+description:
+ - Manually upload the vSAN Release Catalog to the vCenter
+ - See https://kb.vmware.com/s/article/58891 for more details
+author:
+ - Philipp Fruck (@p-fruck)
+requirements:
+ - vSAN Management SDK, which needs to be downloaded from VMware and installed manually.
+options:
+ source:
+ description:
+ - The path to the release catalog file
+ type: str
+ required: true
+extends_documentation_fragment:
+ - community.vmware.vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: Upload release catalog file to vCenter
+ community.vmware.vmware_vsan_release_catalog:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ source: release_catalog.json
+ delegate_to: localhost
+'''
+
+RETURN = r'''#
+'''
+
+import traceback
+
+VSANPYTHONSDK_IMP_ERR = None
+try:
+ import vsanapiutils
+ HAS_VSANPYTHONSDK = True
+except ImportError:
+ VSANPYTHONSDK_IMP_ERR = traceback.format_exc()
+ HAS_VSANPYTHONSDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+
+
class VsanApi(PyVmomi):
    """Thin wrapper around the vSAN managed objects needed for catalog upload."""

    def __init__(self, module):
        super(VsanApi, self).__init__(module)

        # Reuse the SOAP stub (and its SSL context) from the existing
        # vCenter connection to obtain the vSAN managed objects.
        stub = self.si._GetStub()
        context = stub.schemeArgs.get('context')
        version = vsanapiutils.GetLatestVmodlVersion(module.params['hostname'])
        managed_objects = vsanapiutils.GetVsanVcMos(stub, context=context, version=version)
        self.vsanVumSystem = managed_objects['vsan-vum-system']

    def upload_release_catalog(self, content):
        """Upload the release catalog content to the vSAN VUM system."""
        self.vsanVumSystem.VsanVcUploadReleaseDb(db=content)
+
+
def main():
    """Entry point: read the release catalog file and upload it to vCenter."""
    argument_spec = vmware_argument_spec()

    argument_spec.update(dict(
        source=dict(type='str', required=True)
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
    )

    # Fail early with a helpful message when the vSAN SDK is not installed.
    # Without this guard, VsanApi.__init__ would crash with a NameError on
    # the missing 'vsanapiutils' module instead of a clean module failure.
    if not HAS_VSANPYTHONSDK:
        from ansible.module_utils.basic import missing_required_lib
        module.fail_json(msg=missing_required_lib('vSAN Management SDK for Python'),
                         exception=VSANPYTHONSDK_IMP_ERR)

    source = module.params['source']
    api = VsanApi(module)

    try:
        with open(source, 'r') as f:
            api.upload_release_catalog(f.read())
    except Exception as e:
        module.fail_json(msg=str(e))

    module.exit_json(changed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_vspan_session.py b/ansible_collections/community/vmware/plugins/modules/vmware_vspan_session.py
new file mode 100644
index 000000000..ac1a2999f
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_vspan_session.py
@@ -0,0 +1,669 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, CrySyS Lab <www.crysys.hu>
+# Copyright: (c) 2018, Peter Gyorgy <gyorgy.peter@edu.bme.hu>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_vspan_session
+short_description: Create or remove a Port Mirroring session.
+description:
+ - This module can be used to create, delete or edit different kinds of port mirroring sessions.
+author:
+- Peter Gyorgy (@gyorgypeter)
+options:
+ switch:
+ description:
+ - The name of the distributed vSwitch on which to add or remove the mirroring session.
+ required: true
+ aliases: [ 'switch_name' ]
+ type: str
+ name:
+ description:
+ - Name of the session.
+ required: true
+ type: str
+ state:
+ choices:
+ - 'present'
+ - 'absent'
+ description:
+ - Create or remove the session.
+ required: true
+ type: str
+ session_type:
+ default: 'dvPortMirror'
+ choices:
+ - 'encapsulatedRemoteMirrorSource'
+ - 'remoteMirrorDest'
+ - 'remoteMirrorSource'
+ - 'dvPortMirror'
+ description:
+ - Select the mirroring type.
+ - In C(encapsulatedRemoteMirrorSource) session, Distributed Ports can be used as source entities,
+ and IP address can be used as destination entities.
+ - In C(remoteMirrorDest) session, VLAN IDs can be used as source entities, and
+ Distributed Ports can be used as destination entities.
+ - In C(remoteMirrorSource) session, Distributed Ports can be used as source
+ entities, and uplink ports name can be used as destination entities.
+ - In C(dvPortMirror) session, Distributed Ports can be used as both source and
+ destination entities.
+ required: false
+ type: str
+ enabled:
+ type: bool
+ default: true
+ description:
+ - Whether the session is enabled.
+ description:
+ description:
+ - The description for the session.
+ required: false
+ type: str
+ source_port_transmitted:
+ description:
+ - Source port for which transmitted packets are mirrored.
+ required: false
+ type: str
+ source_port_received:
+ description:
+ - Source port for which received packets are mirrored.
+ required: false
+ type: str
+ destination_port:
+ description:
+ - Destination port that received the mirrored packets.
+ - Also any port designated in the value of this
+ property can not match the source port in any of the Distributed Port Mirroring session.
+ required: false
+ type: str
+ encapsulation_vlan_id:
+ description:
+ - VLAN ID used to encapsulate the mirrored traffic.
+ required: false
+ type: int
+ strip_original_vlan:
+ description:
+ - Whether to strip the original VLAN tag.
+ - If false, the original VLAN tag will be preserved on the mirrored traffic.
+ - If C(encapsulationVlanId) has been set and this property is false, the frames will be double tagged
+ with the original VLAN ID as the inner tag.
+ type: bool
+ required: false
+ mirrored_packet_length:
+ description:
+ - An integer that describes how much of each frame to mirror.
+ - If unset, all of the frame would be mirrored.
+ - Setting this property to a smaller value is useful when the consumer will look only at the headers.
+ - The value cannot be less than 60.
+ required: false
+ type: int
+ normal_traffic_allowed:
+ description:
+ - Whether or not destination ports can send and receive "normal" traffic.
+ - Setting this to false will make mirror ports be used solely for mirroring and not double as normal access ports.
+ type: bool
+ required: false
+ sampling_rate:
+ description:
+ - Sampling rate of the session.
+ - If its value is n, one of every n packets is mirrored.
+ - Valid values are between 1 to 65535.
+ type: int
+ required: false
+ source_vm_transmitted:
+ description:
+ - With this parameter it is possible, to add a NIC of a VM to a port mirroring session.
+ suboptions:
+ name:
+ description:
+ - Name of the VM.
+ type: str
+ nic_label:
+ description:
+ - Label of the network interface card to use.
+ type: str
+ type: dict
+ source_vm_received:
+ description:
+ - With this parameter it is possible, to add a NIC of a VM to a port mirroring session.
+ suboptions:
+ name:
+ description:
+ - Name of the VM.
+ type: str
+ nic_label:
+ description:
+ - Label of the network interface card to use.
+ type: str
+ type: dict
+ destination_vm:
+ description:
+ - With this parameter it is possible, to add a NIC of a VM to a port mirroring session.
+ suboptions:
+ name:
+ description:
+ - Name of the VM.
+ type: str
+ nic_label:
+ description:
+ - Label of the network interface card to use.
+ type: str
+ required: false
+ type: dict
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Create distributed mirroring session.
+ community.vmware.vmware_vspan_session:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ switch_name: dvSwitch
+ state: present
+ name: Basic Session
+ enabled: true
+ description: "Example description"
+ source_port_transmitted: 817
+ source_port_received: 817
+ destination_port: 815
+ delegate_to: localhost
+
+- name: Create remote destination mirroring session.
+ community.vmware.vmware_vspan_session:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ switch_name: dvSwitch
+ state: present
+ name: Remote Session
+ enabled: true
+ description: "Example description"
+ source_port_received: 105
+ destination_port: 815
+ session_type: "remoteMirrorDest"
+ delegate_to: localhost
+
+- name: Delete remote destination mirroring session.
+ community.vmware.vmware_vspan_session:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ switch_name: dvSwitch
+ state: absent
+ name: Remote Session
+ delegate_to: localhost
+'''
+
+RETURN = r'''#
+'''
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import (
+ vmware_argument_spec, PyVmomi, find_dvs_by_name,
+ find_vm_by_name, wait_for_task)
+
+
class VMwareVspanSession(PyVmomi):
    """Create, remove or edit a VSpan (port mirroring) session on a distributed vSwitch."""

    def __init__(self, module):
        super(VMwareVspanSession, self).__init__(module)
        self.switch = module.params['switch']
        self.name = module.params['name']
        self.session_type = module.params['session_type']
        self.enabled = module.params['enabled']
        self.state = module.params['state']
        self.description = module.params['description']
        self.source_port_transmitted = module.params['source_port_transmitted']
        self.source_port_received = module.params['source_port_received']
        self.destination_port = module.params['destination_port']
        self.encapsulation_vlan_id = module.params['encapsulation_vlan_id']
        self.strip_original_vlan = module.params['strip_original_vlan']
        self.mirrored_packet_length = module.params['mirrored_packet_length']
        self.normal_traffic_allowed = module.params['normal_traffic_allowed']
        self.sampling_rate = module.params['sampling_rate']
        self.dv_switch = find_dvs_by_name(self.content, self.switch)
        if self.dv_switch is None:
            self.module.fail_json(msg="There is no dvSwitch with the name: {0:s}.".format(self.switch))
        self.operation = None
        # Ports whose promiscuous setting we changed, so a failure can be rolled back.
        self.modified_ports = dict()
        self.deleted_session = None
        if module.params['source_vm_transmitted'] is not None:
            if (module.params['source_vm_transmitted']['name'] is None
                    or module.params['source_vm_transmitted']['nic_label'] is None):
                self.module.fail_json(msg="Please provide both VM name and NIC Label")
            self.source_vm_transmitted_name = module.params['source_vm_transmitted']['name']
            self.source_vm_transmitted_nic_label = module.params['source_vm_transmitted']['nic_label']
        if module.params['source_vm_received'] is not None:
            if (module.params['source_vm_received']['name'] is None
                    or module.params['source_vm_received']['nic_label'] is None):
                self.module.fail_json(msg="Please provide both VM name and NIC Label")
            self.source_vm_received_name = module.params['source_vm_received']['name']
            self.source_vm_received_nic_label = module.params['source_vm_received']['nic_label']
        if module.params['destination_vm'] is not None:
            if (module.params['destination_vm']['name'] is None
                    or module.params['destination_vm']['nic_label'] is None):
                self.module.fail_json(msg="Please provide both VM name and NIC Label")
            self.destination_vm_name = module.params['destination_vm']['name']
            self.destination_vm_nic_label = module.params['destination_vm']['nic_label']

    def set_operation(self):
        """Sets the operation according to state"""
        if self.state == 'absent':
            self.operation = 'remove'
        elif self.state == 'present' and self.find_session_by_name() is None:
            self.operation = 'add'
        else:
            self.operation = 'edit'

    def find_session_by_name(self):
        """Finds a session by name
        Returns
        -------
        vim.dvs.VmwareDistributedVirtualSwitch.VspanSession
            The session if there was a session by the given name, else returns None
        """
        for vspan_session in self.dv_switch.config.vspanSession:
            if vspan_session.name == self.name:
                return vspan_session
        return None

    def get_vm_port(self, vm_name, nic_label):
        """Finds the port of the VM
        Returns
        -------
        str
            the port number as a string, or None if the NIC couldn't be found
        """
        vm = find_vm_by_name(self.content, vm_name)
        if vm is None:
            self.module.fail_json(msg="There is no VM with the name: {0:s}.".format(vm_name))
        # NOTE(review): only vmxnet3 adapters are matched here; other NIC types
        # (e1000, vmxnet2, ...) will not be found — presumably intentional, confirm.
        for hardware in vm.config.hardware.device:
            if isinstance(hardware, vim.vm.device.VirtualVmxnet3):
                if hardware.deviceInfo.label == nic_label:
                    return hardware.backing.port.portKey
        return None

    def set_port_for_vm(self):
        """Sets the ports, to the VM's specified port."""
        if hasattr(self, 'source_vm_transmitted_name') and hasattr(self, 'source_vm_transmitted_nic_label'):
            port = self.get_vm_port(self.source_vm_transmitted_name, self.source_vm_transmitted_nic_label)
            if port is not None:
                self.source_port_transmitted = port
            else:
                self.module.fail_json(
                    msg="No port could be found for VM: {0:s} NIC: {1:s}".format(self.source_vm_transmitted_name,
                                                                                 self.source_vm_transmitted_nic_label))
        if hasattr(self, 'source_vm_received_name') and hasattr(self, 'source_vm_received_nic_label'):
            port = self.get_vm_port(self.source_vm_received_name, self.source_vm_received_nic_label)
            if port is not None:
                self.source_port_received = port
            else:
                self.module.fail_json(
                    msg="No port could be found for VM: {0:s} NIC: {1:s}".format(self.source_vm_received_name,
                                                                                 self.source_vm_received_nic_label))
        if hasattr(self, 'destination_vm_name') and hasattr(self, 'destination_vm_nic_label'):
            port = self.get_vm_port(self.destination_vm_name, self.destination_vm_nic_label)
            if port is not None:
                self.destination_port = port
            else:
                self.module.fail_json(
                    msg="No port could be found for VM: {0:s} NIC: {1:s}".format(self.destination_vm_name,
                                                                                 self.destination_vm_nic_label))

    def process_operation(self):
        """Calls the create or delete function based on the operation"""
        self.set_operation()
        if self.operation == 'remove':
            results = self.remove_vspan_session()
            self.module.exit_json(**results)
        if self.operation == 'add':
            self.set_port_for_vm()
            results = self.add_vspan_session()
            self.module.exit_json(**results)
        if self.operation == 'edit':
            # Editing is implemented as delete-then-recreate.
            self.remove_vspan_session()
            self.set_port_for_vm()
            results = self.add_vspan_session()
            self.module.exit_json(**results)

    def set_port_security_promiscuous(self, ports, state):
        """Set the given ports to the given promiscuous state.
        Parameters
        ----------
        ports : str[]
            PortKeys
        state: bool
            State of the promiscuous mode, if true its allowed, else not.
        """
        # Creating the new port policy
        port_spec = []
        port_policy = vim.dvs.VmwareDistributedVirtualSwitch.MacManagementPolicy(allowPromiscuous=state)
        port_settings = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy(macManagementPolicy=port_policy)
        for port in ports:
            temp_port_spec = vim.dvs.DistributedVirtualPort.ConfigSpec(
                operation="edit",
                key=port,
                setting=port_settings
            )
            port_spec.append(temp_port_spec)

        task = self.dv_switch.ReconfigureDVPort_Task(port_spec)
        try:
            wait_for_task(task)
        except Exception:
            self.restore_original_state()
            self.module.fail_json(msg=task.info.error.msg)

    def turn_off_promiscuous(self):
        """Disable promiscuous mode on ports used by other mirror sessions, and return them in a list.
        Returns
        -------
        list
            Contains every port, where promiscuous mode has been turned off
        """
        # Ports that are in mirror sessions
        ports = []
        ports_of_selected_session = []
        for vspan_session in self.dv_switch.config.vspanSession:
            if vspan_session.sourcePortReceived is not None:
                session_ports = vspan_session.sourcePortReceived.portKey
                for port in session_ports:
                    if vspan_session.name == self.name:
                        ports_of_selected_session.append(port)
                    elif not (port in ports):
                        ports.append(port)
            if vspan_session.sourcePortTransmitted is not None:
                session_ports = vspan_session.sourcePortTransmitted.portKey
                for port in session_ports:
                    if vspan_session.name == self.name:
                        ports_of_selected_session.append(port)
                    elif not (port in ports):
                        ports.append(port)
            if vspan_session.destinationPort is not None:
                session_ports = vspan_session.destinationPort.portKey
                for port in session_ports:
                    if vspan_session.name == self.name:
                        ports_of_selected_session.append(port)
                    elif not (port in ports):
                        ports.append(port)
        promiscuous_ports = []
        if ports:
            dv_ports = self.dv_switch.FetchDVPorts(vim.dvs.PortCriteria(portKey=ports))
            # If a port is promiscuous set disable it, and add it to the array to enable it after the changes are made.
            for dv_port in dv_ports:
                if dv_port.config.setting.macManagementPolicy.allowPromiscuous:
                    self.set_port_security_promiscuous([dv_port.key], False)
                    self.modified_ports.update({dv_port.key: True})
                    promiscuous_ports.append(dv_port.key)
        if ports_of_selected_session:
            current_dv_ports = self.dv_switch.FetchDVPorts(vim.dvs.PortCriteria(portKey=ports_of_selected_session))
            for dv_port in current_dv_ports:
                if dv_port.config.setting.macManagementPolicy.allowPromiscuous:
                    self.set_port_security_promiscuous([dv_port.key], False)
                    self.modified_ports.update({dv_port.key: True})
        # Return the promiscuous ports array, to set them back after the config is finished.
        return promiscuous_ports

    def delete_mirroring_session(self, key):
        """Deletes the mirroring session.
        Parameters
        ----------
        key : str
            Key of the Session
        """
        session = vim.dvs.VmwareDistributedVirtualSwitch.VspanSession(
            key=key
        )
        config_version = self.dv_switch.config.configVersion
        s_spec = vim.dvs.VmwareDistributedVirtualSwitch.VspanConfigSpec(vspanSession=session, operation="remove")
        c_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec(vspanConfigSpec=[s_spec], configVersion=config_version)
        task = self.dv_switch.ReconfigureDvs_Task(c_spec)
        try:
            wait_for_task(task)
        except Exception:
            self.restore_original_state()
            self.module.fail_json(msg=task.info.error.msg)

    def restore_original_state(self):
        """In case of failure restore, the changes we made."""
        for port, state in self.modified_ports.items():
            self.set_port_security_promiscuous([port], state)
        if self.deleted_session is not None:
            session = self.deleted_session
            config_version = self.dv_switch.config.configVersion
            s_spec = vim.dvs.VmwareDistributedVirtualSwitch.VspanConfigSpec(vspanSession=session, operation="add")
            c_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec(vspanConfigSpec=[s_spec], configVersion=config_version)
            # Revert the delete
            task = self.dv_switch.ReconfigureDvs_Task(c_spec)
            try:
                wait_for_task(task)
            except Exception:
                self.restore_original_state()
                self.module.fail_json(msg=task.info.error.msg)

    def remove_vspan_session(self):
        """Calls the necessary functions to delete a VSpanSession."""
        results = dict(changed=False, result="")
        mirror_session = self.find_session_by_name()
        if mirror_session is None:
            results['result'] = "There is no VSpanSession with the name: {0:s}.".format(self.name)
            return results
        promiscuous_ports = self.turn_off_promiscuous()
        session_key = mirror_session.key
        # Delete Mirroring Session
        self.delete_mirroring_session(session_key)
        # Remember the session so restore_original_state() can re-add it on failure.
        self.deleted_session = mirror_session
        # Set back the promiscuous ports
        if promiscuous_ports:
            self.set_port_security_promiscuous(promiscuous_ports, True)
        results['changed'] = True
        results['result'] = 'VSpan Session has been deleted'
        return results

    def check_if_session_name_is_free(self):
        """Checks whether the name is used or not
        Returns
        -------
        bool
            True if the name is free and False if it is used.
        """
        for vspan_session in self.dv_switch.config.vspanSession:
            if vspan_session.name == self.name:
                return False
        return True

    def check_source_port_transmitted(self, session):
        # Validate the transmit-side source port exists, then attach it to the session.
        if self.source_port_transmitted is not None:
            port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts(portKey=str(self.source_port_transmitted))
            if not self.dv_switch.FetchDVPorts(vim.dvs.PortCriteria(portKey=port.portKey)):
                self.module.fail_json(msg="Couldn't find port: {0:s}".format(self.source_port_transmitted))
            session.sourcePortTransmitted = port

    def check_source_port_received(self, session):
        # Validate the receive-side source (a VLAN for remoteMirrorDest,
        # a port key otherwise), then attach it to the session.
        if not self.source_port_received:
            return

        if self.session_type == 'remoteMirrorDest':
            port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts(vlans=[int(self.source_port_received)])
            if int(self.source_port_received) not in self.dv_switch.QueryUsedVlanIdInDvs():
                self.module.fail_json(msg="Couldn't find vlan: {0:s}".format(self.source_port_received))
            session.sourcePortReceived = port
        else:
            port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts(portKey=str(self.source_port_received))
            if not self.dv_switch.FetchDVPorts(vim.dvs.PortCriteria(portKey=port.portKey)):
                self.module.fail_json(msg="Couldn't find port: {0:s}".format(self.source_port_received))
            session.sourcePortReceived = port

    def check_destination_port(self, session):
        # The destination's meaning depends on the session type: an IP address,
        # an uplink port name, or a distributed port key.
        if not self.destination_port:
            return

        if self.session_type == 'encapsulatedRemoteMirrorSource':
            port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts(ipAddress=str(self.destination_port))
            session.destinationPort = port
        if self.session_type == 'remoteMirrorSource':
            port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts(uplinkPortName=str(self.destination_port))
            session.destinationPort = port
        if self.session_type == 'remoteMirrorDest':
            port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts(portKey=str(self.destination_port))
            if not self.dv_switch.FetchDVPorts(vim.dvs.PortCriteria(portKey=port.portKey)):
                self.module.fail_json(msg="Couldn't find port: {0:s}".format(self.destination_port))
            session.destinationPort = port
        if self.session_type == 'dvPortMirror':
            port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts(portKey=str(self.destination_port))
            if not self.dv_switch.FetchDVPorts(vim.dvs.PortCriteria(portKey=port.portKey)):
                self.module.fail_json(msg="Couldn't find port: {0:s}".format(self.destination_port))
            session.destinationPort = port

    def check_self_properties(self, session):
        # Copy the optional module parameters onto the session spec.
        if self.description is not None:
            session.description = self.description
        if self.encapsulation_vlan_id is not None:
            session.encapsulationVlanId = self.encapsulation_vlan_id
        if self.strip_original_vlan is not None:
            session.stripOriginalVlan = self.strip_original_vlan
        if self.mirrored_packet_length is not None:
            session.mirroredPacketLength = self.mirrored_packet_length
        if self.normal_traffic_allowed is not None:
            session.normalTrafficAllowed = self.normal_traffic_allowed
        if self.sampling_rate is not None:
            session.samplingRate = self.sampling_rate

    def create_vspan_session(self):
        """Builds up the session, adds the parameters that we specified, then creates it on the vSwitch"""

        session = vim.dvs.VmwareDistributedVirtualSwitch.VspanSession(
            name=self.name,
            enabled=True
        )
        if self.session_type is not None:
            session.sessionType = self.session_type
            if self.session_type == 'encapsulatedRemoteMirrorSource':
                self.check_source_port_received(session)
                self.check_source_port_transmitted(session)
                self.check_destination_port(session)
            if self.session_type == 'remoteMirrorSource':
                self.check_source_port_received(session)
                self.check_source_port_transmitted(session)
                self.check_destination_port(session)
            if self.session_type == 'remoteMirrorDest':
                self.check_source_port_received(session)
                self.check_destination_port(session)
            if self.session_type == 'dvPortMirror':
                self.check_source_port_received(session)
                self.check_source_port_transmitted(session)
                self.check_destination_port(session)

        self.check_self_properties(session)

        config_version = self.dv_switch.config.configVersion
        s_spec = vim.dvs.VmwareDistributedVirtualSwitch.VspanConfigSpec(vspanSession=session, operation="add")
        c_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec(vspanConfigSpec=[s_spec], configVersion=config_version)
        task = self.dv_switch.ReconfigureDvs_Task(c_spec)
        try:
            wait_for_task(task)
        except Exception:
            self.restore_original_state()
            self.module.fail_json(msg=task.info.error.msg)

    def add_vspan_session(self):
        """Calls the necessary functions to create a VSpanSession"""
        results = dict(changed=False, result="")
        promiscuous_ports = self.turn_off_promiscuous()
        if not self.check_if_session_name_is_free():
            self.module.fail_json(msg="There is another VSpan Session with the name: {0:s}.".format(self.name))
        # Locate the ports we want to use, skipping parameters that were not
        # supplied. (Previously unset values were stringified to the bogus
        # port key "None" and passed to FetchDVPorts.)
        ports = [str(port) for port in (self.source_port_received,
                                        self.source_port_transmitted,
                                        self.destination_port)
                 if port is not None]
        if ports:
            dv_ports = self.dv_switch.FetchDVPorts(vim.dvs.PortCriteria(portKey=ports))
            for dv_port in dv_ports:
                if dv_port.config.setting.macManagementPolicy.allowPromiscuous:
                    self.set_port_security_promiscuous([dv_port.key], False)
                    self.modified_ports.update({dv_port.key: True})
        # Now we can create the VspanSession
        self.create_vspan_session()
        # Finally we can set the destination port to promiscuous mode
        # (only when a destination port was actually resolved).
        if self.destination_port is not None and self.session_type in ('dvPortMirror', 'remoteMirrorDest'):
            self.set_port_security_promiscuous([str(self.destination_port)], True)
        # Set back the promiscuous ports
        if promiscuous_ports:
            self.set_port_security_promiscuous(promiscuous_ports, True)
        results['changed'] = True
        results['result'] = 'Mirroring session has been created.'
        return results
+
+
def main():
    """Module entry point: declare the argument spec and run the session manager."""
    def vm_nic_option():
        # The three VM/NIC parameters share the same dict shape; build a
        # fresh spec for each so AnsibleModule never shares state between them.
        return {
            'type': 'dict',
            'options': {
                'name': {'type': 'str'},
                'nic_label': {'type': 'str'},
            },
        }

    argument_spec = vmware_argument_spec()
    argument_spec.update({
        'switch': {'type': 'str', 'required': True, 'aliases': ['switch_name']},
        'name': {'type': 'str', 'required': True},
        'state': {'type': 'str', 'required': True, 'choices': ['present', 'absent']},
        'session_type': {
            'type': 'str',
            'default': 'dvPortMirror',
            'choices': [
                'dvPortMirror',
                'encapsulatedRemoteMirrorSource',
                'remoteMirrorDest',
                'remoteMirrorSource',
            ],
        },
        'enabled': {'type': 'bool', 'default': True},
        'description': {'type': 'str'},
        'source_port_transmitted': {'type': 'str'},
        'source_port_received': {'type': 'str'},
        'destination_port': {'type': 'str'},
        'encapsulation_vlan_id': {'type': 'int'},
        'strip_original_vlan': {'type': 'bool'},
        'mirrored_packet_length': {'type': 'int'},
        'normal_traffic_allowed': {'type': 'bool'},
        'sampling_rate': {'type': 'int'},
        'source_vm_transmitted': vm_nic_option(),
        'source_vm_received': vm_nic_option(),
        'destination_vm': vm_nic_option(),
    })
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    VMwareVspanSession(module).process_operation()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_vswitch.py b/ansible_collections/community/vmware/plugins/modules/vmware_vswitch.py
new file mode 100644
index 000000000..56f3b4c38
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_vswitch.py
@@ -0,0 +1,803 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_vswitch
+short_description: Manage a VMware Standard Switch on an ESXi host.
+description:
+- This module can be used to add, remove and update a VMware Standard Switch to an ESXi host.
+author:
+- Joseph Callen (@jcpowermac)
+- Russell Teague (@mtnbikenc)
+- Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
+options:
+ switch:
+ description:
+ - vSwitch name to add.
+ - Alias C(switch) is added in version 2.4.
+ required: true
+ aliases: [ switch_name ]
+ type: str
+ nics:
+ description:
+ - A list of vmnic names or vmnic name to attach to vSwitch.
+ - Alias C(nics) is added in version 2.4.
+ aliases: [ nic_name ]
+ default: []
+ type: list
+ elements: str
+ number_of_ports:
+ description:
+    - Number of ports to configure on the vSwitch.
+ default: 128
+ type: int
+ mtu:
+ description:
+ - MTU to configure on vSwitch.
+ default: 1500
+ type: int
+ state:
+ description:
+ - Add or remove the switch.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ esxi_hostname:
+ description:
+ - Manage the vSwitch using this ESXi host system.
+ aliases: [ 'host' ]
+ type: str
+ security:
+ description:
+ - Network policy specifies layer 2 security settings for a
+ portgroup such as promiscuous mode, where guest adapter listens
+ to all the packets, MAC address changes and forged transmits.
+ - Dict which configures the different security values for portgroup.
+ version_added: '2.4.0'
+ suboptions:
+ promiscuous_mode:
+ type: bool
+ description: Indicates whether promiscuous mode is allowed.
+ forged_transmits:
+ type: bool
+ description: Indicates whether forged transmits are allowed.
+ mac_changes:
+ type: bool
+ description: Indicates whether mac changes are allowed.
+ required: false
+ aliases: [ 'security_policy', 'network_policy' ]
+ type: dict
+ teaming:
+ description:
+ - Dictionary which configures the different teaming values for portgroup.
+ version_added: '2.4.0'
+ suboptions:
+ load_balancing:
+ type: str
+ description:
+ - Network adapter teaming policy.
+ choices: [ loadbalance_ip, loadbalance_srcmac, loadbalance_srcid, failover_explicit ]
+ aliases: [ 'load_balance_policy' ]
+ network_failure_detection:
+ type: str
+ description: Network failure detection.
+ choices: [ link_status_only, beacon_probing ]
+ notify_switches:
+ type: bool
+ description: Indicate whether or not to notify the physical switch if a link fails.
+ failback:
+ type: bool
+ description: Indicate whether or not to use a failback when restoring links.
+ active_adapters:
+ type: list
+ description:
+ - List of active adapters used for load balancing.
+ - All vmnics are used as active adapters if C(active_adapters) and C(standby_adapters) are not defined.
+ elements: str
+ standby_adapters:
+ type: list
+ description:
+ - List of standby adapters used for failover.
+ - All vmnics are used as active adapters if C(active_adapters) and C(standby_adapters) are not defined.
+ elements: str
+ required: false
+ aliases: [ 'teaming_policy' ]
+ type: dict
+ traffic_shaping:
+ description:
+ - Dictionary which configures traffic shaping for the switch.
+ version_added: '2.4.0'
+ suboptions:
+ enabled:
+ type: bool
+ description: Status of Traffic Shaping Policy.
+ average_bandwidth:
+ type: int
+ description: Average bandwidth (kbit/s).
+ peak_bandwidth:
+ type: int
+ description: Peak bandwidth (kbit/s).
+ burst_size:
+ type: int
+ description: Burst size (KB).
+ required: false
+ type: dict
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Add a VMware vSwitch
+ community.vmware.vmware_vswitch:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ switch: vswitch_name
+ nics: vmnic_name
+ mtu: 9000
+ delegate_to: localhost
+
+- name: Add a VMware vSwitch without any physical NIC attached
+ community.vmware.vmware_vswitch:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ switch: vswitch_0001
+ mtu: 9000
+ delegate_to: localhost
+
+- name: Add a VMware vSwitch with multiple NICs
+ community.vmware.vmware_vswitch:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ switch: vmware_vswitch_0004
+ nics:
+ - vmnic1
+ - vmnic2
+ mtu: 9000
+ delegate_to: localhost
+
+- name: Add a VMware vSwitch to a specific host system
+ community.vmware.vmware_vswitch:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ esxi_hostname: DC0_H0
+ switch_name: vswitch_001
+ nic_name: vmnic0
+ mtu: 9000
+ delegate_to: localhost
+
+- name: Add a VMware vSwitch to a specific host system with Promiscuous Mode Enabled
+ community.vmware.vmware_vswitch:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ esxi_hostname: DC0_H0
+ switch_name: vswitch_001
+ nic_name: vmnic0
+ mtu: 9000
+ security:
+ promiscuous_mode: true
+ delegate_to: localhost
+
+- name: Add a VMware vSwitch to a specific host system with active/standby teaming
+ community.vmware.vmware_vswitch:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ esxi_hostname: DC0_H0
+ switch_name: vswitch_001
+ nic_name:
+ - vmnic0
+ - vmnic1
+ teaming:
+ active_adapters:
+ - vmnic0
+ standby_adapters:
+ - vmnic1
+ delegate_to: localhost
+
+- name: Add a VMware vSwitch to a specific host system with traffic shaping
+ community.vmware.vmware_vswitch:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ esxi_hostname: DC0_H0
+ switch_name: vswitch_001
+ nic_name:
+ - vmnic0
+ - vmnic1
+ traffic_shaping:
+ enabled: true
+ average_bandwidth: 100000
+ peak_bandwidth: 100000
+ burst_size: 102400
+ delegate_to: localhost
+
+- name: Delete a VMware vSwitch in a specific host system
+ community.vmware.vmware_vswitch:
+ hostname: '{{ esxi_hostname }}'
+ username: '{{ esxi_username }}'
+ password: '{{ esxi_password }}'
+ esxi_hostname: DC0_H0
+ switch_name: vswitch_001
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+result:
+ description: information about performed operation
+ returned: always
+ type: str
+ sample: "vSwitch 'vSwitch_1002' is created successfully"
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+from ansible.module_utils._text import to_native
+
+
class VMwareHostVirtualSwitch(PyVmomi):
    """Manage a VMware Standard Switch (vSwitch) on a single ESXi host.

    Implements a small state machine: the desired state (``present``/``absent``)
    combined with the discovered state of the named switch selects one of
    create / update / destroy / no-op.
    """

    def __init__(self, module):
        """Resolve the target host, its network manager, and validate requested NICs.

        Args:
            module: AnsibleModule instance carrying the validated parameters.
        """
        super(VMwareHostVirtualSwitch, self).__init__(module)
        self.host_system = None
        self.vss = None
        self.switch = module.params['switch']
        self.number_of_ports = module.params['number_of_ports']
        self.nics = module.params['nics']
        self.mtu = module.params['mtu']
        self.state = module.params['state']
        esxi_hostname = module.params['esxi_hostname']

        hosts = self.get_all_host_objs(esxi_host_name=esxi_hostname)
        if hosts:
            # Only the first matching host is managed.
            self.host_system = hosts[0]
        else:
            self.module.fail_json(msg="Failed to get details of ESXi server."
                                      " Please specify esxi_hostname.")

        self.network_mgr = self.host_system.configManager.networkSystem
        if not self.network_mgr:
            self.module.fail_json(msg="Failed to find network manager for ESXi system.")

        if self.params.get('state') == 'present':
            # Gather information about all vSwitches and Physical NICs
            available_pnic = [pnic.device for pnic in self.network_mgr.networkInfo.pnic]
            self.available_vswitches = dict()
            for available_vswitch in self.network_mgr.networkInfo.vswitch:
                used_pnic = []
                for pnic in available_vswitch.pnic:
                    # vSwitch contains all PNICs as string in format of 'key-vim.host.PhysicalNic-vmnic0'
                    m_pnic = pnic.split("-", 3)[-1]
                    used_pnic.append(m_pnic)
                self.available_vswitches[available_vswitch.name] = dict(pnic=used_pnic,
                                                                        mtu=available_vswitch.mtu,
                                                                        num_ports=available_vswitch.spec.numPorts,
                                                                        )
            for desired_pnic in self.nics:
                if desired_pnic not in available_pnic:
                    # Check if pnic does not exists
                    self.module.fail_json(msg="Specified Physical NIC '%s' does not"
                                              " exists on given ESXi '%s'." % (desired_pnic,
                                                                               self.host_system.name))
                for vswitch in self.available_vswitches:
                    if desired_pnic in self.available_vswitches[vswitch]['pnic'] and vswitch != self.switch:
                        # Check if pnic is already part of some other vSwitch
                        self.module.fail_json(msg="Specified Physical NIC '%s' is already used"
                                                  " by vSwitch '%s'." % (desired_pnic, vswitch))

    def process_state(self):
        """
        Manage internal state of vSwitch

        Dispatches on (desired state, current state) to the matching handler.
        Every handler exits the module via exit_json/fail_json.
        """
        vswitch_states = {
            'absent': {
                'present': self.state_destroy_vswitch,
                'absent': self.state_exit_unchanged,
            },
            'present': {
                'present': self.state_update_vswitch,
                'absent': self.state_create_vswitch,
            }
        }

        try:
            vswitch_states[self.state][self.check_vswitch_configuration()]()
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=to_native(runtime_fault.msg))
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=to_native(method_fault.msg))
        except Exception as e:
            self.module.fail_json(msg=to_native(e))

    def state_create_vswitch(self):
        """
        Create a virtual switch

        Source from
        https://github.com/rreubenur/pyvmomi-community-samples/blob/patch-1/samples/create_vswitch.py

        After creation the switch is re-read so that security, teaming and
        traffic-shaping policies can be applied in a follow-up update call.
        """

        results = dict(changed=False, result="")
        vss_spec = vim.host.VirtualSwitch.Specification()
        vss_spec.numPorts = self.number_of_ports
        vss_spec.mtu = self.mtu
        if self.nics:
            vss_spec.bridge = vim.host.VirtualSwitch.BondBridge(nicDevice=self.nics)

        if self.module.check_mode:
            results['msg'] = "vSwitch '%s' would be created" % self.switch
        else:
            try:
                self.network_mgr.AddVirtualSwitch(vswitchName=self.switch,
                                                  spec=vss_spec)

                changed = False
                spec = self.find_vswitch_by_name(self.host_system, self.switch).spec

                # Check Security Policy
                if self.update_security_policy(spec, results):
                    changed = True

                # Check Teaming Policy
                if self.update_teaming_policy(spec, results):
                    changed = True

                # Check Traffic Shaping Policy
                if self.update_traffic_shaping_policy(spec, results):
                    changed = True

                if changed:
                    self.network_mgr.UpdateVirtualSwitch(vswitchName=self.switch,
                                                         spec=spec)

                results['result'] = "vSwitch '%s' is created successfully" % self.switch
            except vim.fault.AlreadyExists as already_exists:
                # NOTE(review): this path still reports changed=True below even
                # though nothing was created — kept for backward compatibility.
                results['result'] = "vSwitch with name %s already exists: %s" % (self.switch,
                                                                                 to_native(already_exists.msg))
            except vim.fault.ResourceInUse as resource_used:
                self.module.fail_json(msg="Failed to add vSwitch '%s' as physical network adapter"
                                          " being bridged is already in use: %s" % (self.switch,
                                                                                    to_native(resource_used.msg)))
            except vim.fault.HostConfigFault as host_config_fault:
                self.module.fail_json(msg="Failed to add vSwitch '%s' due to host"
                                          " configuration fault : %s" % (self.switch,
                                                                         to_native(host_config_fault.msg)))
            except vmodl.fault.InvalidArgument as invalid_argument:
                self.module.fail_json(msg="Failed to add vSwitch '%s', this can be due to either of following :"
                                          " 1. vSwitch Name exceeds the maximum allowed length,"
                                          " 2. Number of ports specified falls out of valid range,"
                                          " 3. Network policy is invalid,"
                                          " 4. Beacon configuration is invalid : %s" % (self.switch,
                                                                                        to_native(invalid_argument.msg)))
            except vmodl.fault.SystemError as system_error:
                self.module.fail_json(msg="Failed to add vSwitch '%s' due to : %s" % (self.switch,
                                                                                      to_native(system_error.msg)))
            except Exception as generic_exc:
                self.module.fail_json(msg="Failed to add vSwitch '%s' due to"
                                          " generic exception : %s" % (self.switch,
                                                                       to_native(generic_exc)))

        results['changed'] = True

        self.module.exit_json(**results)

    def state_exit_unchanged(self):
        """
        Exit the module reporting that nothing changed.
        """
        self.module.exit_json(changed=False)

    def state_destroy_vswitch(self):
        """
        Remove vSwitch from configuration

        """
        results = dict(changed=False, result="")

        if self.module.check_mode:
            results['msg'] = "vSwitch '%s' would be removed" % self.vss.name
        else:
            try:
                self.host_system.configManager.networkSystem.RemoveVirtualSwitch(self.vss.name)
                results['result'] = "vSwitch '%s' removed successfully." % self.vss.name
            except vim.fault.NotFound as vswitch_not_found:
                # Switch vanished between discovery and removal — treat as done.
                results['result'] = "vSwitch '%s' not available. %s" % (self.switch,
                                                                        to_native(vswitch_not_found.msg))
            except vim.fault.ResourceInUse as vswitch_in_use:
                self.module.fail_json(msg="Failed to remove vSwitch '%s' as vSwitch"
                                          " is used by several virtual"
                                          " network adapters: %s" % (self.switch,
                                                                     to_native(vswitch_in_use.msg)))
            except vim.fault.HostConfigFault as host_config_fault:
                self.module.fail_json(msg="Failed to remove vSwitch '%s' due to host"
                                          " configuration fault : %s" % (self.switch,
                                                                         to_native(host_config_fault.msg)))
            except Exception as generic_exc:
                self.module.fail_json(msg="Failed to remove vSwitch '%s' due to generic"
                                          " exception : %s" % (self.switch,
                                                               to_native(generic_exc)))

        results['changed'] = True

        self.module.exit_json(**results)

    def state_update_vswitch(self):
        """
        Update vSwitch

        Compares MTU, port count, attached NICs and the three policy groups
        against the live switch and pushes a single UpdateVirtualSwitch call
        when anything differs.
        """
        changed = False
        results = dict(changed=False, result="No change in vSwitch '%s'" % self.switch)
        spec = self.vss.spec

        # Check MTU
        if self.vss.mtu != self.mtu:
            spec.mtu = self.mtu
            changed = True

        # Check Number of Ports
        if spec.numPorts != self.number_of_ports:
            spec.numPorts = self.number_of_ports
            changed = True

        # Check nics
        nics_current = set(map(lambda n: n.rsplit('-', 1)[1], self.vss.pnic))
        if nics_current != set(self.nics):
            if self.nics:
                spec.bridge = vim.host.VirtualSwitch.BondBridge(nicDevice=self.nics)
            else:
                # No NICs requested: drop the bridge entirely.
                spec.bridge = None
            changed = True

        # Update teaming if not configured specifically
        if not self.params['teaming']:
            nicOrder = spec.policy.nicTeaming.nicOrder
            # Remove missing nics from policy
            if nicOrder.activeNic != [i for i in nicOrder.activeNic if i in self.nics]:
                nicOrder.activeNic = [i for i in nicOrder.activeNic if i in self.nics]
            if nicOrder.standbyNic != [i for i in nicOrder.standbyNic if i in self.nics]:
                nicOrder.standbyNic = [i for i in nicOrder.standbyNic if i in self.nics]
            # Set new nics as active, preserving the order the user supplied.
            # (Extending from a raw set would make the resulting NIC order
            # nondeterministic across runs.)
            new_active_nics = [nic for nic in self.nics if nic not in nics_current]
            if new_active_nics:
                nicOrder.activeNic += new_active_nics

        # Check Security Policy
        if self.update_security_policy(spec, results):
            changed = True

        # Check Teaming Policy
        if self.update_teaming_policy(spec, results):
            changed = True

        # Check Traffic Shaping Policy
        if self.update_traffic_shaping_policy(spec, results):
            changed = True

        if changed:
            if self.module.check_mode:
                results['msg'] = "vSwitch '%s' would be updated" % self.switch
            else:
                try:
                    self.network_mgr.UpdateVirtualSwitch(vswitchName=self.switch,
                                                         spec=spec)
                    results['result'] = "vSwitch '%s' is updated successfully" % self.switch
                except vim.fault.ResourceInUse as resource_used:
                    self.module.fail_json(msg="Failed to update vSwitch '%s' as physical network adapter"
                                              " being bridged is already in use: %s" % (self.switch,
                                                                                        to_native(resource_used.msg)))
                except vim.fault.NotFound as not_found:
                    self.module.fail_json(msg="Failed to update vSwitch with name '%s'"
                                              " as it does not exists: %s" % (self.switch,
                                                                              to_native(not_found.msg)))

                except vim.fault.HostConfigFault as host_config_fault:
                    self.module.fail_json(msg="Failed to update vSwitch '%s' due to host"
                                              " configuration fault : %s" % (self.switch,
                                                                             to_native(host_config_fault.msg)))
                except vmodl.fault.InvalidArgument as invalid_argument:
                    self.module.fail_json(msg="Failed to update vSwitch '%s', this can be due to either of following :"
                                              " 1. vSwitch Name exceeds the maximum allowed length,"
                                              " 2. Number of ports specified falls out of valid range,"
                                              " 3. Network policy is invalid,"
                                              " 4. Beacon configuration is invalid : %s" % (self.switch,
                                                                                            to_native(invalid_argument.msg)))
                except vmodl.fault.SystemError as system_error:
                    self.module.fail_json(msg="Failed to update vSwitch '%s' due to : %s" % (self.switch,
                                                                                             to_native(system_error.msg)))
                except vmodl.fault.NotSupported as not_supported:
                    self.module.fail_json(msg="Failed to update vSwitch '%s' as network adapter teaming policy"
                                              " is set but is not supported : %s" % (self.switch,
                                                                                     to_native(not_supported.msg)))
                except Exception as generic_exc:
                    self.module.fail_json(msg="Failed to update vSwitch '%s' due to"
                                              " generic exception : %s" % (self.switch,
                                                                           to_native(generic_exc)))

            results['changed'] = True

        self.module.exit_json(**results)

    def check_vswitch_configuration(self):
        """
        Check if vSwitch exists
        Returns: 'present' if vSwitch exists or 'absent' if not

        Side effect: caches the found switch object on self.vss for the
        state handlers.
        """
        self.vss = self.find_vswitch_by_name(self.host_system, self.switch)
        if self.vss is None:
            return 'absent'
        else:
            return 'present'

    @staticmethod
    def find_vswitch_by_name(host, vswitch_name):
        """
        Find and return vSwitch managed object
        Args:
            host: Host system managed object
            vswitch_name: Name of vSwitch to find

        Returns: vSwitch managed object if found, else None

        """
        for vss in host.configManager.networkSystem.networkInfo.vswitch:
            if vss.name == vswitch_name:
                return vss
        return None

    def update_security_policy(self, spec, results):
        """
        Update the security policy according to the parameters
        Args:
            spec: The vSwitch spec
            results: The results dict

        Returns: True if changes have been made, else false
        """
        if not self.params['security'] or not spec.policy.security:
            return False

        security_policy = spec.policy.security
        changed = False
        sec_promiscuous_mode = self.params['security'].get('promiscuous_mode')
        sec_forged_transmits = self.params['security'].get('forged_transmits')
        sec_mac_changes = self.params['security'].get('mac_changes')

        # Each suboption is tri-state: None means "leave as-is".
        if sec_promiscuous_mode is not None:
            results['sec_promiscuous_mode'] = sec_promiscuous_mode
            if security_policy.allowPromiscuous is not sec_promiscuous_mode:
                results['sec_promiscuous_mode_previous'] = security_policy.allowPromiscuous
                security_policy.allowPromiscuous = sec_promiscuous_mode
                changed = True

        if sec_mac_changes is not None:
            results['sec_mac_changes'] = sec_mac_changes
            if security_policy.macChanges is not sec_mac_changes:
                results['sec_mac_changes_previous'] = security_policy.macChanges
                security_policy.macChanges = sec_mac_changes
                changed = True

        if sec_forged_transmits is not None:
            results['sec_forged_transmits'] = sec_forged_transmits
            if security_policy.forgedTransmits is not sec_forged_transmits:
                results['sec_forged_transmits_previous'] = security_policy.forgedTransmits
                security_policy.forgedTransmits = sec_forged_transmits
                changed = True

        return changed

    def update_teaming_policy(self, spec, results):
        """
        Update the teaming policy according to the parameters
        Args:
            spec: The vSwitch spec
            results: The results dict

        Returns: True if changes have been made, else false
        """
        if not self.params['teaming'] or not spec.policy.nicTeaming:
            return False

        teaming_policy = spec.policy.nicTeaming
        changed = False
        teaming_load_balancing = self.params['teaming'].get('load_balancing')
        teaming_failure_detection = self.params['teaming'].get('network_failure_detection')
        teaming_notify_switches = self.params['teaming'].get('notify_switches')
        teaming_failback = self.params['teaming'].get('failback')
        teaming_failover_order_active = self.params['teaming'].get('active_adapters')
        teaming_failover_order_standby = self.params['teaming'].get('standby_adapters')

        # Check teaming policy
        if teaming_load_balancing is not None:
            results['load_balancing'] = teaming_load_balancing
            if teaming_policy.policy != teaming_load_balancing:
                results['load_balancing_previous'] = teaming_policy.policy
                teaming_policy.policy = teaming_load_balancing
                changed = True

        # Check teaming notify switches
        if teaming_notify_switches is not None:
            results['notify_switches'] = teaming_notify_switches
            if teaming_policy.notifySwitches is not teaming_notify_switches:
                results['notify_switches_previous'] = teaming_policy.notifySwitches
                teaming_policy.notifySwitches = teaming_notify_switches
                changed = True

        # Check failback (the API expresses it inverted as 'rollingOrder')
        if teaming_failback is not None:
            results['failback'] = teaming_failback
            current_failback = not teaming_policy.rollingOrder
            if current_failback != teaming_failback:
                results['failback_previous'] = current_failback
                teaming_policy.rollingOrder = not teaming_failback
                changed = True

        # Check teaming failover order
        if teaming_failover_order_active is not None:
            results['failover_active'] = teaming_failover_order_active
            if teaming_policy.nicOrder.activeNic != teaming_failover_order_active:
                results['failover_active_previous'] = teaming_policy.nicOrder.activeNic
                teaming_policy.nicOrder.activeNic = teaming_failover_order_active
                changed = True
        if teaming_failover_order_standby is not None:
            results['failover_standby'] = teaming_failover_order_standby
            if teaming_policy.nicOrder.standbyNic != teaming_failover_order_standby:
                results['failover_standby_previous'] = teaming_policy.nicOrder.standbyNic
                teaming_policy.nicOrder.standbyNic = teaming_failover_order_standby
                changed = True

        # Check teaming failure detection (modelled via failureCriteria.checkBeacon)
        if teaming_failure_detection is not None:
            results['failure_detection'] = teaming_failure_detection
            if teaming_failure_detection == "link_status_only":
                if teaming_policy.failureCriteria.checkBeacon is True:
                    results['failure_detection_previous'] = "beacon_probing"
                    teaming_policy.failureCriteria.checkBeacon = False
                    changed = True
            elif teaming_failure_detection == "beacon_probing":
                if teaming_policy.failureCriteria.checkBeacon is False:
                    results['failure_detection_previous'] = "link_status_only"
                    teaming_policy.failureCriteria.checkBeacon = True
                    changed = True

        return changed

    def update_traffic_shaping_policy(self, spec, results):
        """
        Update the traffic shaping policy according to the parameters
        Args:
            spec: The vSwitch spec
            results: The results dict

        Returns: True if changes have been made, else false
        """
        # Guard on shapingPolicy (previously checked nicTeaming by copy-paste
        # mistake, which tied traffic-shaping updates to the teaming policy).
        if not self.params['traffic_shaping'] or not spec.policy.shapingPolicy:
            return False

        ts_policy = spec.policy.shapingPolicy
        changed = False
        ts_enabled = self.params['traffic_shaping'].get('enabled')

        # Check if traffic shaping needs to be disabled
        if not ts_enabled:
            if ts_policy.enabled:
                ts_policy.enabled = False
                changed = True
            return changed

        # All three values are mandatory once shaping is enabled.
        for value in ['average_bandwidth', 'peak_bandwidth', 'burst_size']:
            if not self.params['traffic_shaping'].get(value):
                self.module.fail_json(msg="traffic_shaping.%s is a required parameter if traffic_shaping is enabled." % value)
        # Module input is kbit/s and KB; the API wants bit/s and bytes.
        ts_average_bandwidth = self.params['traffic_shaping'].get('average_bandwidth') * 1000
        ts_peak_bandwidth = self.params['traffic_shaping'].get('peak_bandwidth') * 1000
        ts_burst_size = self.params['traffic_shaping'].get('burst_size') * 1024

        if not ts_policy.enabled:
            ts_policy.enabled = True
            changed = True

        if ts_policy.averageBandwidth != ts_average_bandwidth:
            results['traffic_shaping_avg_bandw'] = ts_average_bandwidth
            results['traffic_shaping_avg_bandw_previous'] = ts_policy.averageBandwidth
            ts_policy.averageBandwidth = ts_average_bandwidth
            changed = True

        if ts_policy.peakBandwidth != ts_peak_bandwidth:
            results['traffic_shaping_peak_bandw'] = ts_peak_bandwidth
            results['traffic_shaping_peak_bandw_previous'] = ts_policy.peakBandwidth
            ts_policy.peakBandwidth = ts_peak_bandwidth
            changed = True

        if ts_policy.burstSize != ts_burst_size:
            results['traffic_shaping_burst'] = ts_burst_size
            results['traffic_shaping_burst_previous'] = ts_policy.burstSize
            ts_policy.burstSize = ts_burst_size
            changed = True

        return changed
+
+
def main():
    """Module entry point: declare the argument spec and drive the state machine."""
    # The three policy dicts are built up-front to keep the update() call flat.
    security_spec = dict(
        type='dict',
        options=dict(
            promiscuous_mode=dict(type='bool'),
            forged_transmits=dict(type='bool'),
            mac_changes=dict(type='bool'),
        ),
        aliases=['security_policy', 'network_policy'],
    )
    teaming_spec = dict(
        type='dict',
        options=dict(
            load_balancing=dict(
                type='str',
                choices=[
                    'loadbalance_ip',
                    'loadbalance_srcmac',
                    'loadbalance_srcid',
                    'failover_explicit',
                ],
                aliases=['load_balance_policy'],
            ),
            network_failure_detection=dict(
                type='str',
                choices=['link_status_only', 'beacon_probing'],
            ),
            notify_switches=dict(type='bool'),
            failback=dict(type='bool'),
            active_adapters=dict(type='list', elements='str'),
            standby_adapters=dict(type='list', elements='str'),
        ),
        aliases=['teaming_policy'],
    )
    traffic_shaping_spec = dict(
        type='dict',
        options=dict(
            enabled=dict(type='bool'),
            average_bandwidth=dict(type='int'),
            peak_bandwidth=dict(type='int'),
            burst_size=dict(type='int'),
        ),
    )

    argument_spec = vmware_argument_spec()
    argument_spec.update(
        switch=dict(type='str', required=True, aliases=['switch_name']),
        nics=dict(type='list', aliases=['nic_name'], default=[], elements='str'),
        number_of_ports=dict(type='int', default=128),
        mtu=dict(type='int', default=1500),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        esxi_hostname=dict(type='str', aliases=['host']),
        security=security_spec,
        teaming=teaming_spec,
        traffic_shaping=traffic_shaping_spec,
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    VMwareHostVirtualSwitch(module).process_state()


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vmware_vswitch_info.py b/ansible_collections/community/vmware/plugins/modules/vmware_vswitch_info.py
new file mode 100644
index 000000000..e053a4b1d
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vmware_vswitch_info.py
@@ -0,0 +1,207 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vmware_vswitch_info
+short_description: Gathers info about an ESXi host's vswitch configurations
+description:
+- This module can be used to gather information about an ESXi host's vswitch configurations when ESXi hostname or Cluster name is given.
+- The vSphere Client shows the value for the number of ports as elastic from vSphere 5.5 and above.
+- Other tools like esxcli might show the number of ports as 1536 or 5632.
+- See U(https://kb.vmware.com/s/article/2064511) for more details.
+author:
+- Abhijeet Kasurde (@Akasurde)
+options:
+ policies:
+ version_added: '2.4.0'
+ description:
+ - Gather information about Security, Traffic Shaping, as well as Teaming and failover.
+ - The property C(ts) stands for Traffic Shaping and C(lb) for Load Balancing.
+ type: bool
+ default: false
+ cluster_name:
+ description:
+ - Name of the cluster.
+ - Info about vswitch belonging to every ESXi host systems under this cluster will be returned.
+ - If C(esxi_hostname) is not given, this parameter is required.
+ type: str
+ esxi_hostname:
+ description:
+ - ESXi hostname to gather information from.
+ - If C(cluster_name) is not given, this parameter is required.
+ type: str
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather vswitch info about all ESXi Host in given Cluster
+ community.vmware.vmware_vswitch_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: '{{ cluster_name }}'
+ delegate_to: localhost
+ register: all_hosts_vswitch_info
+
+- name: Gather vswitch info about ESXi Host
+ community.vmware.vmware_vswitch_info:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ delegate_to: localhost
+ register: all_vswitch_info
+'''
+
+RETURN = r'''
+hosts_vswitch_info:
+ description: metadata about host's vswitch configuration
+ returned: on success
+ type: dict
+ sample: {
+ "10.76.33.218": {
+ "vSwitch0": {
+ "mtu": 1500,
+ "num_ports": 128,
+ "pnics": [
+ "vmnic0"
+ ],
+ "failback": true,
+ "failover_active": ["vmnic0"],
+ "failover_standby": [],
+ "failure_detection": "link_status_only",
+ "lb": "loadbalance_srcid",
+ "notify": true,
+ "security": [false, false, false],
+ "ts": false
+ },
+ "vSwitch_0011": {
+ "mtu": 1500,
+ "num_ports": 128,
+ "pnics": [
+ "vmnic2",
+ "vmnic1"
+ ],
+ "failback": true,
+ "failover_active": ["vmnic1"],
+ "failover_standby": ["vmnic2"],
+ "failure_detection": "link_status_only",
+ "lb": "loadbalance_srcid",
+ "notify": true,
+ "security": [false, false, false],
+ "ts": false,
+ },
+ },
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
+
+
class VswitchInfoManager(PyVmomi):
    """Collect standard-vSwitch configuration details from one or more ESXi hosts."""

    def __init__(self, module):
        """Resolve the target hosts from cluster_name / esxi_hostname parameters."""
        super(VswitchInfoManager, self).__init__(module)
        cluster = self.params.get('cluster_name', None)
        esxi = self.params.get('esxi_hostname', None)
        self.hosts = self.get_all_host_objs(cluster_name=cluster, esxi_host_name=esxi)
        if not self.hosts:
            self.module.fail_json(msg="Failed to find host system.")
        # Whether to include security / shaping / teaming policy details.
        self.policies = self.params.get('policies')

    @staticmethod
    def serialize_pnics(vswitch_obj):
        """Return the plain vmnic names attached to the given vSwitch."""
        # Each entry looks like 'key-vim.host.PhysicalNic-vmnic0'; keep the tail.
        return [pnic.split("-", 3)[-1] for pnic in vswitch_obj.pnic]

    @staticmethod
    def normalize_vswitch_info(vswitch_obj, policy_info):
        """Flatten a vSwitch managed object into a plain dict.

        Args:
            vswitch_obj: vSwitch managed object.
            policy_info: When truthy, also include security, traffic-shaping
                and teaming/failover details.

        Returns: dict of vSwitch attributes.
        """
        spec = vswitch_obj.spec
        info = dict(
            pnics=VswitchInfoManager.serialize_pnics(vswitch_obj),
            mtu=vswitch_obj.mtu,
            num_ports=spec.numPorts,
        )

        if not policy_info:
            return info

        security = spec.policy.security
        if security:
            # Order: promiscuous, MAC changes, forged transmits.
            info['security'] = [
                security.allowPromiscuous,
                security.macChanges,
                security.forgedTransmits,
            ]

        shaping = spec.policy.shapingPolicy
        if shaping:
            info['ts'] = shaping.enabled

        teaming = spec.policy.nicTeaming
        if teaming:
            info['lb'] = teaming.policy
            info['notify'] = teaming.notifySwitches
            # The API stores failback inverted as 'rollingOrder'.
            info['failback'] = not teaming.rollingOrder
            info['failover_active'] = teaming.nicOrder.activeNic
            info['failover_standby'] = teaming.nicOrder.standbyNic
            if teaming.failureCriteria.checkBeacon:
                info['failure_detection'] = "beacon_probing"
            else:
                info['failure_detection'] = "link_status_only"

        return info

    def gather_vswitch_info(self):
        """Return {host name: {vSwitch name: info dict}} for every resolved host."""
        hosts_vswitch_info = dict()
        for host in self.hosts:
            network_manager = host.configManager.networkSystem
            if not network_manager:
                # Hosts without a network manager are skipped silently.
                continue
            hosts_vswitch_info[host.name] = {
                vswitch.name: self.normalize_vswitch_info(vswitch_obj=vswitch,
                                                          policy_info=self.policies)
                for vswitch in network_manager.networkInfo.vswitch
            }
        return hosts_vswitch_info
+
+
def main():
    """Module entry point: gather vSwitch info and exit without changes."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
        policies=dict(type='bool', required=False, default=False),
    ))

    # Pure info module: check mode is always safe, changed is always False.
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ],
        supports_check_mode=True,
    )

    gatherer = VswitchInfoManager(module)
    module.exit_json(changed=False, hosts_vswitch_info=gatherer.gather_vswitch_info())


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vsan_health_silent_checks.py b/ansible_collections/community/vmware/plugins/modules/vsan_health_silent_checks.py
new file mode 100644
index 000000000..b62068220
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vsan_health_silent_checks.py
@@ -0,0 +1,151 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2023, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vsan_health_silent_checks
+version_added: '3.6.0'
+short_description: Silence vSAN health checks
+description:
+ - Take a list of vSAN health checks and silence them
+ - Re-enable alerts for previously silenced health checks
+author:
+ - Philipp Fruck (@p-fruck)
+requirements:
+ - vSAN Management SDK, which needs to be downloaded from VMware and installed manually.
+options:
+ cluster_name:
+ description:
+ - Name of the vSAN cluster.
+ type: str
+ required: true
+ checks:
+ description:
+ - The checks to silence.
+ type: list
+ elements: str
+ required: false
+ state:
+ description:
+ - The state of the health checks.
+ - If set to C(present), all given health checks will be silenced.
+ - If set to C(absent), all given health checks will be removed from the list of silent checks.
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ type: str
+extends_documentation_fragment:
+ - community.vmware.vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: Disable the vSAN Support Insight health check
+ community.vmware.vsan_health_silent_checks:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ checks: vsanenablesupportinsight
+ cluster_name: 'vSAN01'
+ delegate_to: localhost
+
+- name: Re-enable health check alerts for release catalog and HCL DB
+ community.vmware.vsan_health_silent_checks:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ checks:
+ - releasecataloguptodate
+ - autohclupdate
+ state: absent
+ cluster_name: 'vSAN01'
+ delegate_to: localhost
+'''
+
+RETURN = r'''#
+'''
+
+
+import traceback
+
+VSANPYTHONSDK_IMP_ERR = None
+try:
+ import vsanapiutils
+ HAS_VSANPYTHONSDK = True
+except ImportError:
+ VSANPYTHONSDK_IMP_ERR = traceback.format_exc()
+ HAS_VSANPYTHONSDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.vmware.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec
+
+
class VsanApi(PyVmomi):
    """Wrapper around the vSAN cluster health system for silencing checks.

    Connects to the vSAN management endpoint of the vCenter referenced by the
    module parameters and resolves the target cluster on construction.
    """

    def __init__(self, module):
        super(VsanApi, self).__init__(module)

        # Reuse the pyVmomi SOAP stub/SSL context for the vSAN managed objects.
        client_stub = self.si._GetStub()
        ssl_context = client_stub.schemeArgs.get('context')
        apiVersion = vsanapiutils.GetLatestVmodlVersion(module.params['hostname'])
        vcMos = vsanapiutils.GetVsanVcMos(client_stub, context=ssl_context, version=apiVersion)
        self.vsanClusterHealthSystem = vcMos['vsan-cluster-health-system']
        self.get_cluster()

    def get_cluster(self):
        """Resolve ``cluster_name`` to a cluster object; fail the module if missing."""
        cluster_name = self.params.get('cluster_name')
        self.cluster = self.find_cluster_by_name(cluster_name=cluster_name)
        if self.cluster is None:
            self.module.fail_json(msg=f"Cluster {cluster_name} does not exist.")

    def process_state(self):
        """Silence or un-silence the requested health checks.

        Returns True when the silent-check list was modified, False when the
        requested state is already in effect. Raises on API failure.
        """
        kwargs = {'cluster': self.cluster}
        silent_checks = self.vsanClusterHealthSystem.VsanHealthGetVsanClusterSilentChecks(**kwargs)

        # 'checks' is declared optional in the argument spec; the previous code
        # crashed with a TypeError when it was omitted (None is not iterable).
        requested = self.params.get('checks') or []

        state = self.params.get('state')
        if state == 'present':
            # Only add checks that are not silenced yet (idempotency).
            checks = [check for check in requested if check not in silent_checks]
            kwargs['addSilentChecks'] = checks
        else:
            # Only remove checks that are actually silenced (idempotency).
            checks = [check for check in requested if check in silent_checks]
            kwargs['removeSilentChecks'] = checks

        if not checks:
            return False

        success = self.vsanClusterHealthSystem.VsanHealthSetVsanClusterSilentChecks(**kwargs)
        if not success:
            raise Exception(f"Unknown error modifying checks {checks}")

        return True
+
+
def main():
    """Module entry point: apply the requested silent-check state."""
    argument_spec = vmware_argument_spec()

    argument_spec.update(dict(
        checks=dict(type='list', elements='str', required=False),
        cluster_name=dict(type='str', required=True),
        state=dict(type='str', choices=['present', 'absent'], default='present'),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
    )

    api = VsanApi(module)
    try:
        changed = api.process_state()
    except Exception as error:
        module.fail_json(msg=str(error))
    module.exit_json(changed=changed)


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vsphere_copy.py b/ansible_collections/community/vmware/plugins/modules/vsphere_copy.py
new file mode 100644
index 000000000..3fe227e68
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vsphere_copy.py
@@ -0,0 +1,213 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vsphere_copy
+short_description: Copy a file to a VMware datastore
+description:
+ - Upload files to a VMware datastore through a vCenter REST API.
+author:
+- Dag Wieers (@dagwieers)
+options:
+ src:
+ description:
+ - The file to push to vCenter.
+ required: true
+ type: str
+ aliases: [ name ]
+ datacenter:
+ description:
+ - The datacenter on the vCenter server that holds the datastore.
+ required: false
+ type: str
+ datastore:
+ description:
+ - The datastore to push files to.
+ required: true
+ type: str
+ path:
+ description:
+ - The file to push to the datastore.
+ required: true
+ type: str
+ aliases: [ dest ]
+ timeout:
+ description:
+ - The timeout in seconds for the upload to the datastore.
+ default: 10
+ type: int
+
+notes:
+ - "This module ought to be run from a system that can access the vCenter or the ESXi directly and has the file to transfer.
+ It can be the normal remote target or you can change it either by using C(transport: local) or using C(delegate_to)."
+extends_documentation_fragment:
+- community.vmware.vmware.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Copy file to datastore using delegate_to
+ community.vmware.vsphere_copy:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ src: /some/local/file
+ datacenter: DC1 Someplace
+ datastore: datastore1
+ path: some/remote/file
+ delegate_to: localhost
+
+- name: Copy file to datastore when datacenter is inside folder called devel
+ community.vmware.vsphere_copy:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ src: /some/local/file
+ datacenter: devel/DC1
+ datastore: datastore1
+ path: some/remote/file
+ delegate_to: localhost
+
+- name: Copy file to datastore using other_system
+ community.vmware.vsphere_copy:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ src: /other/local/file
+ datacenter: DC2 Someplace
+ datastore: datastore2
+ path: other/remote/file
+ delegate_to: other_system
+'''
+
+import atexit
+import errno
+import mmap
+import os
+import socket
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode, quote
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import open_url
+from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec
+
+
def vmware_path(datastore, datacenter, path):
    ''' Construct a datastore-folder URL path that vSphere accepts reliably.

    :param datastore: name of the target datastore (dsName query parameter)
    :param datacenter: datacenter path holding the datastore; may be falsy
                       for a direct ESXi connection (dcPath is then omitted)
    :param path: file path inside the datastore; leading slashes are stripped
    :return: URL path of the form ``/folder/<path>?dsName=...[&dcPath=...]``
    '''
    # quote() keeps '/' unescaped by default, so subdirectories survive.
    path = "/folder/%s" % quote(path.lstrip("/"))
    # NOTE: the previous `if not path.startswith("/")` branch was unreachable,
    # since the line above always produces a path starting with "/folder/".
    params = dict(dsName=datastore)
    if datacenter:
        # Due to a software bug in vSphere, it fails to handle ampersand in
        # datacenter names. The solution is to do what vSphere does (when
        # browsing) and double-encode ampersands.
        params["dcPath"] = datacenter.replace('&', '%26')
    return "%s?%s" % (path, urlencode(params))
+
+
def main():
    """Upload a local file to a vCenter/ESXi datastore via an HTTP PUT."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(
        src=dict(required=True, aliases=['name']),
        datacenter=dict(required=False),
        datastore=dict(required=True),
        path=dict(required=True, aliases=['dest'], type='str'),
        timeout=dict(default=10, type='int'))
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        # Implementing check-mode using HEAD is impossible, since size/date is not 100% reliable
        supports_check_mode=False,
    )

    hostname = module.params['hostname']
    username = module.params['username']
    password = module.params.get('password')
    src = module.params.get('src')
    datacenter = module.params.get('datacenter')
    datastore = module.params.get('datastore')
    path = module.params.get('path')
    validate_certs = module.params.get('validate_certs')
    timeout = module.params.get('timeout')

    try:
        fd = open(src, "rb")
        atexit.register(fd.close)
    except Exception as e:
        module.fail_json(msg="Failed to open src file %s" % to_native(e))

    if os.stat(src).st_size == 0:
        # mmap cannot map a zero-length file; upload an empty body instead.
        data = ''
    else:
        data = mmap.mmap(fd.fileno(), 0, access=mmap.ACCESS_READ)
        atexit.register(data.close)

    remote_path = vmware_path(datastore, datacenter, path)

    if not all([hostname, username, password]):
        module.fail_json(msg="One of the following parameters is missing - hostname, username, password")
    url = 'https://%s%s' % (hostname, remote_path)

    headers = {
        "Content-Type": "application/octet-stream",
        "Content-Length": str(len(data)),
    }

    r = None
    try:
        r = open_url(url, data=data, headers=headers, method='PUT', timeout=timeout,
                     url_username=username, url_password=password, validate_certs=validate_certs,
                     force_basic_auth=True)
    except socket.error as e:
        # FIX: the old code indexed the exception (`e[0]`), which raises
        # TypeError on Python 3. OSError exposes the code as `e.errno`.
        if e.errno == errno.ECONNRESET:
            # vSphere resets connection if the file is in use and cannot be replaced
            module.fail_json(msg='Failed to upload, image probably in use', status=None,
                             errno=e.errno, reason=to_native(e), url=url)
        module.fail_json(msg=to_native(e), status=None, errno=e.errno, reason=to_native(e),
                         url=url, exception=traceback.format_exc())
    except Exception as e:
        # Some libraries place a numeric code in args[0]; report it when present.
        error_code = e.args[0] if e.args and isinstance(e.args[0], int) else -1
        module.fail_json(msg=to_native(e), status=None, errno=error_code,
                         reason=to_native(e), url=url, exception=traceback.format_exc())

    if not r:
        module.fail_json(msg="Failed to upload", url=url,
                         errno=None, status=None, reason=None)
    status = r.getcode()
    if 200 <= status < 300:
        module.exit_json(changed=True, status=status, reason=r.msg, url=url)
    else:
        length = r.headers.get('content-length', None)
        if r.headers.get('transfer-encoding', '').lower() == 'chunked':
            chunked = 1
        else:
            chunked = 0

        module.fail_json(msg='Failed to upload', errno=None, status=status, reason=r.msg,
                         length=length, headers=dict(r.headers), chunked=chunked, url=url)


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/modules/vsphere_file.py b/ansible_collections/community/vmware/plugins/modules/vsphere_file.py
new file mode 100644
index 000000000..c42ef84de
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/modules/vsphere_file.py
@@ -0,0 +1,350 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vsphere_file
+short_description: Manage files on a vCenter datastore
+description:
+- Manage files on a vCenter datastore.
+author:
+- Dag Wieers (@dagwieers)
+options:
+ host:
+ description:
+ - The vCenter server on which the datastore is available.
+ type: str
+ required: true
+ aliases: [ hostname ]
+ username:
+ description:
+ - The user name to authenticate on the vCenter server.
+ type: str
+ required: true
+ password:
+ description:
+ - The password to authenticate on the vCenter server.
+ type: str
+ required: true
+ datacenter:
+ description:
+ - The datacenter on the vCenter server that holds the datastore.
+ type: str
+ required: true
+ datastore:
+ description:
+ - The datastore on the vCenter server to push files to.
+ type: str
+ required: true
+ path:
+ description:
+ - The file or directory on the datastore on the vCenter server.
+ type: str
+ required: true
+ aliases: [ dest ]
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated. This should only be
+ set to C(false) when no other option exists.
+ type: bool
+ default: true
+ timeout:
+ description:
+ - The timeout in seconds for the upload to the datastore.
+ type: int
+ default: 10
+ state:
+ description:
+ - The state of or the action on the provided path.
+ - If C(absent), the file will be removed.
+ - If C(directory), the directory will be created.
+ - If C(file), more information of the (existing) file will be returned.
+ - If C(touch), an empty file will be created if the path does not exist.
+ type: str
+ choices: [ absent, directory, file, touch ]
+ default: file
+notes:
+- The vSphere folder API does not allow to remove directory objects.
+'''
+
+EXAMPLES = r'''
+- name: Create an empty file on a datastore
+ community.vmware.vsphere_file:
+ host: '{{ vhost }}'
+ username: '{{ vuser }}'
+ password: '{{ vpass }}'
+ datacenter: DC1 Someplace
+ datastore: datastore1
+ path: some/remote/file
+ state: touch
+ delegate_to: localhost
+
+- name: Create a directory on a datastore
+ community.vmware.vsphere_file:
+ host: '{{ vhost }}'
+ username: '{{ vuser }}'
+ password: '{{ vpass }}'
+ datacenter: DC2 Someplace
+ datastore: datastore2
+ path: other/remote/file
+ state: directory
+ delegate_to: localhost
+
+- name: Query a file on a datastore
+ community.vmware.vsphere_file:
+ host: '{{ vhost }}'
+ username: '{{ vuser }}'
+ password: '{{ vpass }}'
+ datacenter: DC1 Someplace
+ datastore: datastore1
+ path: some/remote/file
+ state: file
+ delegate_to: localhost
+ ignore_errors: true
+
+- name: Delete a file on a datastore
+ community.vmware.vsphere_file:
+ host: '{{ vhost }}'
+ username: '{{ vuser }}'
+ password: '{{ vpass }}'
+ datacenter: DC2 Someplace
+ datastore: datastore2
+ path: other/remote/file
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+'''
+
+import socket
+import sys
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import PY2
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.module_utils.six.moves.urllib.parse import quote, urlencode
+from ansible.module_utils.urls import open_url
+from ansible.module_utils._text import to_native
+
+
def vmware_path(datastore, datacenter, path):
    ''' Construct a datastore-folder URL path that vSphere accepts reliably.

    :param datastore: name of the target datastore (dsName query parameter)
    :param datacenter: datacenter path holding the datastore; dcPath is
                       omitted when this is empty/None
    :param path: file path inside the datastore; surrounding slashes stripped
    :return: URL path of the form ``/folder/<path>?dsName=...[&dcPath=...]``
    '''
    path = '/folder/{path}'.format(path=quote(path.strip('/')))
    # NOTE: the previous `if not path.startswith('/')` branch was unreachable
    # (the path always begins with "/folder/") and has been removed.
    params = dict(dsName=datastore)
    if datacenter:
        # Due to a software bug in vSphere, it fails to handle ampersand in
        # datacenter names; double-encode it like the vSphere browser does.
        # FIX: the replace() used to run before the None-check and would raise
        # AttributeError for a falsy datacenter; it is now inside the guard.
        params['dcPath'] = datacenter.replace('&', '%26')
    return '{0}?{1}'.format(path, urlencode(params))
+
+
def _datastore_request(module, url, method, timeout, username, password, validate_certs, result):
    """Perform one HTTP request against the datastore folder API.

    Returns the response object; an HTTPError (404, 405, ...) is returned
    rather than raised so the caller can inspect its status code. Transport
    errors terminate the module via ``module.fail_json``.
    """
    try:
        r = open_url(url, method=method, timeout=timeout,
                     url_username=username, url_password=password,
                     validate_certs=validate_certs, force_basic_auth=True)
    except HTTPError as e:
        # HTTP-level errors are meaningful answers here (e.g. 404 = absent).
        r = e
    except socket.error as e:
        # FIX: the old code used `e[0]`, which raises TypeError on Python 3;
        # OSError exposes the code as `e.errno`.
        module.fail_json(msg=to_native(e), errno=e.errno, reason=to_native(e), **result)
    except Exception as e:
        # Arbitrary exceptions carry no reliable numeric code.
        module.fail_json(msg=to_native(e), errno=None, reason=to_native(e), **result)

    if PY2:
        sys.exc_clear()  # Avoid false positive traceback in fail_json() on Python 2

    return r


def main():
    """Entry point: create, query, touch or remove a file on a datastore."""
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(type='str', required=True, aliases=['hostname']),
            username=dict(type='str', required=True),
            password=dict(type='str', required=True, no_log=True),
            datacenter=dict(type='str', required=True),
            datastore=dict(type='str', required=True),
            path=dict(type='str', required=True, aliases=['dest']),
            state=dict(type='str', default='file', choices=['absent', 'directory', 'file', 'touch']),
            timeout=dict(type='int', default=10),
            validate_certs=dict(type='bool', default=True),
        ),
        supports_check_mode=True,
    )

    host = module.params.get('host')
    username = module.params.get('username')
    password = module.params.get('password')
    datacenter = module.params.get('datacenter')
    datastore = module.params.get('datastore')
    path = module.params.get('path')
    validate_certs = module.params.get('validate_certs')
    timeout = module.params.get('timeout')
    state = module.params.get('state')

    remote_path = vmware_path(datastore, datacenter, path)
    url = 'https://%s%s' % (host, remote_path)

    result = dict(
        path=path,
        size=None,
        state=state,
        status=None,
        url=url,
    )

    # Check if the file/directory exists
    r = _datastore_request(module, url, 'HEAD', timeout, username, password, validate_certs, result)

    status = r.getcode()
    if status == 200:
        exists = True
        result['size'] = int(r.headers.get('content-length', None))
    elif status == 404:
        exists = False
    else:
        result['reason'] = r.msg
        result['status'] = status
        module.fail_json(msg="Failed to query for file '%s'" % path, errno=None, headers=dict(r.headers), **result)

    if state == 'absent':
        if not exists:
            module.exit_json(changed=False, **result)

        if module.check_mode:
            result['reason'] = 'No Content'
            result['status'] = 204
        else:
            r = _datastore_request(module, url, 'DELETE', timeout, username, password, validate_certs, result)
            result['reason'] = r.msg
            result['status'] = r.getcode()

            if result['status'] == 405:
                # The vSphere folder API refuses DELETE on directory objects.
                result['state'] = 'directory'
                module.fail_json(msg='Directories cannot be removed with this module', errno=None, headers=dict(r.headers), **result)
            elif result['status'] != 204:
                module.fail_json(msg="Failed to remove '%s'" % path, errno=None, headers=dict(r.headers), **result)

        result['size'] = None
        module.exit_json(changed=True, **result)

    # NOTE: Creating a file in a non-existing directory, then remove the file
    elif state == 'directory':
        if exists:
            module.exit_json(changed=False, **result)

        if module.check_mode:
            result['reason'] = 'Created'
            result['status'] = 201
        else:
            # The folder API has no mkdir; create a temporary file inside the
            # new directory (which implicitly creates it), then delete it.
            remote_path = vmware_path(datastore, datacenter, path + '/foobar.tmp')
            temp_url = 'https://%s%s' % (host, remote_path)

            r = _datastore_request(module, temp_url, 'PUT', timeout, username, password, validate_certs, result)
            result['reason'] = r.msg
            result['status'] = r.getcode()
            if result['status'] != 201:
                result['url'] = temp_url
                module.fail_json(msg='Failed to create temporary file', errno=None, headers=dict(r.headers), **result)

            r = _datastore_request(module, temp_url, 'DELETE', timeout, username, password, validate_certs, result)
            status = r.getcode()
            if status != 204:
                # Directory was created; a leftover temp file is only a warning.
                result['reason'] = r.msg
                result['status'] = status
                module.warn('Failed to remove temporary file ({reason})'.format(**result))

        module.exit_json(changed=True, **result)

    elif state == 'file':

        if not exists:
            result['state'] = 'absent'
            result['status'] = status
            module.fail_json(msg="File '%s' is absent, cannot continue" % path, **result)

        result['status'] = status
        module.exit_json(changed=False, **result)

    elif state == 'touch':
        if exists:
            result['state'] = 'file'
            module.exit_json(changed=False, **result)

        if module.check_mode:
            result['reason'] = 'Created'
            result['status'] = 201
        else:
            r = _datastore_request(module, url, 'PUT', timeout, username, password, validate_certs, result)
            result['reason'] = r.msg
            result['status'] = r.getcode()
            if result['status'] != 201:
                module.fail_json(msg="Failed to touch '%s'" % path, errno=None, headers=dict(r.headers), **result)

        result['size'] = 0
        result['state'] = 'file'
        module.exit_json(changed=True, **result)


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/vmware/plugins/plugin_utils/inventory.py b/ansible_collections/community/vmware/plugins/plugin_utils/inventory.py
new file mode 100644
index 000000000..258d39a9f
--- /dev/null
+++ b/ansible_collections/community/vmware/plugins/plugin_utils/inventory.py
@@ -0,0 +1,440 @@
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2020, dacrystal
+# Copyright: (c) 2021, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import ssl
+import atexit
+import base64
+
+try:
+ # requests is required for exception handling of the ConnectionError
+ import requests
+
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+try:
+ from pyVim import connect
+ from pyVmomi import vim, vmodl
+ from pyVmomi.VmomiSupport import DataObject
+ from pyVmomi import Iso8601
+
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+try:
+ from vmware.vapi.vsphere.client import create_vsphere_client
+
+ HAS_VSPHERE = True
+except ImportError:
+ HAS_VSPHERE = False
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils.common.dict_transformations import _snake_to_camel
+from ansible.module_utils._text import to_text, to_native
+
+
class BaseVMwareInventory:
    """Shared connection and object-collection layer for the VMware dynamic
    inventory plugins: SOAP login via pyVmomi, optional REST (vAPI) login for
    tag support, and bulk property retrieval via the vSphere PropertyCollector.
    """

    def __init__(
        self, hostname, username, password, port, validate_certs, with_tags, display
    ):
        # Connection parameters; `content`/`rest_content` stay None until
        # do_login() is called.
        self.hostname = hostname
        self.username = username
        self.password = password
        self.port = port
        self.with_tags = with_tags
        self.validate_certs = validate_certs
        self.content = None
        self.rest_content = None
        self.display = display

    def do_login(self):
        """
        Check requirements and do login
        """
        self.check_requirements()
        self.si, self.content = self._login()
        if self.with_tags:
            # The REST session is only required for reading tags.
            self.rest_content = self._login_vapi()

    def _login_vapi(self):
        """
        Login to vCenter API using REST call
        Returns: connection object

        Raises AnsibleError when the client cannot be created.
        """
        session = requests.Session()
        session.verify = self.validate_certs
        if not self.validate_certs:
            # Disable warning shown at stdout
            requests.packages.urllib3.disable_warnings()

        server = self.hostname
        if self.port:
            server += ":" + str(self.port)

        client, err = None, None
        try:
            client = create_vsphere_client(
                server=server,
                username=self.username,
                password=self.password,
                session=session,
            )
        except Exception as error:
            # Keep the original exception to enrich the error message below.
            err = error

        if client is None:
            msg = "Failed to login to %s using %s" % (server, self.username)
            if err:
                msg += " due to : %s" % to_native(err)
            raise AnsibleError(msg)
        return client

    def _login(self):
        """
        Login to vCenter or ESXi server
        Returns: connection object

        Returns a (service_instance, content) pair; raises AnsibleParserError
        on any connection/authentication failure.
        """
        if self.validate_certs and not hasattr(ssl, "SSLContext"):
            raise AnsibleError(
                "pyVim does not support changing verification mode with python < 2.7.9. Either update "
                "python or set validate_certs to false in configuration YAML file."
            )

        ssl_context = None
        if not self.validate_certs and hasattr(ssl, "SSLContext"):
            # Build a context that skips certificate verification.
            ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ssl_context.verify_mode = ssl.CERT_NONE

        service_instance = None
        try:
            service_instance = connect.SmartConnect(
                host=self.hostname,
                user=self.username,
                pwd=self.password,
                sslContext=ssl_context,
                port=self.port,
            )

        except vim.fault.InvalidLogin as e:
            raise AnsibleParserError(
                "Unable to log on to vCenter or ESXi API at %s:%s as %s: %s"
                % (self.hostname, self.port, self.username, e.msg)
            )
        except vim.fault.NoPermission as e:
            raise AnsibleParserError(
                "User %s does not have required permission"
                " to log on to vCenter or ESXi API at %s:%s : %s"
                % (self.username, self.hostname, self.port, e.msg)
            )
        except (requests.ConnectionError, ssl.SSLError) as e:
            raise AnsibleParserError(
                "Unable to connect to vCenter or ESXi API at %s on TCP/%s: %s"
                % (self.hostname, self.port, e)
            )
        except vmodl.fault.InvalidRequest as e:
            # Request is malformed
            raise AnsibleParserError(
                "Failed to get a response from server %s:%s as "
                "request is malformed: %s" % (self.hostname, self.port, e.msg)
            )
        except Exception as e:
            raise AnsibleParserError(
                "Unknown error while connecting to vCenter or ESXi API at %s:%s : %s"
                % (self.hostname, self.port, e)
            )

        if service_instance is None:
            raise AnsibleParserError(
                "Unknown error while connecting to vCenter or ESXi API at %s:%s"
                % (self.hostname, self.port)
            )

        # Ensure the session is closed when the interpreter exits.
        atexit.register(connect.Disconnect, service_instance)
        return service_instance, service_instance.RetrieveContent()

    def check_requirements(self):
        """ Check all requirements for this inventory are satisfied"""
        if not HAS_REQUESTS:
            raise AnsibleParserError(
                'Please install "requests" Python module as this is required'
                " for VMware Guest dynamic inventory plugin."
            )
        if not HAS_PYVMOMI:
            raise AnsibleParserError(
                'Please install "PyVmomi" Python module as this is required'
                " for VMware Guest dynamic inventory plugin."
            )
        if HAS_REQUESTS:
            # Pyvmomi 5.5 and onwards requires requests 2.3
            # https://github.com/vmware/pyvmomi/blob/master/requirements.txt
            required_version = (2, 3)
            requests_version = requests.__version__.split(".")[:2]
            try:
                requests_major_minor = tuple(map(int, requests_version))
            except ValueError:
                raise AnsibleParserError("Failed to parse 'requests' library version.")

            if requests_major_minor < required_version:
                raise AnsibleParserError(
                    "'requests' library version should"
                    " be >= %s, found: %s."
                    % (
                        ".".join([str(w) for w in required_version]),
                        requests.__version__,
                    )
                )

        if not HAS_VSPHERE and self.with_tags:
            raise AnsibleError(
                "Unable to find 'vSphere Automation SDK' Python library which is required."
                " Please refer this URL for installation steps"
                " - https://code.vmware.com/web/sdk/7.0/vsphere-automation-python"
            )

        if not all([self.hostname, self.username, self.password]):
            raise AnsibleError(
                "Missing one of the following : hostname, username, password. Please read "
                "the documentation for more information."
            )

    def get_managed_objects_properties(
        self, vim_type, properties=None, resources=None, strict=False
    ):
        """
        Look up a Managed Object Reference in vCenter / ESXi Environment
        :param vim_type: Type of vim object e.g, for datacenter - vim.Datacenter
        :param properties: List of properties related to vim object e.g. Name
        :param resources: List of resources to limit search scope
        :param strict: Dictates if plugin raises error or just warns
        :return: local content object
        """
        # Short aliases for the PropertyCollector spec classes used below.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec
        filter_spec = vmodl.query.PropertyCollector.FilterSpec
        object_spec = vmodl.query.PropertyCollector.ObjectSpec
        property_spec = vmodl.query.PropertyCollector.PropertySpec

        resource_filters = resources or []
        # Maps a vim type to the human-readable name used in error messages.
        type_to_name_map = {}

        def _handle_error(message):
            # Raise in strict mode, otherwise just surface a warning.
            if strict:
                raise AnsibleError(message)
            self.display.warning(message)

        def get_contents(container, vim_types):
            # Retrieve only the "name" property of all objects of the given
            # types below `container` (used for resource filtering).
            return self.content.propertyCollector.RetrieveContents(
                [
                    filter_spec(
                        objectSet=[
                            object_spec(
                                obj=self.content.viewManager.CreateContainerView(
                                    container, vim_types, True
                                ),
                                skip=False,
                                selectSet=[
                                    traversal_spec(
                                        type=vim.view.ContainerView,
                                        path="view",
                                        skip=False,
                                    )
                                ],
                            )
                        ],
                        propSet=[
                            property_spec(type=t, all=False, pathSet=["name"])
                            for t in vim_types
                        ],
                    )
                ]
            )

        def filter_containers(containers, typ, filter_list):
            # Narrow `containers` to the objects of type `typ` whose name is in
            # `filter_list`; warn/raise for names that were not found.
            if len(filter_list) > 0:
                objs = []
                results = []
                found_filters = {}

                for container in containers:
                    results.extend(get_contents(container, [typ]))

                for res in results:
                    if res.propSet[0].val in filter_list:
                        objs.append(res.obj)
                        found_filters[res.propSet[0].val] = True

                for fil in filter_list:
                    if fil not in found_filters:
                        _handle_error(
                            "Unable to find %s %s" % (type_to_name_map[typ], fil)
                        )

                return objs
            return containers

        def build_containers(containers, vim_type, names, filters):
            # Recursively apply the nested `resources` filter structure,
            # descending one filter level per call.
            filters = filters or []
            if vim_type:
                containers = filter_containers(containers, vim_type, names)

            new_containers = []
            for fil in filters:
                new_filters = None
                for k, v in fil.items():
                    if k == "resources":
                        # Nested filters are handled by the recursive call.
                        new_filters = v
                    else:
                        # Filter key (e.g. "datacenter") names a vim type.
                        vim_type = getattr(vim, _snake_to_camel(k, True))
                        names = v
                        type_to_name_map[vim_type] = k.replace("_", " ")

                new_containers.extend(
                    build_containers(containers, vim_type, names, new_filters)
                )

            if len(filters) > 0:
                return new_containers
            return containers

        containers = build_containers(
            [self.content.rootFolder], None, None, resource_filters
        )
        if len(containers) == 0:
            return []

        # One object spec per surviving container, traversing its view.
        objs_list = [
            object_spec(
                obj=self.content.viewManager.CreateContainerView(r, [vim_type], True),
                selectSet=[
                    traversal_spec(path="view", skip=False, type=vim.view.ContainerView)
                ],
            )
            for r in containers
        ]

        # With no explicit property list, fetch all properties.
        is_all = not properties

        # Create Property Spec
        property_spec = property_spec(
            type=vim_type, all=is_all, pathSet=properties  # Type of object to retrieved
        )

        # Create Filter Spec
        filter_spec = filter_spec(
            objectSet=objs_list,
            propSet=[property_spec],
            reportMissingObjectsInResults=False,
        )

        try:
            return self.content.propertyCollector.RetrieveContents([filter_spec])
        except vmodl.query.InvalidProperty as err:
            _handle_error("Invalid property name: %s" % err.name)
        except Exception as err:  # pylint: disable=broad-except
            _handle_error("Couldn't retrieve contents from host: %s" % to_native(err))
        # Reached only when _handle_error merely warned (non-strict mode).
        return []
+
+
def in_place_merge(a, b):
    """Recursively fold mapping *b* into mapping *a* and return *a*.

    Values from *b* overwrite entries in *a*, except that two nested dicts
    are merged recursively. If *b* is not a dict it is returned unchanged.
    """
    if not isinstance(b, dict):
        return b
    for key, incoming in b.items():
        if key in a and isinstance(a[key], dict):
            a[key] = in_place_merge(a[key], incoming)
        else:
            a[key] = incoming
    return a
+
+
def to_nested_dict(vm_properties):
    """Expand dot-notation property names into a nested dict structure.

    Each value is converted with parse_vim_property() before insertion.
    """

    host_properties = {}

    for prop_name, prop_val in vm_properties.items():
        # Wrap the parsed value in one dict level per dotted name component,
        # innermost component first.
        node = parse_vim_property(prop_val)
        for component in reversed(prop_name.split(".")):
            node = {component: node}
        host_properties = in_place_merge(host_properties, node)

    return host_properties
+
+
def to_flatten_dict(d, parent_key="", sep="."):
    """Flatten a nested dict into dot-notation keys (inverse of to_nested_dict).

    Empty dicts and other falsy values are kept as leaf values.
    """
    flattened = {}
    for key, value in d.items():
        full_key = parent_key + sep + key if parent_key else key
        if value and isinstance(value, dict):
            flattened.update(to_flatten_dict(value, full_key, sep=sep))
        else:
            flattened[full_key] = value
    return flattened
+
+
def parse_vim_property(vim_prop):
    """
    Helper method to parse VIM properties of virtual machine
    """
    prop_type = type(vim_prop).__name__

    # pyVmomi managed/data objects and links are handled first.
    if prop_type.startswith(("vim", "vmodl", "Link")):
        if isinstance(vim_prop, DataObject):
            ignored = ("dynamicProperty", "dynamicType", "managedObjectType")
            return {
                prop.name: parse_vim_property(getattr(vim_prop, prop.name))
                for prop in vim_prop._GetPropertyList()  # pylint: disable=protected-access
                if prop.name not in ignored
            }

        if isinstance(vim_prop, list):
            return [parse_vim_property(item) for item in vim_prop]
        return vim_prop.__str__()

    if prop_type == "datetime":
        return Iso8601.ISO8601Format(vim_prop)

    if prop_type == "long":
        return int(vim_prop)
    if prop_type == "long[]":
        return [int(item) for item in vim_prop]

    if isinstance(vim_prop, list):
        return [parse_vim_property(item) for item in vim_prop]

    if prop_type in ["bool", "int", "NoneType", "dict"]:
        # Already JSON-friendly; pass through untouched.
        return vim_prop

    if prop_type in ["binary"]:
        return to_text(base64.b64encode(vim_prop))

    # Fallback: represent anything else as text.
    return to_text(vim_prop)