author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-13 12:04:41 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-13 12:04:41 +0000
commit     975f66f2eebe9dadba04f275774d4ab83f74cf25 (patch)
tree       89bd26a93aaae6a25749145b7e4bca4a1e75b2be /ansible_collections/ovirt
parent     Initial commit. (diff)
Adding upstream version 7.7.0+dfsg. (upstream/7.7.0+dfsg)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/ovirt')
-rw-r--r--  ansible_collections/ovirt/ovirt/.config/ansible-lint.yml | 17
-rw-r--r--  ansible_collections/ovirt/ovirt/CHANGELOG.rst | 659
-rw-r--r--  ansible_collections/ovirt/ovirt/FILES.json | 3428
-rw-r--r--  ansible_collections/ovirt/ovirt/MANIFEST.json | 37
-rw-r--r--  ansible_collections/ovirt/ovirt/README-developers.md | 4
-rw-r--r--  ansible_collections/ovirt/ovirt/README.md | 27
-rw-r--r--  ansible_collections/ovirt/ovirt/README.md.in | 88
-rw-r--r--  ansible_collections/ovirt/ovirt/automation/README.md | 8
-rwxr-xr-x  ansible_collections/ovirt/ovirt/automation/build.sh | 78
-rw-r--r--  ansible_collections/ovirt/ovirt/bindep.txt | 6
-rwxr-xr-x  ansible_collections/ovirt/ovirt/build.sh | 77
-rw-r--r--  ansible_collections/ovirt/ovirt/changelogs/README.md | 28
-rw-r--r--  ansible_collections/ovirt/ovirt/changelogs/changelog.yaml | 897
-rw-r--r--  ansible_collections/ovirt/ovirt/changelogs/config.yaml | 31
-rw-r--r--  ansible_collections/ovirt/ovirt/changelogs/fragments/.placeholder | 1
-rw-r--r--  ansible_collections/ovirt/ovirt/examples/filters/ovirtdiff.yml | 33
-rw-r--r--  ansible_collections/ovirt/ovirt/examples/filters/vmips.yml | 77
-rw-r--r--  ansible_collections/ovirt/ovirt/examples/ovirt_ansible_collections.yml | 21
-rw-r--r--  ansible_collections/ovirt/ovirt/licenses/Apache-license.txt | 191
-rw-r--r--  ansible_collections/ovirt/ovirt/licenses/GPL-license.txt | 674
-rw-r--r--  ansible_collections/ovirt/ovirt/meta/execution-environment.yml | 17
-rw-r--r--  ansible_collections/ovirt/ovirt/meta/requirements.yml | 7
-rw-r--r--  ansible_collections/ovirt/ovirt/meta/runtime.yml | 82
-rw-r--r--  ansible_collections/ovirt/ovirt/ovirt-ansible-collection-2.4.1.tar.gz | bin 0 -> 310911 bytes
-rw-r--r--  ansible_collections/ovirt/ovirt/ovirt-ansible-collection.spec | 409
-rw-r--r--  ansible_collections/ovirt/ovirt/ovirt-ansible-collection.spec.in | 409
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/callback/stdout.py | 57
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/doc_fragments/ovirt.py | 111
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/doc_fragments/ovirt_info.py | 98
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/filter/convert_to_bytes.py | 21
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/filter/convert_to_bytes.yml | 22
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/filter/filtervalue.yml | 30
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/filter/get_network_xml_to_dict.yml | 22
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/filter/get_ovf_disk_size.py | 31
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/filter/get_ovf_disk_size.yml | 25
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/filter/json_query.py | 59
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/filter/json_query.yml | 20
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/filter/ovirtdiff.yml | 35
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmip.py | 158
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmip.yml | 31
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmips.yml | 31
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmipsv4.yml | 31
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmipsv6.yml | 31
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmipv4.yml | 31
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmipv6.yml | 31
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/filter/removesensitivevmdata.yml | 22
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/inventory/ovirt.py | 272
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/module_utils/__init__.py | 0
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/module_utils/cloud.py | 209
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/module_utils/ovirt.py | 919
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/module_utils/version.py | 52
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/__init__.py | 0
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_affinity_group.py | 372
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_affinity_label.py | 217
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_affinity_label_info.py | 193
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_api_info.py | 97
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_auth.py | 311
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_cluster.py | 792
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_cluster_info.py | 134
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_datacenter.py | 330
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_datacenter_info.py | 118
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_disk.py | 1006
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_disk_info.py | 131
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_disk_profile.py | 212
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_event.py | 248
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_event_info.py | 171
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_external_provider.py | 424
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_external_provider_info.py | 178
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_group.py | 187
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_group_info.py | 130
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host.py | 760
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_info.py | 153
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_network.py | 607
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_pm.py | 267
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_storage_info.py | 196
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_instance_type.py | 632
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_job.py | 237
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_mac_pool.py | 186
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_network.py | 380
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_network_info.py | 134
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_nic.py | 344
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_nic_info.py | 164
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_permission.py | 338
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_permission_info.py | 175
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_qos.py | 379
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_quota.py | 331
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_quota_info.py | 148
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_role.py | 196
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_scheduling_policy_info.py | 147
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_snapshot.py | 571
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_snapshot_info.py | 145
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_connection.py | 300
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_domain.py | 849
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_domain_info.py | 134
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_template_info.py | 156
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_vm_info.py | 150
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_system_option_info.py | 135
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_tag.py | 265
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_tag_info.py | 182
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_template.py | 1195
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_template_info.py | 134
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_user.py | 204
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_user_info.py | 130
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vm.py | 2917
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vm_info.py | 201
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vm_os_info.py | 142
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vmpool.py | 491
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vmpool_info.py | 132
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vnic_profile.py | 330
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vnic_profile_info.py | 135
-rw-r--r--  ansible_collections/ovirt/ovirt/plugins/test/ovirt_proxied_check.py | 46
-rw-r--r--  ansible_collections/ovirt/ovirt/requirements.txt | 2
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/cluster_upgrade/README.md | 53
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/cluster_upgrade/defaults/main.yml | 16
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/cluster_upgrade/examples/cluster_upgrade.yml | 26
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/cluster_upgrade/examples/passwords.yml | 12
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/cluster_policy.yml | 25
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/log_progress.yml | 41
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/main.yml | 283
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/pinned_vms.yml | 23
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/upgrade.yml | 156
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/README.md | 77
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/defaults/main.yml | 37
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/disaster_recovery_vars.yml | 109
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/dr_ovirt_setup.yml | 11
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/dr_play.yml | 8
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/ovirt_passwords.yml | 13
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/bcolors.py | 21
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/dr.conf | 21
-rwxr-xr-x  ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/fail_back.py | 288
-rwxr-xr-x  ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/fail_over.py | 243
-rwxr-xr-x  ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/generate_mapping.py | 445
-rwxr-xr-x  ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/generate_vars.py | 318
-rwxr-xr-x  ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/generate_vars_test.py | 38
-rwxr-xr-x  ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/ovirt-dr | 158
-rwxr-xr-x  ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/validator.py | 731
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/vault_secret.sh | 1
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_disks.yml | 12
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_domain.yml | 22
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_domain_process.yml | 51
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_invalid_filtered_master_domains.yml | 22
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_valid_filtered_master_domains.yml | 28
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_vms.yml | 12
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/shutdown_vm.yml | 14
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/shutdown_vms.yml | 23
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/update_ovf_store.yml | 20
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean_engine.yml | 130
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/generate_mapping.yml | 15
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/main.yml | 35
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_domain.yml | 61
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_fcp_domain.yml | 32
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_glusterfs_domain.yml | 32
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_iscsi_domain.yml | 61
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_nfs_domain.yml | 28
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_posixfs_domain.yml | 33
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/print_info.yml | 19
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_template.yml | 29
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_templates.yml | 23
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_vm.yml | 33
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_vms.yml | 26
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/report_log_template.j2 | 24
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/run_vms.yml | 23
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover_engine.yml | 213
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/run_unregistered_entities.yml | 42
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/unregister_entities.yml | 62
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/README.md | 169
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/defaults/main.yml | 41
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/examples/engine-deploy.yml | 18
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/examples/engine-upgrade.yml | 19
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/examples/passwords.yml | 12
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/engine_setup.yml | 121
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/install_packages.yml | 25
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/main.yml | 10
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/pre_install_checks.yml | 12
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/restore_engine_from_file.yml | 20
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.1_basic.txt.j2 | 3
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.1_upgrade.txt.j2 | 1
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.2_basic.txt.j2 | 14
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.2_upgrade.txt.j2 | 16
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.3_basic.txt.j2 | 11
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.3_upgrade.txt.j2 | 1
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.4_basic.txt.j2 | 4
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.4_upgrade.txt.j2 | 2
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.5_basic.txt.j2 | 1
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.5_upgrade.txt.j2 | 1
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/templates/basic_answerfile.txt.j2 | 64
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/tests/containers-deploy.yml | 17
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/tests/engine-deploy.yml | 17
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/tests/engine-upgrade.yml | 17
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/tests/inventory | 4
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/tests/passwords.yml | 12
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/tests/requirements.yml | 4
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/tests/test-4.2.yml | 6
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/tests/test-master.yml | 6
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/tests/test-upgrade-4.2-to-master.yml | 10
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/engine_setup/vars/main.yml | 2
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/README.md | 393
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/defaults/main.yml | 119
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/hosted_engine_deploy_localhost.yml | 8
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/hosted_engine_deploy_remotehost.yml | 7
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/iscsi_deployment_remote.json | 18
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/nfs_deployment.json | 8
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/passwords.yml | 13
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/required_networks_fix.yml | 56
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/files/35-allow-ansible-for-vdsm.rules | 4
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_add_host/README.md | 3
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_setup/README.md | 3
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_setup/add_host_storage_domain.yml | 53
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/enginevm_after_engine_setup/README.md | 3
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/enginevm_before_engine_setup/README.md | 3
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/add_engine_as_ansible_host.yml | 25
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/alter_libvirt_default_net_configuration.yml | 33
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/apply_openscap_profile.yml | 37
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/auth_revoke.yml | 6
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/auth_sso.yml | 13
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/01_prepare_routing_rules.yml | 100
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/02_create_local_vm.yml | 159
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/03_engine_initial_tasks.yml | 132
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/04_engine_final_tasks.yml | 90
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/05_add_host.yml | 275
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/clean_cloud_init_config.yml | 20
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/clean_local_storage_pools.yml | 28
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/clean_localvm_dir.yml | 11
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_storage_domain.yml | 191
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_target_vm/01_create_target_hosted_engine_vm.yml | 173
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_target_vm/02_engine_vm_configuration.yml | 81
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_target_vm/03_hosted_engine_final_tasks.yml | 501
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/fc_getdevices.yml | 10
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/fetch_engine_logs.yml | 41
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/fetch_host_ip.yml | 41
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/filter_team_devices.yml | 33
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/filter_unsupported_vlan_devices.yml | 64
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/final_clean.yml | 11
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/full_execution.yml | 72
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/get_appliance_dist.yml | 9
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/get_local_vm_disk_path.yml | 12
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/initial_clean.yml | 150
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/install_appliance.yml | 33
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/install_packages.yml | 9
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/ipv_switch.yml | 11
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/iscsi_discover.yml | 25
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/iscsi_getdevices.yml | 34
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/main.yml | 19
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/partial_execution.yml | 155
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pause_execution.yml | 13
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/001_validate_network_interfaces.yml | 92
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/002_validate_hostname_tasks.yml | 122
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/define_variables.yml | 52
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_data_center_name.yml | 15
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_firewalld.yml | 14
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_gateway.yml | 25
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_mac_address.yml | 15
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_memory_size.yml | 44
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_network_test.yml | 37
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_services_status.yml | 17
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_vcpus_count.yml | 17
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/restore_backup.yml | 87
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/restore_host_redeploy.yml | 29
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/search_available_network_subnet.yaml | 57
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/sync_on_engine_machine.yml | 12
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/validate_ip_prefix.yml | 29
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/validate_vlan_bond_mode.yml | 56
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/validate_vlan_name.yml | 30
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/broker.conf.j2 | 8
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/fhanswers.conf.j2 | 66
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/hosted-engine.conf.j2 | 42
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/ifcfg-eth0-dhcp.j2 | 12
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/ifcfg-eth0-static-ipv6.j2 | 25
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/ifcfg-eth0-static.j2 | 25
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/meta-data.j2 | 2
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/network-config-dhcp.j2 | 11
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/network-config.j2 | 24
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/user-data.j2 | 20
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/version.j2 | 1
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/vm.conf.j2 | 16
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/image_template/README.md | 158
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/image_template/defaults/main.yml | 21
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/image_template/examples/ovirt_image_template.yml | 27
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/image_template/examples/passwords.yml | 12
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/image_template/tasks/empty.yml | 8
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/image_template/tasks/glance_image.yml | 62
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/image_template/tasks/main.yml | 8
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/image_template/tasks/qcow2_image.yml | 259
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/image_template/vars/main.yml | 3
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/README.md | 438
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/defaults/main.yml | 3
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/examples/ovirt_infra.yml | 15
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/examples/ovirt_infra_destroy.yml | 43
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/examples/vars/ovirt_infra_vars.yml | 108
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/examples/vars/passwords.yml | 12
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/aaa_jdbc/README.md | 60
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/aaa_jdbc/defaults/main.yml | 2
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/aaa_jdbc/tasks/main.yml | 88
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/clusters/README.md | 106
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/clusters/tasks/main.yml | 45
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/clusters/vars/main.yml | 27
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/README.md | 30
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/defaults/main.yml | 2
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/cluster_and_hosts.yml | 21
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/datacenter.yml | 6
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/disks.yml | 16
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/main.yml | 39
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/storages_last.yml | 11
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/storages_pre.yml | 28
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/templates.yml | 16
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/vm_pools.yml | 15
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/vms.yml | 15
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/datacenters/README.md | 56
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/datacenters/defaults/main.yml | 3
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/datacenters/tasks/main.yml | 28
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/external_providers/README.md | 73
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/external_providers/tasks/main.yml | 18
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/hosts/README.md | 43
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/hosts/defaults/main.yml | 3
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/hosts/tasks/main.yml | 88
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/mac_pools/README.md | 39
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/mac_pools/tasks/main.yml | 11
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/networks/README.md | 77
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/networks/tasks/main.yml | 42
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/permissions/README.md | 49
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/permissions/tasks/main.yml | 40
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/storages/README.md | 65
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/roles/storages/tasks/main.yml | 107
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/tasks/create_infra.yml | 37
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/tasks/main.yml | 49
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/infra/tasks/remove_infra.yml | 4
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/remove_stale_lun/README.md | 46
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/remove_stale_lun/defaults/main.yml | 2
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/remove_stale_lun/examples/passwords.yml | 12
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/remove_stale_lun/examples/remove_stale_lun.yml | 34
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/remove_stale_lun/tasks/fetch_hosts.yml | 46
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/remove_stale_lun/tasks/main.yml | 5
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/remove_stale_lun/tasks/remove_mpath_device.yml | 27
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/README.md | 132
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/defaults/main.yml | 14
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/examples/ovirt_repositories_release_rpm.yml | 11
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/examples/ovirt_repositories_subscription_manager.yml | 22
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/examples/passwords.yml | 13
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/tasks/backup-repos.yml | 26
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/tasks/install-satellite-ca.yml | 36
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/tasks/main.yml | 30
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/tasks/rh-subscription.yml | 83
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/tasks/rpm.yml | 20
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/tasks/satellite-subscription.yml | 30
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/tasks/search-pool-id.yml | 19
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/vars/default.yml | 1
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.1.yml | 6
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.2.yml | 7
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.3.yml | 7
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.4.yml | 8
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_eus_4.4.yml | 3
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.1.yml | 3
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.2.yml | 4
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.3.yml | 4
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.4.yml | 8
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/vars/host_eus_4.4.yml | 3
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/vars/host_ppc_4.4.yml | 6
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/vars/host_ppc_eus_4.4.yml | 3
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.1.yml | 2
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.2.yml | 2
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.3.yml | 2
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.4.yml | 2
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/shutdown_env/README.md | 53
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/shutdown_env/defaults/main.yml | 3
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/shutdown_env/examples/passwords.yml | 12
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/shutdown_env/examples/shutdown_env.yml | 18
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/shutdown_env/tasks/main.yml | 236
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/vm_infra/README.md | 324
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/vm_infra/defaults/main.yml | 15
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/vm_infra/examples/ovirt_vm_infra.yml | 48
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/vm_infra/examples/ovirt_vm_infra_inv.yml | 72
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/vm_infra/examples/passwords.yml | 12
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/affinity_groups.yml | 22
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/affinity_labels.yml | 13
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/create_inventory.yml | 44
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/create_vms.yml | 48
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/main.yml | 63
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/manage_state.yml | 91
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/vm_state_absent.yml | 6
-rw-r--r--  ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/vm_state_present.yml | 159
-rw-r--r--  ansible_collections/ovirt/ovirt/tests/.gitignore | 1
-rw-r--r--  ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.10.txt | 2
-rw-r--r--  ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.11.txt | 13
-rw-r--r--  ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.12.txt | 14
-rw-r--r--  ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.13.txt | 16
-rw-r--r--  ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.9.txt | 2
386 files changed, 43377 insertions, 0 deletions
diff --git a/ansible_collections/ovirt/ovirt/.config/ansible-lint.yml b/ansible_collections/ovirt/ovirt/.config/ansible-lint.yml
new file mode 100644
index 000000000..77bdaa939
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/.config/ansible-lint.yml
@@ -0,0 +1,17 @@
+skip_list:
+ - ignore-errors
+ - var-spacing
+ - var-naming
+ - experimental
+ - package-latest
+ - name[casing]
+ - name[template]
+warn_list:
+ - no-changed-when
+ - fqcn-builtins
+ - template-instead-of-copy
+ - jinja[invalid]
+ - fqcn[action-core]
+ - fqcn[action]
+ - name[missing]
+ - deprecated-module
diff --git a/ansible_collections/ovirt/ovirt/CHANGELOG.rst b/ansible_collections/ovirt/ovirt/CHANGELOG.rst
new file mode 100644
index 000000000..ff17b4b6d
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/CHANGELOG.rst
@@ -0,0 +1,659 @@
+=========================
+ovirt.ovirt Release Notes
+=========================
+
+.. contents:: Topics
+
+
+v2.4.1
+======
+
+Bugfixes
+--------
+
+- cluster_upgrade - Fix the engine_correlation_id location (https://github.com/oVirt/ovirt-ansible-collection/pull/637).
+
+v2.4.0
+======
+
+Bugfixes
+--------
+
+- cluster_upgrade - Add default random uuid to engine_correlation_id (https://github.com/oVirt/ovirt-ansible-collection/pull/624).
+- image_template - Add template_bios_type (https://github.com/oVirt/ovirt-ansible-collection/pull/620).
+
+v2.3.1
+======
+
+Bugfixes
+--------
+
+- filters - Fix ovirtvmipsv4 with attribute and network (https://github.com/oVirt/ovirt-ansible-collection/pull/607).
+- filters - Fix ovirtvmipsv4 with filter to list (https://github.com/oVirt/ovirt-ansible-collection/pull/609).
+- ovirt_host - Fix kernel_params elements type (https://github.com/oVirt/ovirt-ansible-collection/pull/608).
+
+v2.3.0
+======
+
+Minor Changes
+-------------
+
+- filters - Add documentation to all filters (https://github.com/oVirt/ovirt-ansible-collection/pull/603).
+- ovirt_disk - Add read_only param for disk attachments (https://github.com/oVirt/ovirt-ansible-collection/pull/597); a usage sketch follows this list.
+- ovirt_disk - Fix disk attachment to VM (https://github.com/oVirt/ovirt-ansible-collection/pull/361).
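+
+A minimal sketch of the new ``read_only`` flag (disk and VM names below are
+illustrative placeholders, not values from the release notes):
+
+.. code-block:: yaml
+
+    - name: Attach an existing disk to a VM as read-only
+      ovirt.ovirt.ovirt_disk:
+        auth: "{{ ovirt_auth }}"
+        name: data_disk
+        vm_name: demo_vm
+        interface: virtio_scsi
+        read_only: true
+        state: attached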
+
+Bugfixes
+--------
+
+- Fix ovirtvmipsv4 when using attribute (https://github.com/oVirt/ovirt-ansible-collection/pull/596).
+- he-setup - fix static ipv6 ifcfg setup (https://github.com/oVirt/ovirt-ansible-collection/pull/592).
+- ovirt_host - Honor activate and reboot_after_installation when they are set to false with reinstalled host state (https://github.com/oVirt/ovirt-ansible-collection/pull/587).
+- repositories - RHV 4.4 SP1 is supported only on RHEL 8.6 EUS (https://github.com/oVirt/ovirt-ansible-collection/pull/576).
+
+v2.2.3
+======
+
+Minor Changes
+-------------
+
+- hosted_engine_setup - fix ovirt-provider-ovn-driver broken link (https://github.com/oVirt/ovirt-ansible-collection/pull/581).
+
+Bugfixes
+--------
+
+- cluster_upgrade - skip host upgrades without anything to update (https://github.com/oVirt/ovirt-ansible-collection/pull/580).
+- hosted_engine_setup - restore - remove host also based on name (https://github.com/oVirt/ovirt-ansible-collection/pull/567).
+- repositories - Fix example variable names (https://github.com/oVirt/ovirt-ansible-collection/pull/582).
+
+v2.2.2
+======
+
+Bugfixes
+--------
+
+- hosted_engine_setup - Detect hosted-engine-ha version using /usr/libexec/platform-python (https://github.com/oVirt/ovirt-ansible-collection/pull/573).
+- hosted_engine_setup - update ansible version in README (https://github.com/oVirt/ovirt-ansible-collection/pull/571).
+- repositories - Add mod_auth_openidc:2.3 and nodejs:14 to dnf modules (https://github.com/oVirt/ovirt-ansible-collection/pull/578).
+
+v2.2.1
+======
+
+Minor Changes
+-------------
+
+- During he_setup, configure ovn with he_host_address (https://github.com/oVirt/ovirt-ansible-collection/pull/568).
+
+Bugfixes
+--------
+
+- hosted_engine_setup - fix hosted-engine.conf permissions and ownership (https://github.com/oVirt/ovirt-ansible-collection/pull/569).
+
+v2.2.0
+======
+
+Minor Changes
+-------------
+
+- During he_setup, configure ovn with he_host_name for correct operation of ovn (https://github.com/oVirt/ovirt-ansible-collection/pull/563).
+- Fix "ansible-lint" version 6.0.0 "yaml" violations for "disaster_recovery" role (https://github.com/oVirt/ovirt-ansible-collection/pull/543).
+- Fix "ansible-lint" version 6.0.0 violations for "disaster_recovery" & "remove_stale_lun" roles (https://github.com/oVirt/ovirt-ansible-collection/pull/554).
+- Fix ansible-lint for basic roles (https://github.com/oVirt/ovirt-ansible-collection/pull/280).
+- Updating the documentation - "vm_name" / "vm_id" and/or disk "id" parameter(s) are required when extending a disk with a non-unique name (https://github.com/oVirt/ovirt-ansible-collection/pull/559).
+- gluster_heal_info - Replacing the gluster module with the CLI to support RHV automation hub (https://github.com/oVirt/ovirt-ansible-collection/pull/340).
+- ovirt_disk - Add warning for disk attachments (https://github.com/oVirt/ovirt-ansible-collection/pull/347).
+- ovirt_disk - Fix disk attachment to VM (https://github.com/oVirt/ovirt-ansible-collection/pull/361).
+- ovirt_qos, ovirt_disk_profile, ovirt_disk - Add modules to allow for creation and updating of disk_profiles (https://github.com/oVirt/ovirt-ansible-collection/pull/422).
+- ovirt_snapshot - Add vm_id to select VM (https://github.com/oVirt/ovirt-ansible-collection/pull/550); a usage sketch follows this list.
+- ovirt_vm - Add reset of VM (https://github.com/oVirt/ovirt-ansible-collection/pull/538).
+- ovirt_vm - Add virtio_scsi_enabled and multi_queues_enabled (https://github.com/oVirt/ovirt-ansible-collection/pull/348).
+- ovirt_vm - add volatile (https://github.com/oVirt/ovirt-ansible-collection/pull/539).
+- repositories - Add ovirt_repositories_rhsm_environment and FIPS fix (https://github.com/oVirt/ovirt-ansible-collection/pull/483).
+- repositories - Replace redhat_subscription and rhsm_repository with command (https://github.com/oVirt/ovirt-ansible-collection/pull/346).
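+
+A minimal sketch of selecting the VM by ID in ``ovirt_snapshot`` (the UUID is
+a placeholder):
+
+.. code-block:: yaml
+
+    - name: Create a snapshot of a VM selected by ID
+      ovirt.ovirt.ovirt_snapshot:
+        auth: "{{ ovirt_auth }}"
+        vm_id: 123e4567-e89b-12d3-a456-426614174000
+        description: snapshot before upgrade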
+
+Bugfixes
+--------
+
+- HE - Handle migration to hosts that use systemd-coredump (https://github.com/oVirt/ovirt-ansible-collection/pull/557).
+- cluster_upgrade - Fix starting up pinned vms (https://github.com/oVirt/ovirt-ansible-collection/pull/532).
+- he - Align role with ansible-lint-6.0 (https://github.com/oVirt/ovirt-ansible-collection/pull/545).
+- hosted_engine - Specify fqcn for ovirt_system_option_info (https://github.com/oVirt/ovirt-ansible-collection/pull/536).
+- hosted_engine_setup - Fix cleanup on el9 (https://github.com/oVirt/ovirt-ansible-collection/pull/533).
+- image_template - Remove static (https://github.com/oVirt/ovirt-ansible-collection/pull/537).
+- image_template - Remove static no - unsupported in ansible 2.12 (https://github.com/oVirt/ovirt-ansible-collection/pull/341).
+- ovirt_host - Fix host wait (https://github.com/oVirt/ovirt-ansible-collection/pull/531).
+- ovirt_host - Fix restarted wait condition (https://github.com/oVirt/ovirt-ansible-collection/pull/551).
+- ovirt_storage_domain - Fix inaccessible exception (https://github.com/oVirt/ovirt-ansible-collection/pull/534).
+- ovirt_vm - check if user provided a graphical protocol (https://github.com/oVirt/ovirt-ansible-collection/pull/542).
+- repositories - Move fips check to satellite CA install block (https://github.com/oVirt/ovirt-ansible-collection/pull/553).
+- shutdown_env - Align role with ansible-lint-6.0 (https://github.com/oVirt/ovirt-ansible-collection/pull/544).
+
+v2.1.0
+======
+
+Minor Changes
+-------------
+
+- Add convert_to_bytes filter (https://github.com/oVirt/ovirt-ansible-collection/pull/515); a usage sketch follows this list.
+- automation - Use python38 on el8 with ansible-core 2.12 and python39 on el9 with ansible-core 2.13 (https://github.com/oVirt/ovirt-ansible-collection/pull/518).
+- cloud.py - Sync with origin (https://github.com/oVirt/ovirt-ansible-collection/pull/519).
+- engine_setup - Allow to disable cert validation (https://github.com/oVirt/ovirt-ansible-collection/pull/517).
+- hosted_engine_setup - make vdsm config cleanup optional (https://github.com/oVirt/ovirt-ansible-collection/pull/521).
+- ovirt - Remove deprecated distutils (https://github.com/oVirt/ovirt-ansible-collection/pull/516).
+- ovirt_vm - add wait_after_lease (https://github.com/oVirt/ovirt-ansible-collection/pull/524).
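+
+A minimal sketch of the new filter (the input format is assumed to follow the
+filter's documented examples):
+
+.. code-block:: yaml
+
+    - name: Convert a human-readable size to bytes
+      ansible.builtin.debug:
+        msg: "{{ '1KiB' | ovirt.ovirt.convert_to_bytes }}"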
+
+Bugfixes
+--------
+
+- hosted_engine_setup - Fix "'ansible' ModuleNotFoundError" in Disaster Recovery scripts (https://github.com/oVirt/ovirt-ansible-collection/pull/503).
+- hosted_engine_setup - Use command instead of firewalld module (https://github.com/oVirt/ovirt-ansible-collection/pull/508).
+- ovirt_vm - Fix parsing None arguments (https://github.com/oVirt/ovirt-ansible-collection/pull/486).
+- ovirt_vm - check if the snapshot exists (https://github.com/oVirt/ovirt-ansible-collection/pull/525).
+
+v2.0.4
+======
+
+Bugfixes
+--------
+
+- Fix the admin user name when using keycloak (https://github.com/oVirt/ovirt-ansible-collection/pull/488).
+- Use cryptography < 37.0.0, as 37.0.0 emits a warning that fails testing. (https://github.com/oVirt/ovirt-ansible-collection/pull/492).
+- Use rstcheck < 3.5.0, as 3.5.0 emits a warning that fails testing. (https://github.com/oVirt/ovirt-ansible-collection/pull/490).
+- cluster_upgrade - fix wait_condition (https://github.com/oVirt/ovirt-ansible-collection/pull/510).
+- hosted_engine_setup - Allocate 128MiB instead of 1GiB for he_metadata (https://github.com/oVirt/ovirt-ansible-collection/pull/489).
+- hosted_engine_setup - Collect logs also on failures in 03_hosted_engine_final_tasks.yml (https://github.com/oVirt/ovirt-ansible-collection/pull/504).
+- hosted_engine_setup - Fix keycloak activation/checking (https://github.com/oVirt/ovirt-ansible-collection/pull/509).
+- hosted_engine_setup - Require 'detail' to be 'Up' (https://github.com/oVirt/ovirt-ansible-collection/pull/498).
+- hosted_engine_setup - fix archive ownership (https://github.com/oVirt/ovirt-ansible-collection/pull/501).
+- infra - add warning for multiple storage connections (https://github.com/oVirt/ovirt-ansible-collection/pull/500).
+
+v2.0.3
+======
+
+Minor Changes
+-------------
+
+- ovirt_affinity_group - Add affinity labels (https://github.com/oVirt/ovirt-ansible-collection/pull/481).
+
+Bugfixes
+--------
+
+- inventory - Fix URL address (https://github.com/oVirt/ovirt-ansible-collection/pull/482).
+- ovirt_vm - Fix creating a RAW VM from a COW template (https://github.com/oVirt/ovirt-ansible-collection/pull/466).
+
+v2.0.2
+======
+
+Bugfixes
+--------
+
+- Fix progress logging via REST (https://github.com/oVirt/ovirt-ansible-collection/pull/474).
+
+v2.0.1
+======
+
+Bugfixes
+--------
+
+- Make storage_format optional - do not fail if missing (https://github.com/oVirt/ovirt-ansible-collection/pull/471).
+
+v2.0.0
+======
+
+Major Changes
+-------------
+
+- manageiq - role removed (https://github.com/oVirt/ovirt-ansible-collection/pull/375).
+
+Minor Changes
+-------------
+
+- Add json_query filter (https://github.com/oVirt/ovirt-ansible-collection/pull/436); a usage sketch follows this list.
+- cluster_upgrade - Add progress tracking via event logs to the role (https://github.com/oVirt/ovirt-ansible-collection/pull/415)
+- cluster_upgrade - Directly log progress to the cluster (https://github.com/oVirt/ovirt-ansible-collection/pull/449)
+- engine_setup - Honor ovirt_engine_setup_offline variable (https://github.com/oVirt/ovirt-ansible-collection/pull/381).
+- engine_setup - Prepare answer files and default values for 4.5 release (https://github.com/oVirt/ovirt-ansible-collection/pull/414).
+- gluster_heal_info - Replacing the gluster module with the CLI to support RHV automation hub (https://github.com/oVirt/ovirt-ansible-collection/pull/340).
+- hosted_engine - Replace virt_net and xml with commands (https://github.com/oVirt/ovirt-ansible-collection/pull/359).
+- hosted_engine_setup - Fix default gateway variable name (https://github.com/oVirt/ovirt-ansible-collection/pull/423).
+- hosted_engine_setup - Fix permissions on copied engine logs, needed for OpenSCAP (https://github.com/oVirt/ovirt-ansible-collection/pull/404).
+- hosted_engine_setup - Honor he_offline_deployment variable (https://github.com/oVirt/ovirt-ansible-collection/pull/380).
+- hosted_engine_setup - Replace calls to psql as postgres with engine_psql.sh (https://github.com/oVirt/ovirt-ansible-collection/pull/453).
+- hosted_engine_setup - configure abrt initial files only when needed (https://github.com/oVirt/ovirt-ansible-collection/pull/397).
+- info - Rename follows to follow parameter and add alias (https://github.com/oVirt/ovirt-ansible-collection/pull/367).
+- info - bump deprecate version for fetch_nested and nested_attributes (https://github.com/oVirt/ovirt-ansible-collection/pull/378).
+- info modules - Add follow link URL to API model links_summary
+- info modules - Enable follow parameter (https://github.com/oVirt/ovirt-ansible-collection/pull/355).
+- manageiq - add deprecation info (https://github.com/oVirt/ovirt-ansible-collection/pull/384).
+- ovirt_disk - Add warning for disk attachments (https://github.com/oVirt/ovirt-ansible-collection/pull/347).
+- ovirt_disk - Use imageio client (https://github.com/oVirt/ovirt-ansible-collection/pull/358).
+- ovirt_event - enable correlation_id on events (https://github.com/oVirt/ovirt-ansible-collection/pull/368).
+- ovirt_host - Add enroll_certificate (https://github.com/oVirt/ovirt-ansible-collection/pull/439).
+- ovirt_permission - add mac pool (https://github.com/oVirt/ovirt-ansible-collection/pull/353).
+- ovirt_remove_stale_lun - Allow user to remove multiple LUNs (https://github.com/oVirt/ovirt-ansible-collection/pull/357).
+- ovirt_remove_stale_lun - Retry "multipath -f" while removing the LUNs (https://github.com/oVirt/ovirt-ansible-collection/pull/382).
+- ovirt_remove_stale_lun - Use add_host instead of delegate_to (https://github.com/oVirt/ovirt-ansible-collection/pull/390).
+- ovirt_storage_template_info - fix docs (https://github.com/oVirt/ovirt-ansible-collection/pull/356).
+- ovirt_storage_vm_info - fix docs (https://github.com/oVirt/ovirt-ansible-collection/pull/356).
+- ovirt_template - Add ova import of template (https://github.com/oVirt/ovirt-ansible-collection/pull/304).
+- ovirt_template - add boot_menu and bios_type (https://github.com/oVirt/ovirt-ansible-collection/pull/465).
+- ovirt_vm - Add display file_transfer_enabled and copy_paste_enabled (https://github.com/oVirt/ovirt-ansible-collection/pull/339).
+- ovirt_vm - Add virtio_scsi_enabled and multi_queues_enabled (https://github.com/oVirt/ovirt-ansible-collection/pull/348).
+- ovirt_vm - Add virtio_scsi_multi_queues (https://github.com/oVirt/ovirt-ansible-collection/pull/373).
+- plugins - Remove unused imports (https://github.com/oVirt/ovirt-ansible-collection/pull/444).
+- repositories - Add to the documentation variable priority (https://github.com/oVirt/ovirt-ansible-collection/pull/440).
+- repositories - Replace redhat_subscription and rhsm_repository with command (https://github.com/oVirt/ovirt-ansible-collection/pull/346).
+- repositories - Update host and engine repositories to 4.4.9 (https://github.com/oVirt/ovirt-ansible-collection/pull/363).
+- repositories - add no_log to register (https://github.com/oVirt/ovirt-ansible-collection/pull/350).
+- repositories - add satellite support (https://github.com/oVirt/ovirt-ansible-collection/pull/431).
+- vm_infra - Add no_log to Manage VMs state task (https://github.com/oVirt/ovirt-ansible-collection/pull/417).
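+
+A minimal sketch of the new ``json_query`` filter, assuming it mirrors the
+JMESPath-based community filter of the same name (variable names are
+placeholders):
+
+.. code-block:: yaml
+
+    - name: Collect the names of all fetched VMs
+      ansible.builtin.debug:
+        msg: "{{ vm_info.ovirt_vms | ovirt.ovirt.json_query('[].name') }}"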
+
+Bugfixes
+--------
+
+- hosted_engine_setup - Add OpenSCAP security profile name parameter (https://github.com/oVirt/ovirt-ansible-collection/pull/411).
+- hosted_engine_setup - Add an option to set the storage format when creating a storage domain and use it (https://github.com/oVirt/ovirt-ansible-collection/pull/463).
+- hosted_engine_setup - Adjust files permissions (https://github.com/oVirt/ovirt-ansible-collection/pull/409).
+- hosted_engine_setup - Fix call to engine-psql for vds_spm_id (https://github.com/oVirt/ovirt-ansible-collection/pull/459).
+- hosted_engine_setup - Fix cloud-init package removal in airgapped environment (https://github.com/oVirt/ovirt-ansible-collection/pull/442)
+- hosted_engine_setup - Remove SPICE graphic protocol (https://github.com/oVirt/ovirt-ansible-collection/pull/394).
+- hosted_engine_setup - Replace xml community module (https://github.com/oVirt/ovirt-ansible-collection/pull/438).
+- hosted_engine_setup - Support DISA STIG profile (https://github.com/oVirt/ovirt-ansible-collection/pull/426).
+- hosted_engine_setup - Use cat command (https://github.com/oVirt/ovirt-ansible-collection/pull/443).
+- hosted_engine_setup - Use tpgt in iscsi login (https://github.com/oVirt/ovirt-ansible-collection/pull/338)
+- image_template - Remove static no - unsupported in ansible 2.12 (https://github.com/oVirt/ovirt-ansible-collection/pull/341).
+- ovirt_host - Fix failed_state_after_reinstall condition (https://github.com/oVirt/ovirt-ansible-collection/pull/371).
+- ovirt_template - Fix creating templates where the base template version number is not 1 (https://github.com/oVirt/ovirt-ansible-collection/pull/370).
+- repositories - Fix dnf module variable (https://github.com/oVirt/ovirt-ansible-collection/pull/454).
+- repositories - fix force flag on subscription-manager (https://github.com/oVirt/ovirt-ansible-collection/pull/430).
+
+New Plugins
+-----------
+
+Callback
+~~~~~~~~
+
+- ovirt.ovirt.stdout - Output the log of ansible
+
+v1.6.2
+======
+
+Minor Changes
+-------------
+
+- remove_stale_lun - Fix example for `remove_stale_lun` role to be able to run it from the engine (https://github.com/oVirt/ovirt-ansible-collection/pull/334).
+
+v1.6.1
+======
+
+Bugfixes
+--------
+
+- hosted_engine_setup - Use default bridge for IPv6 advertisements (https://github.com/oVirt/ovirt-ansible-collection/pull/331)
+- ovirt_auth - Fix token no_log (https://github.com/oVirt/ovirt-ansible-collection/pull/332).
+
+v1.5.5
+======
+
+Major Changes
+-------------
+
+- remove_stale_lun - Add role for removing stale LUN (https://bugzilla.redhat.com/1966873).
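+
+A minimal sketch of invoking the new role; ``lun_wwid`` is a hypothetical
+variable name used only for illustration, see the role's examples directory
+for the actual interface:
+
+.. code-block:: yaml
+
+    - name: Remove a stale LUN from the hosts in the data center
+      hosts: localhost
+      roles:
+        - role: ovirt.ovirt.remove_stale_lun
+      vars:
+        # Hypothetical variable name; check examples/remove_stale_lun.yml.
+        lun_wwid: 3600a098038304437415d4b6a59684a52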
+
+Minor Changes
+-------------
+
+- engine_setup - Wait for webserver up after engine-config reboot (https://github.com/oVirt/ovirt-ansible-collection/pull/324).
+- hosted_engine_setup - Pause deployment on failure of `engine-backup --mode=restore` (https://github.com/oVirt/ovirt-ansible-collection/pull/327).
+- hosted_engine_setup - Text change - Consistently use 'bootstrap engine VM' (https://github.com/oVirt/ovirt-ansible-collection/pull/328).
+- hosted_engine_setup - Update Ansible requirements in README (https://github.com/oVirt/ovirt-ansible-collection/pull/321)
+- readme - Update Ansible requirement (https://github.com/oVirt/ovirt-ansible-collection/pull/326).
+
+Bugfixes
+--------
+
+- ovirt_auth - Fix password and username requirements (https://github.com/oVirt/ovirt-ansible-collection/pull/325).
+- ovirt_disk - Fix update_check with no VM (https://github.com/oVirt/ovirt-ansible-collection/pull/323).
+
+v1.5.4
+======
+
+Minor Changes
+-------------
+
+- hosted_engine_setup - Allow FIPS on HE VM (https://github.com/oVirt/ovirt-ansible-collection/pull/313)
+
+Bugfixes
+--------
+
+- hosted_engine_setup - Use forward network during an IPv6 deployment (https://github.com/oVirt/ovirt-ansible-collection/pull/315)
+- hosted_engine_setup - remove duplicate tasks (https://github.com/oVirt/ovirt-ansible-collection/pull/314)
+- ovirt_permission - fix search for groups that have a space in their name (https://github.com/oVirt/ovirt-ansible-collection/pull/318)
+
+v1.5.3
+======
+
+Minor Changes
+-------------
+
+- Don't rely on safe_eval being able to do math/concat (https://github.com/oVirt/ovirt-ansible-collection/pull/307)
+- hosted_engine_setup - Fix engine vm add_host for the target machine (https://github.com/oVirt/ovirt-ansible-collection/pull/311)
+- hosted_engine_setup - Minor doc update (https://github.com/oVirt/ovirt-ansible-collection/pull/310)
+
+v1.5.2
+======
+
+Minor Changes
+-------------
+
+- hosted_engine_setup - Do not try to sync at end of full_execution (https://github.com/oVirt/ovirt-ansible-collection/pull/305)
+- ovirt_vm - Add default return value to check_placement_policy (https://github.com/oVirt/ovirt-ansible-collection/pull/301).
+
+v1.5.1
+======
+
+Minor Changes
+-------------
+
+- hosted_engine_setup - use ansible host (https://github.com/oVirt/ovirt-ansible-collection/pull/277).
+- infra role - Add external_provider parameter on networks role of infra role (https://github.com/oVirt/ovirt-ansible-collection/pull/297)
+- ovirt_vm - Add placement_policy_hosts (https://github.com/oVirt/ovirt-ansible-collection/pull/294).
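+
+A minimal sketch of ``placement_policy_hosts`` (host and VM names are
+placeholders):
+
+.. code-block:: yaml
+
+    - name: Pin a VM to a set of hosts
+      ovirt.ovirt.ovirt_vm:
+        auth: "{{ ovirt_auth }}"
+        name: demo_vm
+        cluster: Default
+        placement_policy: user_migratable
+        placement_policy_hosts:
+          - host1
+          - host2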
+
+Bugfixes
+--------
+
+- hosted_engine_setup - Filter VLAN devices with bad names (https://github.com/oVirt/ovirt-ansible-collection/pull/238)
+- hosted_engine_setup - Remove cloud-init configuration (https://github.com/oVirt/ovirt-ansible-collection/pull/295).
+- ovirt inventory plugin - allow several valid values for the `plugin` key (https://github.com/oVirt/ovirt-ansible-collection/pull/293).
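+
+A minimal inventory-file sketch (connection values are placeholders):
+
+.. code-block:: yaml
+
+    # ovirt.yml -- after this fix, short forms such as "ovirt" are accepted too
+    plugin: ovirt.ovirt.ovirt
+    ovirt_url: https://engine.example.com/ovirt-engine/api
+    ovirt_username: admin@internal
+    ovirt_password: "{{ lookup('ansible.builtin.env', 'OVIRT_PASSWORD') }}"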
+
+v1.5.0
+======
+
+Minor Changes
+-------------
+
+- disaster_recovery - Change conf paths (https://github.com/oVirt/ovirt-ansible-collection/pull/286).
+- hosted_engine_setup - Add pause option before engine setup (https://github.com/oVirt/ovirt-ansible-collection/pull/273).
+- hosted_engine_setup - Remove leftover code and omit parameters (https://github.com/oVirt/ovirt-ansible-collection/pull/281).
+- infra - Storage fix parameters typo (https://github.com/oVirt/ovirt-ansible-collection/pull/282).
+- ovirt_host - Update iscsi target struct (https://github.com/oVirt/ovirt-ansible-collection/pull/274).
+
+Bugfixes
+--------
+
+- hosted_engine_setup - Use ovirt_host module to discover iscsi (https://github.com/oVirt/ovirt-ansible-collection/pull/275).
+- hosted_engine_setup - align with ansible-lint 5.0.0 (https://github.com/oVirt/ovirt-ansible-collection/pull/271).
+
+v1.4.2
+======
+
+Minor Changes
+-------------
+
+- hosted_engine_setup - Add an error message for FIPS on CentOS (https://github.com/oVirt/ovirt-ansible-collection/pull/250).
+- hosted_engine_setup - Fix the appliance distribution (https://github.com/oVirt/ovirt-ansible-collection/pull/249).
+- infra - remove target from ovirt_storage_connection (https://github.com/oVirt/ovirt-ansible-collection/pull/252).
+- ovirt_vm - Allow migration between clusters (https://github.com/oVirt/ovirt-ansible-collection/pull/236).
+- repositories - Add host ppc (https://github.com/oVirt/ovirt-ansible-collection/pull/248).
+- repositories - Remove ansible channels from RHV 4.4 (https://github.com/oVirt/ovirt-ansible-collection/pull/242).
+- repositories - fix ppc repos (https://github.com/oVirt/ovirt-ansible-collection/pull/254).
+
+v1.4.1
+======
+
+Bugfixes
+--------
+
+- hosted_engine_setup - Fix auth revoke (https://github.com/oVirt/ovirt-ansible-collection/pull/237).
+
+v1.4.0
+======
+
+Minor Changes
+-------------
+
+- cluster_upgrade - Add correlation-id header (https://github.com/oVirt/ovirt-ansible-collection/pull/222).
+- engine_setup - Add skip renew pki confirm (https://github.com/oVirt/ovirt-ansible-collection/pull/228).
+- examples - Add recipe for removing DM device (https://github.com/oVirt/ovirt-ansible-collection/pull/233).
+- hosted_engine_setup - Filter devices with unsupported bond mode (https://github.com/oVirt/ovirt-ansible-collection/pull/226).
+- infra - Add reboot host parameters (https://github.com/oVirt/ovirt-ansible-collection/pull/231).
+- ovirt_disk - Add SATA support (https://github.com/oVirt/ovirt-ansible-collection/pull/225).
+- ovirt_user - Add ssh_public_key (https://github.com/oVirt/ovirt-ansible-collection/pull/232)
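+
+A minimal sketch of the new ``ssh_public_key`` option (user, authz domain and key are placeholders):
+
+.. code-block:: yaml
+
+    - name: Set a user's SSH public key
+      ovirt.ovirt.ovirt_user:
+        auth: "{{ ovirt_auth }}"
+        name: myuser
+        authz_name: internal-authz
+        ssh_public_key: ssh-rsa AAAAB3Nza... myuser@workstation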
+
+Bugfixes
+--------
+
+- Set ``auth`` options in the argument spec definition so Ansible validates the user options
+- Set ``no_log`` on ``password`` and ``token`` in the ``auth`` dict so the values are not exposed in the invocation log
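+
+A minimal sketch of passing the ``auth`` dict inline; with these fixes its suboptions are validated and ``password``/``token`` are masked in logs (connection values are placeholders):
+
+.. code-block:: yaml
+
+    - name: Gather VM facts with inline auth
+      ovirt.ovirt.ovirt_vm_info:
+        auth:
+          url: https://engine.example.com/ovirt-engine/api
+          username: admin@internal
+          password: "{{ engine_password }}"
+          insecure: true
+        pattern: name=myvm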
+
+v1.3.1
+======
+
+Minor Changes
+-------------
+
+- hosted_engine_setup - Disable reboot_after_installation (https://github.com/oVirt/ovirt-ansible-collection/pull/218).
+- ovirt_host - Add reboot_after_installation option (https://github.com/oVirt/ovirt-ansible-collection/pull/217).
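+
+A minimal sketch of the new ``reboot_after_installation`` option (host details are placeholders):
+
+.. code-block:: yaml
+
+    - name: Add a host without rebooting it after installation
+      ovirt.ovirt.ovirt_host:
+        auth: "{{ ovirt_auth }}"
+        name: myhost
+        address: 10.0.0.10
+        cluster: mycluster
+        password: "{{ host_root_password }}"
+        reboot_after_installation: false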
+
+v1.3.0
+======
+
+Major Changes
+-------------
+
+- ovirt_system_option_info - Add new module (https://github.com/oVirt/ovirt-ansible-collection/pull/206).
+
+Minor Changes
+-------------
+
+- ansible-builder - Update bindep (https://github.com/oVirt/ovirt-ansible-collection/pull/197).
+- hosted_engine_setup - Collect all engine /var/log (https://github.com/oVirt/ovirt-ansible-collection/pull/202).
+- hosted_engine_setup - Use ovirt_system_option_info instead of REST API (https://github.com/oVirt/ovirt-ansible-collection/pull/209).
+- ovirt_disk - Add install warning (https://github.com/oVirt/ovirt-ansible-collection/pull/208).
+- ovirt_info - Fragment add auth suboptions to documentation (https://github.com/oVirt/ovirt-ansible-collection/pull/205).
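+
+A minimal sketch of the new ``ovirt_system_option_info`` module introduced above (the option name and version are illustrative):
+
+.. code-block:: yaml
+
+    - name: Read an engine configuration option
+      ovirt.ovirt.ovirt_system_option_info:
+        auth: "{{ ovirt_auth }}"
+        name: ServerCPUList
+        version: "4.4"
+      register: engine_option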
+
+v1.2.4
+======
+
+Minor Changes
+-------------
+
+- infra - don't require password for user (https://github.com/oVirt/ovirt-ansible-collection/pull/195).
+- inventory - correct os_type name (https://github.com/oVirt/ovirt-ansible-collection/pull/194).
+- ovirt_disk - automatically detect virtual size of qcow image (https://github.com/oVirt/ovirt-ansible-collection/pull/183).
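+
+A minimal sketch of the qcow upload whose virtual size is now detected automatically (paths and names are placeholders):
+
+.. code-block:: yaml
+
+    - name: Upload a qcow2 image without specifying its size
+      ovirt.ovirt.ovirt_disk:
+        auth: "{{ ovirt_auth }}"
+        name: uploaded_disk
+        upload_image_path: /tmp/disk.qcow2
+        format: cow
+        storage_domain: data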
+
+v1.2.3
+======
+
+Minor Changes
+-------------
+
+- engine_setup - Add missing restore task file and vars file (https://github.com/oVirt/ovirt-ansible-collection/pull/180).
+- hosted_engine_setup - Add after_add_host hook (https://github.com/oVirt/ovirt-ansible-collection/pull/181).
+
+v1.2.2
+======
+
+Bugfixes
+--------
+
+- hosted_engine_setup - Clean VNC encryption config (https://github.com/oVirt/ovirt-ansible-collection/pull/175/).
+- inventory plugin - Fix timestamp for Python 2 (https://github.com/oVirt/ovirt-ansible-collection/pull/173).
+
+v1.2.1
+======
+
+Bugfixes
+--------
+
+- disaster_recovery - Fix multiple configuration issues like paths, "~" support, user input messages, etc. (https://github.com/oVirt/ovirt-ansible-collection/pull/160).
+
+v1.2.0
+======
+
+Major Changes
+-------------
+
+- cluster_upgrade - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/94).
+- disaster_recovery - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/134).
+- engine_setup - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/69).
+- hosted_engine_setup - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/106).
+- image_template - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/95).
+- infra - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/92).
+- manageiq - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/97).
+- repositories - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/96).
+- shutdown_env - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/112).
+- vm_infra - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/93).
+
+Minor Changes
+-------------
+
+- Add GPL license (https://github.com/oVirt/ovirt-ansible-collection/pull/101).
+- hosted_engine_setup - Add compatibility_version (https://github.com/oVirt/ovirt-ansible-collection/pull/125).
+- ovirt_disk - ignore move of HE disks (https://github.com/oVirt/ovirt-ansible-collection/pull/162).
+- ovirt_nic - Add template_version (https://github.com/oVirt/ovirt-ansible-collection/pull/145).
+- ovirt_nic_info - Add template (https://github.com/oVirt/ovirt-ansible-collection/pull/146).
+- ovirt_vm_info - Add current_cd (https://github.com/oVirt/ovirt-ansible-collection/pull/144).
+
+Bugfixes
+--------
+
+- 01_create_target_hosted_engine_vm - Force basic authentication (https://github.com/oVirt/ovirt-ansible-collection/pull/131).
+- hosted_engine_setup - Allow uppercase characters in mac address (https://github.com/oVirt/ovirt-ansible-collection/pull/150).
+- hosted_engine_setup - set custom bios type of hosted-engine VM to Q35+SeaBIOS (https://github.com/oVirt/ovirt-ansible-collection/pull/129).
+- hosted_engine_setup - use zcat instead of gzip (https://github.com/oVirt/ovirt-ansible-collection/pull/130).
+- ovirt inventory - Add close of connection at the end (https://github.com/oVirt/ovirt-ansible-collection/pull/122).
+- ovirt_disk - don't move disk when it is already in the storage_domain (https://github.com/oVirt/ovirt-ansible-collection/pull/135)
+- ovirt_disk - fix upload when direct upload fails (https://github.com/oVirt/ovirt-ansible-collection/pull/120).
+- ovirt_vm - Fix template search (https://github.com/oVirt/ovirt-ansible-collection/pull/132).
+- ovirt_vm - Rename q35_sea to q35_sea_bios (https://github.com/oVirt/ovirt-ansible-collection/pull/111).
+
+v1.1.2
+======
+
+v1.1.1
+======
+
+Minor Changes
+-------------
+
+- ovirt_permission - Fix FQCN documentation (https://github.com/oVirt/ovirt-ansible-collection/pull/63).
+
+v1.1.0
+======
+
+Major Changes
+-------------
+
+- ovirt_disk - Add backup (https://github.com/oVirt/ovirt-ansible-collection/pull/57).
+- ovirt_disk - Support direct upload/download (https://github.com/oVirt/ovirt-ansible-collection/pull/35).
+- ovirt_host - Add ssh_port (https://github.com/oVirt/ovirt-ansible-collection/pull/60).
+- ovirt_vm_os_info - Creation of module (https://github.com/oVirt/ovirt-ansible-collection/pull/26).
+
+Minor Changes
+-------------
+
+- ovirt inventory - Add creation_time (https://github.com/oVirt/ovirt-ansible-collection/pull/34).
+- ovirt inventory - Set inventory plugin insecure if no cafile defined (https://github.com/oVirt/ovirt-ansible-collection/pull/58).
+- ovirt_disk - Add upload image warning for correct format (https://github.com/oVirt/ovirt-ansible-collection/pull/22).
+- ovirt_disk - Force wait when uploading disk (https://github.com/oVirt/ovirt-ansible-collection/pull/43).
+- ovirt_disk - Autodetect size for upload_image_path (https://github.com/oVirt/ovirt-ansible-collection/pull/19).
+- ovirt_network - Add support of removing vlan_tag (https://github.com/oVirt/ovirt-ansible-collection/pull/21).
+- ovirt_vm - Add documentation for custom_script under sysprep (https://github.com/oVirt/ovirt-ansible-collection/pull/52).
+- ovirt_vm - Hard code nic on_boot to true (https://github.com/oVirt/ovirt-ansible-collection/pull/45).
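+
+A minimal sketch of the documented ``sysprep.custom_script`` usage (the unattend content is a placeholder):
+
+.. code-block:: yaml
+
+    - name: Start a Windows VM with a custom sysprep script
+      ovirt.ovirt.ovirt_vm:
+        auth: "{{ ovirt_auth }}"
+        name: winvm
+        state: running
+        sysprep:
+          custom_script: |
+            <?xml version="1.0" encoding="utf-8"?>
+            <unattend xmlns="urn:schemas-microsoft-com:unattend">
+              <!-- full unattend.xml content goes here -->
+            </unattend>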
+
+Bugfixes
+--------
+
+- ovirt_disk - Fix activate (https://github.com/oVirt/ovirt-ansible-collection/pull/61).
+- ovirt_host_network - Fix custom_properties default value (https://github.com/oVirt/ovirt-ansible-collection/pull/65).
+- ovirt_quota - Fix vcpu_limit (https://github.com/oVirt/ovirt-ansible-collection/pull/44).
+- ovirt_vm - Fix cd_iso to get all disks from storage domains (https://github.com/oVirt/ovirt-ansible-collection/pull/66).
+- ovirt_vm - Fix cd_iso search by name (https://github.com/oVirt/ovirt-ansible-collection/pull/51).
+
+New Modules
+-----------
+
+- ovirt.ovirt.ovirt_vm_os_info - Retrieve information on all supported oVirt/RHV operating systems
+
+v1.0.0
+======
+
+Minor Changes
+-------------
+
+- ovirt_cluster - Add migration_encrypted option (https://github.com/oVirt/ovirt-ansible-collection/pull/17).
+- ovirt_vm - Add bios_type (https://github.com/oVirt/ovirt-ansible-collection/pull/15).
+
+Bugfixes
+--------
+
+- ovirt_snapshot - Disk id was incorrectly set as disk_snapshot_id (https://github.com/oVirt/ovirt-ansible-collection/pull/5).
+- ovirt_storage_domain - Fix update_check warning_low_space (https://github.com/oVirt/ovirt-ansible-collection/pull/10).
+- ovirt_vm - Remove deprecated warning of boot params (https://github.com/oVirt/ovirt-ansible-collection/pull/3).
+
+New Plugins
+-----------
+
+Inventory
+~~~~~~~~~
+
+- ovirt.ovirt.ovirt - oVirt inventory source
+
+New Modules
+-----------
+
+- ovirt.ovirt.ovirt_affinity_group - Module to manage affinity groups in oVirt/RHV
+- ovirt.ovirt.ovirt_affinity_label - Module to manage affinity labels in oVirt/RHV
+- ovirt.ovirt.ovirt_affinity_label_info - Retrieve information about one or more oVirt/RHV affinity labels
+- ovirt.ovirt.ovirt_api_info - Retrieve information about the oVirt/RHV API
+- ovirt.ovirt.ovirt_auth - Module to manage authentication to oVirt/RHV
+- ovirt.ovirt.ovirt_cluster - Module to manage clusters in oVirt/RHV
+- ovirt.ovirt.ovirt_cluster_info - Retrieve information about one or more oVirt/RHV clusters
+- ovirt.ovirt.ovirt_datacenter - Module to manage data centers in oVirt/RHV
+- ovirt.ovirt.ovirt_datacenter_info - Retrieve information about one or more oVirt/RHV datacenters
+- ovirt.ovirt.ovirt_disk - Module to manage Virtual Machine and floating disks in oVirt/RHV
+- ovirt.ovirt.ovirt_disk_info - Retrieve information about one or more oVirt/RHV disks
+- ovirt.ovirt.ovirt_event - Create or delete an event in oVirt/RHV
+- ovirt.ovirt.ovirt_event_info - Retrieve information about one or more oVirt/RHV events
+- ovirt.ovirt.ovirt_external_provider - Module to manage external providers in oVirt/RHV
+- ovirt.ovirt.ovirt_external_provider_info - Retrieve information about one or more oVirt/RHV external providers
+- ovirt.ovirt.ovirt_group - Module to manage groups in oVirt/RHV
+- ovirt.ovirt.ovirt_group_info - Retrieve information about one or more oVirt/RHV groups
+- ovirt.ovirt.ovirt_host - Module to manage hosts in oVirt/RHV
+- ovirt.ovirt.ovirt_host_info - Retrieve information about one or more oVirt/RHV hosts
+- ovirt.ovirt.ovirt_host_network - Module to manage host networks in oVirt/RHV
+- ovirt.ovirt.ovirt_host_pm - Module to manage power management of hosts in oVirt/RHV
+- ovirt.ovirt.ovirt_host_storage_info - Retrieve information about one or more oVirt/RHV HostStorages (applicable only for block storage)
+- ovirt.ovirt.ovirt_instance_type - Module to manage Instance Types in oVirt/RHV
+- ovirt.ovirt.ovirt_job - Module to manage jobs in oVirt/RHV
+- ovirt.ovirt.ovirt_mac_pool - Module to manage MAC pools in oVirt/RHV
+- ovirt.ovirt.ovirt_network - Module to manage logical networks in oVirt/RHV
+- ovirt.ovirt.ovirt_network_info - Retrieve information about one or more oVirt/RHV networks
+- ovirt.ovirt.ovirt_nic - Module to manage network interfaces of Virtual Machines in oVirt/RHV
+- ovirt.ovirt.ovirt_nic_info - Retrieve information about one or more oVirt/RHV virtual machine network interfaces
+- ovirt.ovirt.ovirt_permission - Module to manage permissions of users/groups in oVirt/RHV
+- ovirt.ovirt.ovirt_permission_info - Retrieve information about one or more oVirt/RHV permissions
+- ovirt.ovirt.ovirt_quota - Module to manage datacenter quotas in oVirt/RHV
+- ovirt.ovirt.ovirt_quota_info - Retrieve information about one or more oVirt/RHV quotas
+- ovirt.ovirt.ovirt_role - Module to manage roles in oVirt/RHV
+- ovirt.ovirt.ovirt_scheduling_policy_info - Retrieve information about one or more oVirt scheduling policies
+- ovirt.ovirt.ovirt_snapshot - Module to manage Virtual Machine Snapshots in oVirt/RHV
+- ovirt.ovirt.ovirt_snapshot_info - Retrieve information about one or more oVirt/RHV virtual machine snapshots
+- ovirt.ovirt.ovirt_storage_connection - Module to manage storage connections in oVirt
+- ovirt.ovirt.ovirt_storage_domain - Module to manage storage domains in oVirt/RHV
+- ovirt.ovirt.ovirt_storage_domain_info - Retrieve information about one or more oVirt/RHV storage domains
+- ovirt.ovirt.ovirt_storage_template_info - Retrieve information about one or more oVirt/RHV templates related to a storage domain
+- ovirt.ovirt.ovirt_storage_vm_info - Retrieve information about one or more oVirt/RHV virtual machines related to a storage domain
+- ovirt.ovirt.ovirt_tag - Module to manage tags in oVirt/RHV
+- ovirt.ovirt.ovirt_tag_info - Retrieve information about one or more oVirt/RHV tags
+- ovirt.ovirt.ovirt_template - Module to manage virtual machine templates in oVirt/RHV
+- ovirt.ovirt.ovirt_template_info - Retrieve information about one or more oVirt/RHV templates
+- ovirt.ovirt.ovirt_user - Module to manage users in oVirt/RHV
+- ovirt.ovirt.ovirt_user_info - Retrieve information about one or more oVirt/RHV users
+- ovirt.ovirt.ovirt_vm - Module to manage Virtual Machines in oVirt/RHV
+- ovirt.ovirt.ovirt_vm_info - Retrieve information about one or more oVirt/RHV virtual machines
+- ovirt.ovirt.ovirt_vmpool - Module to manage VM pools in oVirt/RHV
+- ovirt.ovirt.ovirt_vmpool_info - Retrieve information about one or more oVirt/RHV vmpools
+- ovirt.ovirt.ovirt_vnic_profile - Module to manage vNIC profile of network in oVirt/RHV
+- ovirt.ovirt.ovirt_vnic_profile_info - Retrieve information about one or more oVirt/RHV vnic profiles
diff --git a/ansible_collections/ovirt/ovirt/FILES.json b/ansible_collections/ovirt/ovirt/FILES.json
new file mode 100644
index 000000000..7980a48bc
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/FILES.json
@@ -0,0 +1,3428 @@
+{
+ "files": [
+ {
+ "name": ".",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "automation",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "automation/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c29749d822aebf8e458fb5dddef632ad990b77ec16543ba0984589ab53064608",
+ "format": 1
+ },
+ {
+ "name": "automation/build.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4ef53c1759ac7c884e1429fc03e9eecfecbc74ac8800f5644eb13a3059fc2c02",
+ "format": 1
+ },
+ {
+ "name": "changelogs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/.placeholder",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eeaeb3bcb0e4223384351f71d75972cc5977d76a808010a8af20e3a2c67fefc",
+ "format": 1
+ },
+ {
+ "name": "changelogs/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be61604e7e4d2c3d2c1a6834828bc05589fba2c4b80332a9476c8c2598b3389b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/config.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a9855447b14e048a16cd7877ffeab3bfe07496680c55055a3e8de8c0d2fb64bd",
+ "format": 1
+ },
+ {
+ "name": "changelogs/changelog.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e50d3236e8cccb15c89ccca4bdc7c795dfe6edac79dee7dbfa2b2ee62862b68",
+ "format": 1
+ },
+ {
+ "name": "examples",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "examples/filters",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "examples/filters/ovirtdiff.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "66d5ef341d6075c3bf9671fd5b25f41642ef94127ca295e69901b41da9242e2d",
+ "format": 1
+ },
+ {
+ "name": "examples/filters/vmips.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3c0ede23c25926f83af778160d4b52ea6c11e1dde1e97233dfff27ab8ab835b",
+ "format": 1
+ },
+ {
+ "name": "examples/ovirt_ansible_collections.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e81805423a3ebec7b37b6d380a6fa76732fb3325f3af170eb498c481ddad1873",
+ "format": 1
+ },
+ {
+ "name": "licenses",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "licenses/Apache-license.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6d5b461deb8038ce0e083c9cb7f59859caa04c9b4f72149367393e9b252cf14",
+ "format": 1
+ },
+ {
+ "name": "licenses/GPL-license.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ceb4b9ee5adedde47b31e975c1d90c73ad27b6b165a1dcd80c7c545eb65b903",
+ "format": 1
+ },
+ {
+ "name": "meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "meta/execution-environment.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "30270de38aee5490073ea0c04a2202948e0edeb671fc0d5f0d441472c6856592",
+ "format": 1
+ },
+ {
+ "name": "meta/requirements.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "12b1ba483812c1f1012e4379c1fad9039ff728d2be82d2d1cd96118e9ff7b96b",
+ "format": 1
+ },
+ {
+ "name": "meta/runtime.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "25354b3afabd2b5a0c3e209aeb30b9002752345651a4dbd6e74adcc0291999c2",
+ "format": 1
+ },
+ {
+ "name": "plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/callback",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/stdout.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1945aee0ab3daf085f1ebe4b99f028160aded0bc7de35059cb41ed5fb4761db9",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/ovirt.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a0cda744dca79d659df4846df7b0e257ba33f5e92e8f98776a6e20b49a1b285e",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/ovirt_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "87131c23c708320037e45ebd46773d2e48fcb38ba79503a5e573dd1037a857d2",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/convert_to_bytes.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8b83892c6a71f5cab3c93dfc93626d901b7cef7bfd704255aa1ac78424ae3426",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/convert_to_bytes.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5cd2b833e5f7de2cca994ddcbf3d2c9a99d4e989ab9cf78c231336fc04c15fa4",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/filtervalue.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d26325ae24aa363744d7166bf017dcf53fa74ef74f1d3299a3d99d299db307b9",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/get_network_xml_to_dict.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "02f7e3f247c2d4dc5d3c4191aec83a5017338d5004b0008fd585fd16d0523d54",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/get_ovf_disk_size.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b58e408d4fcd3ed6ac7016d984588c3565b83b3e2139cb71e11ba8aef38c9d18",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/get_ovf_disk_size.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8fc13373ae2e97d7e2b90b73c6e0f235eec00d0e8327475c5ccde830ce39a868",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/json_query.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1754fc223cf8315816d846798dad5e9a07daef8e1b6adaa282b15afa3ca48983",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/json_query.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a0062d16af7209efcb4a4bb2cd2b5329a54f9d19311323097c86d90e63b082f",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/ovirtdiff.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dc1bc2085850080372e875c508069dd417262df2c99bef29c47aa457f161aec1",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/ovirtvmip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "95377cac0c2916f8e15eb504f1541327dbfb729ce0e33165c83e3b0bc574e7d6",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/ovirtvmip.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "db8735e7f4e469300d3505718061031d71d3483616723fad483ed9cd74ee4cb1",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/ovirtvmips.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd934b7bcf81c838f0f601446953c82a05887e04769ab6f80ecfb786fe56566d",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/ovirtvmipsv4.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce9ef1c18ddfbe6c42ba50095c398321470f7399f5a0fe967095e513b4ba2bcd",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/ovirtvmipsv6.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5075356bf5fa22a0d2601baf03fc50bdb6c91aad9543f3983d8bf8ee2b3080d4",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/ovirtvmipv4.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "838bffeb5436ac25f3888124eab7dc1903d432f75d8e43bd73dba1433317d5d5",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/ovirtvmipv6.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "833ee9ec1c40054ba6349593176630fe3fc305baf026f41bf880a655bbf697a9",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/removesensitivevmdata.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bac378ead2a6a37613460d3852756744706d17d8a892205594363d183ccf7b86",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/ovirt.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e0e0349a91b28f4628726cc43c379b1eff80ee6297687a95217e3b60889c6a4",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/cloud.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2fc3ad35c92926ddc389feb93244ed0432321a0ff01861f2a62c96582991298c",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/ovirt.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9664d0a239e76e27c15f1f5a8aa3c78e9ce6db8691d51ce38fb5a874010f5da6",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/version.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b3de6a89533b19b883f7f3319e8de780acc0d53f3f5caed1f3006e384232ce60",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_affinity_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "094bf12fcae763c9b0d8d662c4a9ac87ca3f0721224c08a5324aeb940154f8d7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_affinity_label.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c612b78a1282cf0ada1e46779879ba79dfcbeec9f6919bfc5c688894513a505b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_api_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d27663995af7b31f1dc83dc14de62d977ce6a2f7bec143938caad2bef1fcbb09",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_auth.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "96b43cc0659ec70a9b89b519bbb66c82d6bcb8443e79e56dd4b35fb4124d22c9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_cluster.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5f7fe11427cdda046d1c1aeb39c60c2ca49ca41a8d0d36500e96db961ae74ce4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_datacenter.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "82728d18290f7e671043c3e51db9b858f03b203be5ab178b7db4d621b2b64356",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_disk_profile.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "67d05ef0ad11058ed6a18da8a4079ab8d4d8902a797412646d8e7f2d09464c35",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_event.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fd7d258428744e7109fea1b8dfe4250e9e0d07664cf583141bdef23cacab4b1b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_external_provider.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2253c69cd894e5d8b1d13b19e374cfbd8290ee683058ef333b6119c16828df7b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7cd6538d4e18d19e816ca078ea597702b596cbea738ca4bce9f2cde20745b9fb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c45c02372b6c9a67e57481ff65f6f45a5b340deaa33a3aedac58e47babe0840a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_host_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f9f283291e0c66aea729c6f18d9905cd2ba7e0c598ae0636c493f3068a610584",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_host_pm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "51ec49b40fa65d456a38048f325df7982c7debdeb48bd137318c30240f155b29",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_instance_type.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1856d38726508ff171c3b8d2d5aea7342a2e62c254de87a28cb7ddc1b090752",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_job.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9d92d289cc7309d7304cf5d5601fd0806b9cef7e179fd16bdf4ed45e43912d51",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_mac_pool.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c5822a799199e4d7d6e59ec7c0f67a3e56e5728ac0aa5a217b4746f7d4155dc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "74b1b135135714e20a39b82d4ba80b78a146b57bd7f09ecf8baf7f2b4a29eacc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_permission.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da6d49d7868f708a6af6c2439df2e45a4b3df00ac40574e24e6f33ec9569d0ba",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_qos.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "02c19d26cb46441133503e4b9cd6415d79d3d40bc8ee529a1421b172ace5d9df",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_quota.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7560839553cf19fbb2cdab3784f2c19e7dc2ed6beda2dcf9137b57156ea7d004",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_role.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "69567225f0e6c4d2d84a84e562b6881d648b8422141b318efd225459eebd99a7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_snapshot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef2315d7cf9362a56213acbc5d17d8bdf7c9180dd6dcb5cf875d402add95ca5a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_storage_connection.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a7c12a7215094295957cd95b9040220cad09db91492cfd463ffa9fe179d32f82",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_storage_domain.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a8ad293631ebe87bfdc3e38e6bb39827bee67b3a2c1e66a9a278c60cdcbf926b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_tag.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "41961b31845824591a0691d386e9a8f8ac2c1b85415b522109a8f1f4e798e161",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_template.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4d92fa178a06c72c049e5bed7c8b20eb88d243c8fb894c94832e07284a853d89",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3002aae2edeff14d001c9bea614ad29c92132ae4175ce678c3269b1387f1dad0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_vm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "81b38693a37162b80e534c674d4a93a4d1b0873849cec6ac58a4216f816b25c7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_vmpool.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "badbc107526c539e0a22a127931d80a3621caf3ca244a6e336ea38262e08b20c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_vnic_profile.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "165440d6bc6f89eccd5b32b2ea80c4990f10406a241b3338f47577a449872125",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_affinity_label_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c2ec4c338c4fd133d694afb3fc6cefc38e80b34f97aeb333044dc319e2c803d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_cluster_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "26de35adbb385309a7637f60dfac502c097c36ea16b7cf8abe197d71534a1fd0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_datacenter_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5032925f5b2d4ab3bc65d2e94d7c52d2611183114521c6e783db73d6acd9d2c7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_disk.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "605ec42115ffb9ed23e29d1d793510052088604a7a828edebe2d6bc7b08ef299",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_disk_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "88842b5a9a3d78a1bba01a301d9a8f11fb3aa05618b8bf8b1579c3905a5ed6ca",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_event_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d31f5bd80058b6ad4bf24b6236e3bea5c28d953ea372d6a0753110fd911a7cc2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_external_provider_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ba0e106dbfb2670e5733db837f4cd167696a63ec94da1bce7f3bed323cdeaa6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_group_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2fd64027bb004558774df1a58f9bbc6c2552e6e62655664dbefbffbdf3a3634c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_host_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ed53c8a386f84ed19d375a5b8edb36c8ac25b6897a77f82ab5d728fd6bd1811",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_host_storage_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bb1602bbc1c36a6bb2981cfd1895be2d5eb46fd41997e77e06e0f014630761e2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_network_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0c6a33e1e9ca127511fea377380aa4b65aecf880ee3940a10cb21ee42065d765",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_nic.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "35d865a6a853c47b0ea6b5228ad27f8cc680bf48ecc14520e90e9a5cac4c1dfa",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_nic_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "454e122d9326ec4a9912b90212e2beb148992034309d27978f9852e524376a2b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_permission_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3becf549aeeb749f48bf9223d5f8e47e836e67afd34bef0d027000479d223292",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_quota_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d4c7fae770a7a14f869e937b0f26d351ecd37a18297f10d724f8fb39444c5437",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_scheduling_policy_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b965589a238e6ea02d94742f3555affec4cf3035889b77d1d01d978bdb0b74f8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_snapshot_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "61451186d9dbce130a66829a0ca2ed47a5e12125c521036d47ce85710378f1aa",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_storage_domain_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6b74466500305497c2fc95994a0795ebaf574a205cc8726d985ff00eb01c4622",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_storage_template_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a2a635810433659f9700a6f1d23485c8f33bba924f02a26c26f3fb3e72bff463",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_storage_vm_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "05d6b2ec183079d340126eb6a7840555770e76099ca321b6314fc5c67f129f2c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_system_option_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cbf2bf24a4fea1906165c0c92c3ddbc1d99aa33665c550eaa7d95778a8f2ab15",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_tag_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4fc736ee9a8364d6cf75658d0a58ab97dbe6132bf2abb8f8d077f9636130e855",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_template_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "56347a6b45c2900520b3f72b0d95f3a7041b24547934c0fd3d7a4520612c989f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_user_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "33733fcec5e6fb969de04ae8f454ddd152518d778eb292baf56e12b0c09e1684",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_vm_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be4ec699b7843b39cc292df76060c7e48ddd3893afeb86c8f3f000b0b4571517",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_vm_os_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "08d4ac6875974350cde35cc0e15b7fac67ad13e968280276fe6eecceab649269",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_vmpool_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1abc015607272f80a5d708b123ae44507f6868cfc437d16f55d26799e427adef",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_vnic_profile_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "655b66528fec82f009ae4fad3b942c9a97582c612a419594b0fcdefec00626dd",
+ "format": 1
+ },
+ {
+ "name": "plugins/test",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/test/ovirt_proxied_check.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4cd7ecf3b7f7f467865eaafd095af27707d839b7de357d0a9120e21cdde19d88",
+ "format": 1
+ },
+ {
+ "name": "roles",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d8ca3c1448aef8b92f96382f9b6706333724dffd426c98eb8212f1fb526cdbcd",
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade/examples",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade/examples/cluster_upgrade.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e5328d9e91b8d6e3a6464baae5fc0398f848cfcbcb75398fbdf659ad1b73a403",
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade/examples/passwords.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c135528dad4a7ec75c51b21ebee33d4a41a0ed73088e828e90f0ee34a9dbd003",
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade/tasks/cluster_policy.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "139ec62ccbbe68bca8da3eac9675ad610253d5a34f7ba192620bae3a2c924242",
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade/tasks/log_progress.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "474165c83e5ac44adbec3d9a89ea78c46bc01f4e54c667374416ca1743fd0fb7",
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade/tasks/pinned_vms.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a66b3cdb5176d43714f92c16dbcd0a5a4393e49a67ecdeed78965c3c69abbeb0",
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade/tasks/upgrade.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ca4dc6a66466fd0751446d790c275a34682bcb30e88c8e0ee3e81a9f18e5a565",
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a9a3b444a6b0c04937f184388b513508942f1c1b21393a869709c58bc2d7dad2",
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b761686d3fa460a4f440601407721aff898e919363fac5a2ee90cede476e5e9",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "250d197c20350193f1f3908fc4a512ffed490a02da299c3d4ce8cc3717009e61",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/examples",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/examples/disaster_recovery_vars.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "01ab09aa3035f63fecd327f987a47c6b796bd0065d76ec6f03082a9412515a9b",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/examples/dr_ovirt_setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7dc8c63e44eb73c8147744ec27bb1ce42038195f2b727baf7751d5d69a518571",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/examples/dr_play.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff5a6c4767651187cc6d3574997f968322e00451b9cb37d2d7689d2198648c0a",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/examples/ovirt_passwords.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6368b1291884cbd248e720948fc6a5709f04a1b07bfb78afdb615117a57da0e",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/files/bcolors.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c50fc3bc67ce7e03419432047893b31eb20a1d1fe57df3fda7a62da93b2fced",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/files/dr.conf",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "090e3e3941bbca1da7720e2e5211cb6704ecb800f3e7d0e8d95438e21037dc6b",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/files/fail_back.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "05bca5c0ba307c5c1207b1ada42666cd04a6c0e5ca6da8a648e5232e70557da8",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/files/fail_over.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6ffca020e82de85b5b83757682134e5da69ca6a9287f9f6db0ef602aed77b81d",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/files/generate_mapping.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bdcd605745171f37b3049714acc6b830959e31311710aeae02ccb95b831595bc",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/files/generate_vars.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "15e907807973a077c8f7bb97fb764dfcd03722dd37a7c2565777d4ac185ee75f",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/files/generate_vars_test.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bad4d717ee7c27a022536bacc01a788c7944b36cc96a769155de5477d2bb3b95",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/files/ovirt-dr",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4b17459db89da23f6f5f20d212ddde6e7f3879b8df610dace4e084be7533d3c7",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/files/validator.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be65237aadb4f613add628535afde01789cf95e588ed84ca6a04af3f48c54a45",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/files/vault_secret.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e5a276f384cac4d78e76145869238a895b959fb123a33ab652b81b830f8378e",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/clean",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/clean/remove_disks.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4904dba739bbcc872d023e055c9c1a3dd786bfa1199f72ee7e6cb3f99ef474a0",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/clean/remove_domain.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1c0126aca79a6f188488fe47e148eb62bf97d553a359fac5e0db30a2cd972449",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/clean/remove_domain_process.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d44f6b334b85c43a6733325ea85433e85e892f878610cb0fd359ddb9e9efcb5",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/clean/remove_invalid_filtered_master_domains.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e0759c74c512d7c2ada796fa2ebf0c5004be77339525bb61ebb2441ff00b0d4",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/clean/remove_valid_filtered_master_domains.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "138db98f76c303349a8d8e07d45cdcf37150d65611f3dae86820c37026aa0527",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/clean/remove_vms.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "281872dffc6bedb2d5f2eababba225dcb68d97f1296060f7e22ba3c0d0c2cc7d",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/clean/shutdown_vm.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "08bc7c3a54e4b0a06301014814bff4976c72d2d6aa3bafdddb793c7fd1714503",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/clean/shutdown_vms.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "512ef199851ec2deeb75ee8230e2c9acde7571e7e88ace12aa41a13e195727b9",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/clean/update_ovf_store.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54d45e1a5d0a357cffc1bd2e64eed83bb1281de575544b60dd049625644c8db2",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/add_domain.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4c29e7744719cb4e5edd154ce875f2dedaa945f8526907c53569fb29071ba0af",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/add_fcp_domain.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a1304953f4f321a466ec3f40e3da6ab077b3555173e19fb2029b566682ceddee",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/add_glusterfs_domain.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f7afa2849e8e51641ef8f10f0a671897367661553e25987712383bb70f3b90fd",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/add_iscsi_domain.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af856b275b930d302aefcedb84d4b8fc853520f405515d271c4bad3e10d2a2f3",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/add_nfs_domain.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "16300fe776b3d147e3fd2cd041854b1c11b9c737e1046516a4fd1b2909e5472e",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/add_posixfs_domain.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40cd9b57a069d91e8378aed4f91254ddcd02177cdf13acaab366773c5421543f",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/print_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9fb07229582ba647600d609900e5c3b580f6e881a9c401505a3d757cae19e94f",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/register_template.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "39f767a131032417b0576a9e3e0c3c6b703b50d65c67ff4826443e2fda5a1d30",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/register_templates.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8dea06f6718adf565297ded44dfc6dd44a52ea6386b2246814719b034bb531a3",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/register_vm.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb43083203fd11a54205d886ed340acda2cffaf7dd5d4f95373cc75f791f5db7",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/register_vms.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "04940777097a9af94f44ae5f751154974e89ffa2747fa22317bb5ff7c865710c",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/report_log_template.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4a6b48e869863fab445b983c2d4a4fa43bc3cb76620e3548c25ab8f54c89b91e",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/run_vms.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee38d9138d515bba6f285bc7f29aaac3d063be546df6a7fbc72e6049237db449",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/clean_engine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3768e720a7b4a3c88909a2da457148be453ebef0149bd78bb2db97678fd2b94f",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/generate_mapping.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4a48362c1e153dfc5ed6b9d90e6e40d094e85775ebd8855004084c628b1fe149",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cdb9e3dbd3398fee4a37ffb289c9274a9e040fdfdc14c51ec09f7019919b2ca5",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover_engine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "269691f765b9610e0cffe4cf718fcefa45bdf04287831d27a7cbc08c6d663a28",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/run_unregistered_entities.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a95e9dc5ec64b98a53d972ce1c339e946130ce9b716d94e2f605f62bee20259",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/unregister_entities.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49dfc30fc3f6b55a92a3016a7882ca7514282968de1ae0dfbd8b1054d0d51116",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6646b07417defb12746bb28acf5d4f7dba0d09f5623be42600f8071c82b20a67",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3c8f572573fd60ddd0d28fc043a0bb457db1315d00dcf3007eaed4c656b055d4",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/examples",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/examples/engine-deploy.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da334bb722a37e28d61d5d7e5340b3a1abde08fb7b210dfed413d02d6c253fa8",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/examples/engine-upgrade.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4ddca8cd12921dd36298bc8f26c019db5c289ee362f752b077b5373dadfc4a07",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/examples/passwords.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e8fed7a17e985ba5217acda961a8a41c98fa41d56f2b7046a82977da7b3ceea6",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tasks/engine_setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "862f800ab8c69ee875aa991a6d0a5bf3875d41fe5aeb7fcbabaaf23350b98f02",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tasks/install_packages.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e57dc32606b071aa2c4fe5327e018c634453e76042f3a1c054b5df24dd6fb49b",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "07e406fcafdb599a48f099306c8da522d166b93ee28c453a09a715a9a2e8536e",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tasks/pre_install_checks.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7bde6ab43a2d78f5fee146994a18a1815f2ab3c61f437817454311a1f5be8859",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tasks/restore_engine_from_file.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "314c605bae9a6a3619e1c184406a0d0c2aba0c81f2cb34ccea5a019071e2c532",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/templates/answerfile_4.1_basic.txt.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3d208930a8c1a973a018c7476a10f3a1857d9562ffa3037af797e62d9fe47aa3",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/templates/answerfile_4.1_upgrade.txt.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6295859733ac8997107fe0644a1788ba0b6f88e729aa57e67c9de1d0fb1e2bf4",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/templates/answerfile_4.2_basic.txt.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e563077a2da3efc212864c7f23639af0f0271a39e21fb38a27bf9ba53174f86a",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/templates/answerfile_4.2_upgrade.txt.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "674c9ed2cda7dca01bbedd549f17ab1ea4d24db6d7cc4308a8683bdb20d00a55",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/templates/answerfile_4.3_basic.txt.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a3e56da1f79927e222e08a8814165104272951dba6e3da64cd50c67280493165",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/templates/answerfile_4.3_upgrade.txt.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4691c06ff6c2c79f7754ed23337bd5c425b1349eef196b58aec360676df57041",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/templates/answerfile_4.4_basic.txt.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d37695fb5b95ef077458629a54b4962a82f4e2a9f5d12cf8e3e748e7e66d6398",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/templates/answerfile_4.4_upgrade.txt.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f5b42d55ad4abe75d15aa800b697f6ff7d425bae8dc7bde5e0da36f805f24c45",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/templates/answerfile_4.5_basic.txt.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "82b3514110bc38936ebcff5c80ca1a99926d5a1351fe4aaade8b0f6d2fabb1cd",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/templates/answerfile_4.5_upgrade.txt.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "66168916bf959bdadeb0c2212c938ad10c472e7cefd64470513021465929e60c",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/templates/basic_answerfile.txt.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "59936bcb7c900b9779ecfed684759145e85eafac01dc7602440e844a8c55c73f",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tests/containers-deploy.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c3c2c21cb3d0aa75d7bfeab5a56a1bdbcac587e7d9c09656bb30f3f8f352ece",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tests/engine-deploy.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6e898c8c0035f4be61a893e691998bf17cace4ddd4628c3d3f73230b1a8663b2",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tests/engine-upgrade.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d5698a32c3605fc3c97282a9564a47679af7a23776203c5ff2a9cb349b28d12",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tests/inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "669dea0f087198b19e89c45cb705e8439f9d1139a29d63264be472ef47b33b9e",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tests/passwords.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e8fed7a17e985ba5217acda961a8a41c98fa41d56f2b7046a82977da7b3ceea6",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tests/requirements.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc296c4e43917486b4a713e2f50b075b88fff850a6f0901081368428686ea431",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tests/test-4.2.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ddbeb082e3e49d7265a4a6b4844009c2ba7761add8c6bbdd93e2c83cbcbe0b75",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tests/test-master.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0990dd37b7c78b73cb88eacf4fc14725b637545849ef60b15cdd860e81f518ac",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tests/test-upgrade-4.2-to-master.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0eb240194881bb664e0adbad326c5c51c7dd2ae7dccd26867f36bc3b40719dc1",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d162b680ab1278e0deba1007812e444312bde31dc96150507df65122b3ff9cf",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e1602b7bba86c0b8c69ffd91dff866ce75fb8538b99fd0712d5a5b09574cf209",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "071e8947f0d2c9308f8820d6aeab02d08ee7991df0228a03a52ebcef12acd3ea",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/examples",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/examples/hosted_engine_deploy_localhost.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "67398b5ebb07bcbc56ba3269c3ab11672bdad44ef34eaf5a54d6d834ff1fb05e",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/examples/hosted_engine_deploy_remotehost.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e2d43b9deaf49874e8c0b3303a466cea9b2d5848353da5feb5cd2080347863c9",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/examples/iscsi_deployment_remote.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fd684ba2bf2c243ea49fcc3f724f80c11f3bff5aec833c539b363e2d8a664029",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/examples/nfs_deployment.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e502f3e646feaf3043835aafafba260ae5dd804901ab11254ef8622a6058689b",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/examples/passwords.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d743b2921acb5121edb77586f3357245704891451720da05f5f7677230f8a94",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/examples/required_networks_fix.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec7b32cfd216364b2fc795df93d753f958f5f570e9940d0a19d786e23dcf8aaa",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/files/35-allow-ansible-for-vdsm.rules",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b562418571a4657f858b5feaf149e4f983480b7871955264f185c5b213595adf",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/hooks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/hooks/after_add_host",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/hooks/after_add_host/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "99f37c2644368879fc3f47df1673128068861c0c551488f3dd1d6c0ef930b943",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/hooks/after_setup",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/hooks/after_setup/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "21b644ac87bacf4df09f35510e7e47c43e179e5e4c4c297ac381d892e3c101eb",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/hooks/after_setup/add_host_storage_domain.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "982ac73dbfb8969a1ccdc87af4ae4629b191e148ae9dd61f97e2fec04a83e7cb",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/hooks/enginevm_after_engine_setup",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/hooks/enginevm_after_engine_setup/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab33f419e16208c32590d47c90d428f23dad1528151b994d4ce4dd07ba0955d3",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/hooks/enginevm_before_engine_setup",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/hooks/enginevm_before_engine_setup/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "34769789fabe1aa2e5637fb5242e429ff8dc5fc9614d816165787937f767ecff",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/bootstrap_local_vm",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/bootstrap_local_vm/01_prepare_routing_rules.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "50ecb5b02426bb7b158de1e958bc1af682770225db1af4efa4c5f12f5de1f2b0",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/bootstrap_local_vm/02_create_local_vm.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4fd0dc610e2658e79232182c6dac2ab1c3cd7b425fa3b6b5fa658b19001d415c",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/bootstrap_local_vm/03_engine_initial_tasks.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f24c438123409883e229b91d8db67013d68b0714b86e7560cf9f435e8223d13",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/bootstrap_local_vm/04_engine_final_tasks.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af77a73d076033c2341210d0167e80f3d29c7d05ef08dacf2e1fad842338e32d",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/bootstrap_local_vm/05_add_host.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4fe15c23e8666aad557807a8c16d2a762c9ad0ac85191381bb63ccc5c425cb56",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/create_target_vm",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/create_target_vm/01_create_target_hosted_engine_vm.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb7c1d5c7fab5dacf7e9d2cbe71578b75b03b1f7f9c7d910833c1217bc56b453",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/create_target_vm/02_engine_vm_configuration.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d64591cb198ee1a7ad6625dc188f8b40384e99d2dd45b9d149331ce634923677",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/create_target_vm/03_hosted_engine_final_tasks.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ecccd9ca3929323054d45ec60f407ea97276396474ad8b0e8a7b4a5fbf2025b6",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pre_checks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pre_checks/001_validate_network_interfaces.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "88f3fb78661fd4a02ea9c5b39bceeca743aa373a8763f26b371c4343e8412641",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pre_checks/002_validate_hostname_tasks.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f481df891c266e54e9c3ef02b8f3ce0481d6f992cf184dbd7e051d591bb4327",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pre_checks/define_variables.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "88eddc08e5e5b704f6e18d19b0d12e65bb1bb90341e89ef0f49c1c1904d0d250",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pre_checks/validate_data_center_name.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f569ed5777d1a09bec53ae1078e9f48081fcb6a3979ee19cc35c17a2d9e89189",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pre_checks/validate_firewalld.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "072eaf9316569ed0de7d73f3ef1713bed0af6bfb4f94d016147ec8cae6f3e825",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pre_checks/validate_gateway.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8c9bfda85d5f4e801535e2827f9971d63174585afe037109a834c103d6acee6a",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pre_checks/validate_mac_address.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c66e99b7b224c0b68e0e6420e20d4dea42a08ce995d00460988259b0570b3c3a",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pre_checks/validate_memory_size.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4bd2ef1a84ab4bafcaae6e266056bbe8efe04728d0f381f80cb5192d95260d5",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pre_checks/validate_network_test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "645b12514a5072f220253ad44f2d946f38bf45d043284a4ec809dfc3c28a9db3",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pre_checks/validate_services_status.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "93fffffec58c3fec8248e0021107c93532726f0cdbb304a626d0b6ac79eb4c3c",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pre_checks/validate_vcpus_count.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b7e7df2877ea9490a462c4edcae383bd73aa275da4e357bfc1d79c154a469a39",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/add_engine_as_ansible_host.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b256105f4a4f66a6e23e03dc00bea3aa0d5a29772e043d34ba3143d34b363a7e",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/alter_libvirt_default_net_configuration.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e3de754f0373d6586bc9cf48c917ca49cc72e264af467366cf10906b30ad5ee",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/apply_openscap_profile.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c77fdd82c2d788092f2213e37187081340c946d4b34a72a6831161113e093125",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/auth_revoke.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8fac25039bfd3c11600124fa227e6a198a404a9a06847710fd9d040b8507ba70",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/auth_sso.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "740a3698d73da71f3fe9325a811b309157c849837167f7e89a6db7327e04f5bf",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/clean_cloud_init_config.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fdfa13f3471ed27b2ee93b6e66ab0fc78e9e1f86013fb940aac875441f4aa7cc",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/clean_local_storage_pools.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7e55bcc5b1796b26c86be3bc7f40134086c3bc48fd14c43f68ff30c805311819",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/clean_localvm_dir.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb034920d818a04ed3674f35880f28a108d5023272b0e801517af83cb514cf95",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/create_storage_domain.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "42e304c1d08e8b79b59440a0766063fc43ff8cddb312df33f463e37726110ea4",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/fc_getdevices.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2608dffb9d636db3337d9d9f55cf3809e8ec3fc27914d619eba32daf92d79330",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/fetch_engine_logs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9f6e0b81f31f65da82dd872b278e002f8934ef3c69ce4096091f7b2eae5cc86a",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/fetch_host_ip.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0252b7c4117a704486dab2cc0327693367347d2af1a48438c82ba6423648a33f",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/filter_team_devices.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5de9a207eead341ddf6db0a2a58971e37b9de2c86d345abfcc5e8dcd585abc50",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/filter_unsupported_vlan_devices.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "46fde29f53a0556bb726ba3274baece3f06632114a353e5d21c12931357241a0",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/final_clean.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f34840bd6ad16a16f22687d9deee369eef7f53d72f0d172a0a63d89cb5dcbbf5",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/full_execution.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8012a3860ef1d33a7dc1542a9e5001d1153b5e343b1004f27c607a9fd885d773",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/get_appliance_dist.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "58f72cdfa5e76fab14766e407542ce871033b501a5165a7c4199ff264edefe89",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/get_local_vm_disk_path.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06ea4b5fd5f94f8173680093e3fac76c8a628c6b646901cce30ac1559d107fb7",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/install_appliance.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "71fdc499e89c44671510b2c400c7450e8fd49f743db42f1e44120c25494fb0ee",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/install_packages.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7c98b24384a2d27657cac75f3414eef7ac37471e7e6d1062166685f2cdfba9d4",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/ipv_switch.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "711996872769061a1115daa045865c1ac19abd79102c4df1137a0a8f2271d3fa",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/iscsi_discover.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "72d761ba4daaedeac8e34dca6bcc0eeebde701128bb7b5fdbe96b08d286c177d",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/iscsi_getdevices.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "10b2609a5041eb0b85fbd38462865d47ed7bad00a6d0dc09462832e00f41b3c0",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "27fc692c3676b70eb4d3151dc56485fa1327697b55da5796510f5383e5103c93",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/partial_execution.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "00ff4a185254d24adc2bf63fe1aa8b99eed49f38ce0f902cd6e9637b3ffb0f21",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pause_execution.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d7ad99d745ce1f1580feeb39ce1664a2e5845a8b205b1edd13f5d9284088d1b0",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/restore_backup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ff3cdabdd919efa09b25ee7f168f40203497a0be4e549229d931fbd4e84164f",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/restore_host_redeploy.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "76d792777a58ed7d04a034b2dd546e12c92a854fb148752b85464db63b90b508",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/search_available_network_subnet.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "808cda0fb20b45d96a1992ef941996dd18791a789e865a06a9e2e49c82c0cbaf",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/sync_on_engine_machine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc1907519278a8e3867f938f8f4f2bde39a9458670d851855890ae6de344ee3e",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/validate_ip_prefix.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "89fd2e1962afd271a930373641f3636f55105b6399880e10a60659338e9bdf29",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/validate_vlan_bond_mode.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ea0d9aa61a5da96b08fbaa824c0e86f5237e95633fcd079bbb3c8e608e6a32f",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/validate_vlan_name.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "768f9061d2b4db60d9f1f2bd67c19ce469e3058fddd39d0ca5ec0f9679517173",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/initial_clean.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9235657660efb0b3cfb7c4bda6c0ec7c48e21d8c4ef07a949ab9bd0ee9b973db",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates/broker.conf.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d5d0691008ba803c77c10312be55b10e47ca2e9d04049c8751c12aac2c63452a",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates/fhanswers.conf.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd814bc72fb77a283864d2d2bcc729cd1e1424e55d96d3e3c52ac7fe86c4ed6e",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates/hosted-engine.conf.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a3a530d046a7e5b4c7b0691e6b6e8412e21b9480664604df04496a5db16acf91",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates/ifcfg-eth0-dhcp.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "47f188d5d7f0c676a3bb4cdcd10eade0d329f2b22e898ee2865b5a99958f0f28",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates/ifcfg-eth0-static-ipv6.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "09285f1984781aed82c4633d575e28734689fe6ad2d7f24fdda7ce8fe6ffa8da",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates/ifcfg-eth0-static.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0c5713af9904015a96eb5c85156013c86d6262978329a2ecb098d3f2157632aa",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates/meta-data.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4d1e14ea63ecadccab3ad35809d1126103ac83c0b8348af1c7ec1f9eeb5356d",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates/network-config-dhcp.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2f3c4a0cbfd45674a49a269d7be2060fbc9debe94243ba095f018b95f2aae88d",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates/network-config.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1cecf8d5c0c6e8de789058d57e33112c93ee9793b0d47c5e1d9af87009e046cd",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates/user-data.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee53560828d127e697b6fac3e706af225214050da817ef0ba474619233a18f56",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates/version.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "df9d6e828900b2b252f9222428edb7aa98e0a96f4885c6626397c4281aa0b01c",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates/vm.conf.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7e12a9773fa641e2d90007baf8275b692f880a568f6f21ec896ba10d89661772",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6fd54dd1ee928aa2d5157715bb6efdc7b149235a1dfe84a35479a409ce014d6",
+ "format": 1
+ },
+ {
+ "name": "roles/infra",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43cbf9c83626b92441ab813a6ed0967521abd594700db7f4a74afb10fb869634",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/examples",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/examples/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/examples/vars/ovirt_infra_vars.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ea7382f07df13bb80695ee19c38c52aaa48138f85a5fd2a9c7a78efaf6f19411",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/examples/vars/passwords.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c135528dad4a7ec75c51b21ebee33d4a41a0ed73088e828e90f0ee34a9dbd003",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/examples/ovirt_infra.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a33e886ee1863693e8596d4e97aa28cdd0f774ab57f766699d3b80dd5ae7bdce",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/examples/ovirt_infra_destroy.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec9b972a04e5e16cb267b9c492935300023dd2751d530b606c22852d7eb6eaee",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/aaa_jdbc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/aaa_jdbc/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/aaa_jdbc/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "17d0abc72b21f8c4705c7f3a2685e127b5a35fd0fe7b7e8fd1d7fcf70ba00de3",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/aaa_jdbc/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/aaa_jdbc/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1bf4dcaf0397579431601a07b3faf9552e6e064655d544a49a2e019de0efa77d",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/aaa_jdbc/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4375cfc057fb42b16a8b0fbeb7715712355cac4d03e440b396608b1bf4fa27cc",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/clusters",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/clusters/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/clusters/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a574b7097e21f095e7e46e57e770eed17ce6252eada2415d784a90a1cd3de7db",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/clusters/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/clusters/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3dd97b72b356fb9fda8d10d8c5877c5a9ad8db4b42cf17018d983ab56cbee10a",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/clusters/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8f7e8ddf2319f57e14f63216f2579e695b3f51fe9f1db47442ca7d9e3fd60846",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "30ac2342a8f199951885187e8974c55a3c5a4bc0283746e430a5e34804d4f895",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/tasks/cluster_and_hosts.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eaec02db023b502b330e5400406153522e0f70441d013e934c0b4db72fa9e45f",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/tasks/datacenter.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e569a2c53d245cf4fd7dc8a61a5cc6818ef35c9ecc1f00f9347985b1f0862f1",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/tasks/disks.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "356a72a24f1fc2190b8f13ac1a9b51422a444e221f3fe2a7085f91c22ac7f9bb",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e04a79449a131fc5fed5c509b2c8a268cef7c167cbb7443ad13f2d402c02a48",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/tasks/storages_last.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "56f222731a7c058905e9d003ecda5bf9de0ce94f9d71b7650157552919504052",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/tasks/storages_pre.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "570a2b8e93e98cc0bfbd78430959a0db65d500f54a39308db3a4f84394a618c6",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/tasks/templates.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "42535d73df3a7563605686e07752af0f911435b25a80a04ab9241f5b0c65c386",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/tasks/vm_pools.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "869addec6825d3ef8aef24b6ecdd96d759af75ecc99e9ce5f7108b2fe33b69fa",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/tasks/vms.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d82a8266fb68387223bf4a821cb6c9abe17a51a2567eb14e0e718d5558fecb12",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f50289bf733588f37db2429f37781ded326c0d74e18c697d79515022e5f38657",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenters",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenters/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenters/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ddb8e440f777516ca7dc411535887948bcbe53246b4181b3cb198f80dc472da3",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenters/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenters/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b7196cdd92719bc397af18398173c424b8d131f4ee6dba7c1278f6bad1e612b2",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenters/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "214b779ef23478dac5fbca69527d16ef268ef69a0d21ecb90c9299b05b901599",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/external_providers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/external_providers/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/external_providers/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5bfafe45e58d4d79d867d3f0b330d824f16b5635dc7a5891846fddc1574f82cb",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/external_providers/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a02ecce75eb99b2607ea7fd89cfbc6d3a3078d99d84f3156ea383fd3bc0cc6f2",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/hosts",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/hosts/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/hosts/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5ffe29f2b3b80f58c6e9b98baee20af1af525b30a5e1eea75348e3e2e1a554ce",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/hosts/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/hosts/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "67ff6d8584476db0f60efd67135f9a7d38e937ba4206f9580b69b8970250bcc1",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/hosts/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4b9289ded6f5b48eaa09c6618d84253294f618353ee836d767261afaf0d5ff06",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/mac_pools",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/mac_pools/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/mac_pools/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "668356454fedd30e240952f68f64a3b591331c225b51c4d22aa361cc976ebbd4",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/mac_pools/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c4e597a6aff75657e0a3d56d5d1624f0a5a19ff7e351f120fc1fb4b7d0210923",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/networks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/networks/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/networks/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a5045933314fcad8a8588ca46b091938a9c8b708b0fdf8951ef6e3e83a4b08b9",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/networks/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "69ba323cfd973321a9e768aace91dbbbb5983dd2814598e77fb1d55ccf6b6fd3",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/permissions",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/permissions/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/permissions/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54d4344381fee5124b217646ed15a91e0553ef9823f6d9a8bc5dc37702c27703",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/permissions/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "373fe54a49924231191bba8d6f2b1a6eff00a0bcff5b73f73eef3fbc880e1f59",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/storages",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/storages/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/storages/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "884635c73374e2f521d421b727196147daee586986bed5fcb068366754d66d53",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/storages/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0222d631ed65ab1854640e857d65de182d127649043859d746419670b5b32bd4",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/tasks/create_infra.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f8c2c13ca874cd106c5df090fe27194761d5bb8c0ca832fcd2e2636da227fe9",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d5e3dd05a90a3062d763151bd5fa57ba9b0d6a0d1e5371cd5117c31199c6f655",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/tasks/remove_infra.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "910de118f4e477d913ab1480854952c657ad233da673b2842d31cb1c06b653b9",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e1d6fc4824b5074ccdf47dcb7cdf480443b11318eeee2f1f90b33e7b8482a550",
+ "format": 1
+ },
+ {
+ "name": "roles/remove_stale_lun",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/remove_stale_lun/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/remove_stale_lun/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e0781e2354d1ca7c79e6b6915ab26643344acb2b075a10c900abaa9c717f7aa3",
+ "format": 1
+ },
+ {
+ "name": "roles/remove_stale_lun/examples",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/remove_stale_lun/examples/passwords.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c135528dad4a7ec75c51b21ebee33d4a41a0ed73088e828e90f0ee34a9dbd003",
+ "format": 1
+ },
+ {
+ "name": "roles/remove_stale_lun/examples/remove_stale_lun.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "37bb921142b647852d34092e7ed9c98627656f9d1ba9ca4e19bb7a62e029229c",
+ "format": 1
+ },
+ {
+ "name": "roles/remove_stale_lun/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/remove_stale_lun/tasks/fetch_hosts.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5d648cb0fa07d757846d564fe520a5f9fa0a126e33c769481bc9c39d07eee359",
+ "format": 1
+ },
+ {
+ "name": "roles/remove_stale_lun/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dd053c903c438a2c9c8c4b79e4954f6e1474fedd8902f8a4665432454412b0fb",
+ "format": 1
+ },
+ {
+ "name": "roles/remove_stale_lun/tasks/remove_mpath_device.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8af83eb6cbe20c7fb2f0ef0a9b69f6129c97b665c8b3ad3cffff77e5300116da",
+ "format": 1
+ },
+ {
+ "name": "roles/remove_stale_lun/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36614071855b57d37060c8fb0db8da56627332613c06fe01b63396095226fbd3",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "db4e3a15d6e4a0b7dc391ab7892418487bb2391e453d837ee77770989101cb22",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/examples",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/examples/ovirt_repositories_release_rpm.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "16f013b459194303f4a4b16485a9ded42c866ae244c024ef6bca5e544e1779cd",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/examples/ovirt_repositories_subscription_manager.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bb5e84201ed0b91de44f606f4b2a930ce07065de4eb98ce137d41256399e1266",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/examples/passwords.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7baec1da55ec214cdeaf66cb5fbdce88498268997b2a4bb5b6a3fc5a093e4e06",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/tasks/backup-repos.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7c9af33497f79b5246552693d4cdf1d67ab172049a77e4712df0e6945ef1ec14",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/tasks/install-satellite-ca.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1a7294752c54db0db59f419718b94b4c0da63dcb06e9725ec5e03c6877ba18c",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f22d7a5ac1fd5b801067ac70b05cda17c43bb976957402693362a95908414cc3",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/tasks/search-pool-id.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1982ab263ca68de6cf0067767b11702a2c4ab146432ed1b11b510715d7697e36",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/tasks/rh-subscription.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f68c672708d4c32fc653654add85c4942a82e4f0aecd81195d322d5d9a054287",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/tasks/rpm.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af0e42025c9423cb9f722d17ae6c2420aca003560e972a000d97328848a8c74b",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/tasks/satellite-subscription.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73b390217602bd01ad7ab6a47e00d1bcdb94c995951f30cc27c01c1641185c6f",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8f3190dd83d2c27e2cd4b4cc36b9f574075ac41cd8a62823a7a9c119c0bae624",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/engine_4.1.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f01ec6b4fcfc630b4f8a139b4f30123e7805ed2976cba75dc9a3e3c380fc5db1",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/engine_4.2.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ced8a355735ce4d3636dc76bc7d63a6a71834064155399f971d9cb37da3237c1",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/engine_4.3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6936324dbf3686dab7f3a0fd7586a7f1db9d56e1fcc60c8033b94522d393997e",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/engine_4.4.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "18f42ea5ce1dc798ee607357fa74569d8f069e7816f07ae9f79be74a9c1e91d2",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/engine_eus_4.4.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ddef8e1dda88e910e2a5d83f1fae2c851757de805321235ac166b1458b1a39b6",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/host_4.1.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b3171ba133adc54ba539e763246251b0f833dc8603d5a46243b55d82fbb80490",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/host_4.2.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8a97eeb8025db4ed4a5c88bf2a652f41982f48a2ce195e3c47b0990897873cd6",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/host_4.3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec3616b3d9433ef599822a6131e7d3168d5b5bb75712f0b69a1c822459cd6145",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/host_4.4.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dad02834d3f927ce052a2e9180f7a809d905369691d0707342b6557934315fc5",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/host_eus_4.4.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ddef8e1dda88e910e2a5d83f1fae2c851757de805321235ac166b1458b1a39b6",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/host_ppc_4.4.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "89c1925d681185c71e2a249f30d0cc1efc885aa2339c5866b01f2459f1ddad5f",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/host_ppc_eus_4.4.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b2a81f08cee1bbf2fa7431df24eb4c47697227531b05efdd6666eb3af4626b7",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/rhvh_4.1.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cbc95494cc017f3b7ccf608dc59b77394847929474531547fe5a6448d71d8b16",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/rhvh_4.2.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cbc95494cc017f3b7ccf608dc59b77394847929474531547fe5a6448d71d8b16",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/rhvh_4.3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cbc95494cc017f3b7ccf608dc59b77394847929474531547fe5a6448d71d8b16",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/rhvh_4.4.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fe7220fb776160b30f86fe7f9b70c41ae4d26e774d14a80951bf9b91aaacaffb",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3c85a61e0991f9316f14f9dfcef8169169f4afeaeb6f62903900e15cb2aabb6",
+ "format": 1
+ },
+ {
+ "name": "roles/shutdown_env",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/shutdown_env/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/shutdown_env/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "23ee730fc457add36b19f314667fcea6891341e5e8ce982cd64f47773b7621fe",
+ "format": 1
+ },
+ {
+ "name": "roles/shutdown_env/examples",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/shutdown_env/examples/passwords.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c135528dad4a7ec75c51b21ebee33d4a41a0ed73088e828e90f0ee34a9dbd003",
+ "format": 1
+ },
+ {
+ "name": "roles/shutdown_env/examples/shutdown_env.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "845381aee5af25a91b98ae136d2b68fe217c686e21caa74b2016405c98194d5f",
+ "format": 1
+ },
+ {
+ "name": "roles/shutdown_env/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/shutdown_env/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ee1707f55bef637da0a715c984a5bcfaa70dca0662b00a4344203d8750fc453",
+ "format": 1
+ },
+ {
+ "name": "roles/shutdown_env/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d1bb8523fef9d1dc2ccd7202761c9085edb675f01d3205401117be6311cd1e0e",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a0cb0bfdf543b6406754a7524c94b4bded7fd7c7d0bc1648d2571843de4f904c",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/examples",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/examples/ovirt_vm_infra.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d315a65145a8294f0e007f65d336c607926da12ab2621e4a60c8c627fa9f907a",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/examples/ovirt_vm_infra_inv.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "800510a76705a8b1ac6a8b94f31826c2f303aa74730e151cdfe0b3984eaa6eb7",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/examples/passwords.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c135528dad4a7ec75c51b21ebee33d4a41a0ed73088e828e90f0ee34a9dbd003",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/tasks/affinity_groups.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "99de64bdc087e561bccb1adacf980271a66654f63ce536678cade94a8b6e9ca2",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/tasks/affinity_labels.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1302469d26a335dab3169677c35ae70d96b84272e586c27786aabf7f06a5468e",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/tasks/create_inventory.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a3df94becb3593f7dc9979a8ed5e740a60dc617c33b8f19f5f9f23567d9a0114",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/tasks/create_vms.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a45f7bf1f9bcaeec819a49e26272d959cd928e674e8a2cc6d78a61ecd572f09",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f06571e7e9c4ad867ca9a9ae451511b71c27934104b926f75e39b33d4efe148b",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/tasks/manage_state.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "52e5f76a2c2f30da25bed6d6ebd5b355d41bbcc72c1bdcb432c7a82995634d03",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/tasks/vm_state_absent.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2d5d08e9ac19af8523d7e8b0330294810e91d5ad77d4d4b67e1ccd61388ddda4",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/tasks/vm_state_present.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7225536dc041606cc96d608053e37faffdf24e2b6bb2f813d819072a7b130d07",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c8c6eebe73201d32862598f55e691204600b3b3d060e61cc233ff809b19ee3c1",
+ "format": 1
+ },
+ {
+ "name": "roles/image_template",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "067ecccb5371a364a0f3addff4870bf1bf8a8985b8bd39dfebc75db057005e77",
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/examples",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/examples/ovirt_image_template.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed3ee749c3fe4ea012157bf15c4af22461c9b6707d0fe64ac75e59b98860fbe1",
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/examples/passwords.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c135528dad4a7ec75c51b21ebee33d4a41a0ed73088e828e90f0ee34a9dbd003",
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/tasks/empty.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a7e2509d3edfdc59f4e18438a84185b831e881a73f62ab0871de3ae1be1bf493",
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5cc8ac6f2a26ea08493e276f5900489f8233faaf9c80aa6faeed9d741393a3ad",
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/tasks/glance_image.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e8434e458bc32c9f1da37ae5cdac4c6535d133bc55e1033d1b2773436b2bcc3e",
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/tasks/qcow2_image.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fcfc1af2b33cb594e8c073dc7d99673daf18a2eb9dac03240119aa6e25200d36",
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f1785c9a8c3028367d2ba75256fa7260c079392212151d682acd55cab7750fbc",
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d4ba8182ce18c62eba9fff4737dc56a0cd779f7d41f2153bf38a08cf8898b4b",
+ "format": 1
+ },
+ {
+ "name": "tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/sanity",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.10.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a4cb503461461d745b3edc27f0c6dd368e7d3a9a1fe8577d06bc069a3d68adf",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.11.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "68d5a192f3464327d2d51966c00ec15fa7a46173f5c6a88e79ba4f8314e5697d",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.12.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9045518911d2c9b9ea386e5cc9c9224fba94629027e3fe92d4dedf9d7a9784a9",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.13.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d84566f116944d49e6164c91d74c5e16722138e494ce40c7cdcac285d8929ea",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.9.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a4cb503461461d745b3edc27f0c6dd368e7d3a9a1fe8577d06bc069a3d68adf",
+ "format": 1
+ },
+ {
+ "name": "tests/.gitignore",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b5726d3ec9335a09c124469eca039523847a6b0f08a083efaefd002b83326600",
+ "format": 1
+ },
+ {
+ "name": ".config",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".config/ansible-lint.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0eda8abb8b631980078ef5f512d080ec8e621260f6562b31b2ee145b81af36b8",
+ "format": 1
+ },
+ {
+ "name": "bindep.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a3f3d58aa2576f1acb4dec85fb4826bb85ffc7db8eb1660d08b3f6592fa05dc5",
+ "format": 1
+ },
+ {
+ "name": "build.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a393ff367c0b115d57411ce71dfa0dc682226c65cfb09f06e6b12ce94c7e6d7",
+ "format": 1
+ },
+ {
+ "name": "CHANGELOG.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "765ca9f9c6f16154569a4c220c5a893f57e4bdda19aacf37758158c3e8c231af",
+ "format": 1
+ },
+ {
+ "name": "ovirt-ansible-collection.spec.in",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2b7b6cad5d26e952ae9019301d9a8c081f17972f14db523addf2a8dfd7c10875",
+ "format": 1
+ },
+ {
+ "name": "README-developers.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "81e38bf32f2a201d965eb10891068c1a56cc43e6ffd83c07f3f95442a1ab0e59",
+ "format": 1
+ },
+ {
+ "name": "README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "29adaabe4887083b2cf4efcb8e18ae195ec5ec4d5c5dc7ebc7e1798827e10a97",
+ "format": 1
+ },
+ {
+ "name": "README.md.in",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b00a4c9f3df47a7507e7177abc719e6a192f82341cfb6278685e3a45eae3e635",
+ "format": 1
+ },
+ {
+ "name": "requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "948e536d6fe99ae26067a6e1842a2947aee1094fe8baaa8cf136a578ee99b0bd",
+ "format": 1
+ },
+ {
+ "name": "ovirt-ansible-collection.spec",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "911da9e7718284248df18b810e9471a0dcfa1cc585b6a479751b6ff3440fa33e",
+ "format": 1
+ },
+ {
+ "name": "ovirt-ansible-collection-2.4.1.tar.gz",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f078cace8675eb2d9748176b44b60c6f487930fb5d3cac351c92c312cf4995ce",
+ "format": 1
+ }
+ ],
+ "format": 1
+}
\ No newline at end of file
diff --git a/ansible_collections/ovirt/ovirt/MANIFEST.json b/ansible_collections/ovirt/ovirt/MANIFEST.json
new file mode 100644
index 000000000..03de4327e
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/MANIFEST.json
@@ -0,0 +1,37 @@
+{
+ "collection_info": {
+ "namespace": "ovirt",
+ "name": "ovirt",
+ "version": "2.4.1",
+ "authors": [
+ "Martin Necas <mnecas@redhat.com>"
+ ],
+ "readme": "README.md",
+ "tags": [
+ "collection",
+ "infrastructure",
+ "linux",
+ "cloud",
+ "virtualization"
+ ],
+ "description": "The oVirt Ansible Collection.",
+ "license": [
+ "Apache-2.0",
+ "GPL-3.0-or-later"
+ ],
+ "license_file": null,
+ "dependencies": {},
+ "repository": "https://github.com/ovirt/ovirt-ansible-collection",
+ "documentation": null,
+ "homepage": "https://www.ovirt.org/",
+ "issues": "https://github.com/ovirt/ovirt-ansible-collection/issues"
+ },
+ "file_manifest_file": {
+ "name": "FILES.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f934b63081cc9b57b257cf97ac3e56698357a8b724fe2ba426459ddeb7b1620",
+ "format": 1
+ },
+ "format": 1
+}
\ No newline at end of file
diff --git a/ansible_collections/ovirt/ovirt/README-developers.md b/ansible_collections/ovirt/ovirt/README-developers.md
new file mode 100644
index 000000000..21a4c200f
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/README-developers.md
@@ -0,0 +1,4 @@
+README for developers
+====================================
+
+You can find all information about [development](https://github.com/oVirt/ovirt-ansible-collection/wiki/Development) and [project structure](https://github.com/oVirt/ovirt-ansible-collection/wiki/Project-structure) in the [wiki](https://github.com/oVirt/ovirt-ansible-collection/wiki).
diff --git a/ansible_collections/ovirt/ovirt/README.md b/ansible_collections/ovirt/ovirt/README.md
new file mode 100644
index 000000000..5cd461fd4
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/README.md
@@ -0,0 +1,27 @@
+[![Copr build status](https://copr.fedorainfracloud.org/coprs/ovirt/ovirt-master-snapshot/package/ovirt-ansible-collection/status_image/last_build.png)](https://copr.fedorainfracloud.org/coprs/ovirt/ovirt-master-snapshot/package/ovirt-ansible-collection/)
+[![Build Status](https://img.shields.io/badge/docs-latest-blue.svg)](https://docs.ansible.com/ansible/2.10/collections/ovirt/ovirt/index.html)
+
+oVirt Ansible Collection
+====================================
+
+Requirements
+------------
+
+ * Ansible core version 2.12.0 or higher
+ * Python SDK version 4.5.0 or higher
+ * Python netaddr library on the Ansible controller node (an installation sketch follows)
+
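+One way to satisfy the Python requirements on the controller is a one-off
+local play; a minimal sketch (the PyPI package names
+`ovirt-engine-sdk-python` and `netaddr` are assumptions based on the
+upstream projects, not taken from this document):
+
+```yaml
+---
+# install_prereqs.yml -- hypothetical helper play; run with:
+#   ansible-playbook install_prereqs.yml
+- hosts: localhost
+  connection: local
+  tasks:
+    - name: Install the oVirt Python SDK and netaddr on the controller
+      ansible.builtin.pip:
+        name:
+          - ovirt-engine-sdk-python>=4.5.0
+          - netaddr
+```
+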
+Upstream oVirt documentation
+--------------
+https://docs.ansible.com/ansible/latest/collections/ovirt/ovirt/index.html
+
+Downstream RHV documentation
+--------------
+https://cloud.redhat.com/ansible/automation-hub/redhat/rhv
+
+
+Licenses
+-------
+
+- Apache License 2.0
+- GNU General Public License 3.0
diff --git a/ansible_collections/ovirt/ovirt/README.md.in b/ansible_collections/ovirt/ovirt/README.md.in
new file mode 100644
index 000000000..8270467e6
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/README.md.in
@@ -0,0 +1,88 @@
+[![Build Status](https://jenkins.ovirt.org/job/oVirt_ovirt-ansible-collection_standard-check-pr/badge/icon)](https://jenkins.ovirt.org/job/oVirt_ovirt-ansible-collection_standard-check-pr/)
+[![Build Status](https://img.shields.io/badge/docs-latest-blue.svg)](https://docs.ansible.com/ansible/2.10/collections/ovirt/ovirt/index.html)
+
+oVirt Ansible Collection
+====================================
+
+The `ovirt.ovirt` collection manages all oVirt Ansible modules.
+
+The PyPI installation is no longer supported. If you want to install
+all dependencies, do so manually, or install the collection from RPM
+and it will be done automatically.
+
+Note
+----
+Please note that when installing this collection from Ansible Galaxy you are instructed to run the following command:
+
+```bash
+$ ansible-galaxy collection install ovirt.ovirt
+```
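+
+Equivalently, the collection can be installed through a requirements file; a
+minimal sketch, where the file name and version pin are illustrative:
+
+```bash
+# requirements.yml is a hypothetical local file pinning the collection.
+cat > requirements.yml <<'EOF'
+collections:
+  - name: ovirt.ovirt
+    version: ">=2.4.1"
+EOF
+ansible-galaxy collection install -r requirements.yml
+```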
+
+Requirements
+------------
+
+ * Ansible core version 2.12.0 or higher
+ * Python SDK version 4.5.0 or higher
+ * Python netaddr library on the Ansible controller node
+
+Content of the collection
+----------------
+
+* modules:
+  * ovirt_* - Modules to manage objects in oVirt Engine
+  * ovirt_*_info - Modules to gather information about objects in oVirt Engine
+* roles:
+ * cluster_upgrade
+ * engine_setup
+ * hosted_engine_setup
+ * image_template
+ * infra
+ * repositories
+ * shutdown_env
+ * vm_infra
+ * disaster_recovery
+* inventory plugin
+
+
+Example Playbook
+----------------
+
+```yaml
+---
+- name: ovirt ansible collection
+ hosts: localhost
+ connection: local
+ vars_files:
+  # Contains the encrypted `ovirt_password` variable, created with ansible-vault
+ - passwords.yml
+ tasks:
+ - block:
+      # Using the fully qualified ovirt.ovirt name for ovirt_auth verifies that the collection is loaded correctly
+      - name: Obtain SSO token using username/password credentials
+ ovirt.ovirt.ovirt_auth:
+ url: https://ovirt.example.com/ovirt-engine/api
+ username: admin@internal
+ ca_file: ca.pem
+ password: "{{ ovirt_password }}"
+
+      # The previous task generated the I(ovirt_auth) fact, which you can later use
+ # in different modules as follows:
+ - ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ state: absent
+ name: myvm
+
+ always:
+ - name: Always revoke the SSO token
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
+ collections:
+ - ovirt.ovirt
+```
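+
+One possible invocation, assuming `passwords.yml` was encrypted with
+`ansible-vault` and the playbook above is saved as `ovirt_example.yml` (an
+illustrative file name):
+
+```bash
+ansible-vault encrypt passwords.yml        # holds: ovirt_password: ...
+ansible-playbook ovirt_example.yml --ask-vault-pass
+```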
+
+Licenses
+-------
+
+- Apache License 2.0
+- GNU General Public License 3.0
diff --git a/ansible_collections/ovirt/ovirt/automation/README.md b/ansible_collections/ovirt/ovirt/automation/README.md
new file mode 100644
index 000000000..1b6a39975
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/automation/README.md
@@ -0,0 +1,8 @@
+Continuous Integration Scripts
+==============================
+
+This directory contains scripts for Continuous Integration provided by the
+[oVirt Jenkins](http://jenkins.ovirt.org/)
+system, following the standard defined in the
+[Build and test standards](http://www.ovirt.org/CI/Build_and_test_standards)
+wiki page.
diff --git a/ansible_collections/ovirt/ovirt/automation/build.sh b/ansible_collections/ovirt/ovirt/automation/build.sh
new file mode 100755
index 000000000..cfd3faa48
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/automation/build.sh
@@ -0,0 +1,78 @@
+#!/bin/bash -xe
+
+ROOT_PATH="$PWD"
+BUILD_ROOT_PATH="/tmp"
+
+# Remove any previous artifacts
+rm -rf "$BUILD_ROOT_PATH/ansible_collections"
+rm -f "$BUILD_ROOT_PATH"/*tar.gz
+
+# Create exported-artifacts dir
+[[ -d exported-artifacts ]] || mkdir "$ROOT_PATH/exported-artifacts/"
+
+# Create builds
+./build.sh build ovirt "$BUILD_ROOT_PATH"
+./build.sh build rhv "$BUILD_ROOT_PATH"
+
+OVIRT_BUILD="$BUILD_ROOT_PATH/ansible_collections/ovirt/ovirt"
+RHV_BUILD="$BUILD_ROOT_PATH/ansible_collections/redhat/rhv"
+
+cd "$OVIRT_BUILD"
+
+# Create the src.rpm
+rpmbuild \
+ -D "_srcrpmdir $BUILD_ROOT_PATH/output" \
+ -D "_topmdir $BUILD_ROOT_PATH/rpmbuild" \
+ -ts ./*.gz
+
+# Remove the tarball so it will not be included in galaxy build
+mv ./*.gz "$ROOT_PATH/exported-artifacts/"
+
+# Overwrite github README with dynamic
+mv ./README.md.in ./README.md
+
+# Create tar for galaxy
+ansible-galaxy collection build
+
+# Create the rpms
+rpmbuild \
+ -D "_rpmdir $BUILD_ROOT_PATH/output" \
+ -D "_topmdir $BUILD_ROOT_PATH/rpmbuild" \
+ --rebuild "$BUILD_ROOT_PATH"/output/*.src.rpm
+
+cd "$RHV_BUILD"
+
+# Remove the tarball so it will not be included in automation hub build
+rm -rf ./*.gz
+
+# Overwrite github README with dynamic
+mv ./README.md.in ./README.md
+
+# create tar for automation hub
+ansible-galaxy collection build
+
+# Store any relevant artifacts in exported-artifacts for the ci system to
+# archive
+find "$BUILD_ROOT_PATH/output" -iname \*rpm -exec mv "{}" "$ROOT_PATH/exported-artifacts/" \;
+
+# Export build for Ansible Galaxy
+mv "$OVIRT_BUILD"/*tar.gz "$ROOT_PATH/exported-artifacts/"
+# Export build for Automation Hub
+mv "$RHV_BUILD"/*tar.gz "$ROOT_PATH/exported-artifacts/"
+
+COLLECTION_DIR="/usr/local/share/ansible/collections/ansible_collections/ovirt/ovirt"
+export ANSIBLE_LIBRARY="$COLLECTION_DIR/plugins/modules"
+mkdir -p "$COLLECTION_DIR"
+cp -r "$OVIRT_BUILD"/* "$OVIRT_BUILD"/.config "$COLLECTION_DIR"
+cd "$COLLECTION_DIR"
+
+antsibull-changelog lint -v
+ansible-lint roles/*
+
+cd "$ROOT_PATH"
+
+# If the PR changed something in ./plugins or ./roles, it must also include a changelog fragment
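+# (`git diff --quiet` exits 1 when differences exist; the `$(...)$?` idiom
+# turns that exit status into a testable value)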
+if [[ $(git diff --quiet HEAD^ ./plugins ./roles)$? -eq 1 && $(git diff --quiet HEAD^ ./changelogs)$? -eq 0 ]]; then
+ echo "ERROR: Please add changelog.";
+ exit 1;
+fi
diff --git a/ansible_collections/ovirt/ovirt/bindep.txt b/ansible_collections/ovirt/ovirt/bindep.txt
new file mode 100644
index 000000000..9b9844665
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/bindep.txt
@@ -0,0 +1,6 @@
+gcc [compile platform:centos-8 platform:rhel-8]
+libcurl-devel [compile platform:centos-8 platform:rhel-8]
+libxml2-devel [compile platform:centos-8 platform:rhel-8]
+openssl-devel [compile platform:centos-8 platform:rhel-8]
+python38-devel [compile platform:centos-8 platform:rhel-8]
+qemu-img [platform:centos-8 platform:rhel-8]
diff --git a/ansible_collections/ovirt/ovirt/build.sh b/ansible_collections/ovirt/ovirt/build.sh
new file mode 100755
index 000000000..8185f0e4a
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/build.sh
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+VERSION="2.4.1"
+MILESTONE=""
+RPM_RELEASE="1"
+
+BUILD_TYPE=$2
+BUILD_PATH=$3
+
+if [[ $BUILD_TYPE = "rhv" ]]; then
+COLLECTION_NAMESPACE="redhat"
+COLLECTION_NAME="rhv"
+else
+COLLECTION_NAMESPACE="ovirt"
+COLLECTION_NAME="ovirt"
+fi
+PACKAGE_NAME="ovirt-ansible-collection"
+PREFIX=/usr/local
+DATAROOT_DIR=$PREFIX/share
+COLLECTIONS_DATAROOT_DIR=$DATAROOT_DIR/ansible/collections/ansible_collections
+DOC_DIR=$DATAROOT_DIR/doc
+PKG_DATA_DIR=${PKG_DATA_DIR:-$COLLECTIONS_DATAROOT_DIR}
+PKG_DATA_DIR_ORIG=${PKG_DATA_DIR_ORIG:-$PKG_DATA_DIR}
+PKG_DOC_DIR=${PKG_DOC_DIR:-$DOC_DIR/$PACKAGE_NAME}
+
+RPM_VERSION=$VERSION
+PACKAGE_VERSION=$VERSION
+[ -n "$MILESTONE" ] && PACKAGE_VERSION+="_$MILESTONE"
+
+TARBALL="$PACKAGE_NAME-$PACKAGE_VERSION.tar.gz"
+
+dist() {
+ echo "Creating tar archive '$TARBALL' ... "
+ sed \
+ -e "s|@RPM_VERSION@|$RPM_VERSION|g" \
+ -e "s|@RPM_RELEASE@|$RPM_RELEASE|g" \
+ -e "s|@PACKAGE_NAME@|$PACKAGE_NAME|g" \
+ -e "s|@PACKAGE_VERSION@|$PACKAGE_VERSION|g" \
+ < ovirt-ansible-collection.spec.in > ovirt-ansible-collection.spec
+
+ find ./* -not -name '*.spec' -type f | tar --files-from /proc/self/fd/0 -czf "$TARBALL" ovirt-ansible-collection.spec
+ echo "tar archive '$TARBALL' created."
+}
+
+install() {
+ echo "Installing data..."
+ mkdir -p "$PKG_DATA_DIR/$COLLECTION_NAMESPACE/$COLLECTION_NAME"
+ mkdir -p "$PKG_DOC_DIR"
+
+ cp -pR plugins/ roles/ "$PKG_DATA_DIR/$COLLECTION_NAMESPACE/$COLLECTION_NAME"
+
+ if [[ $BUILD_TYPE = "rhv" ]]; then
+ echo "Creating link to ovirt.ovirt"
+ mkdir -p "$PKG_DATA_DIR/ovirt"
+ ln -f -s "$PKG_DATA_DIR_ORIG/redhat/rhv" "$PKG_DATA_DIR/ovirt/ovirt"
+ fi
+ echo "Installation done."
+}
+
+rename() {
+ echo "Renaming ovirt to $COLLECTION_NAMESPACE and ovirt to $COLLECTION_NAME"
+ find ./* -type f -exec sed -i -e "s/ovirt/$COLLECTION_NAMESPACE/g" -e "s/ovirt/$COLLECTION_NAME/g" {} \;
+}
+
+build() {
+ if [[ $BUILD_PATH ]]; then
+ BUILD_PATH="$BUILD_PATH/ansible_collections/$COLLECTION_NAMESPACE/$COLLECTION_NAME/"
+ mkdir -p "$BUILD_PATH"
+ echo "The copying files to $BUILD_PATH"
+ cp -r ./* .config/ "$BUILD_PATH"
+ cd "$BUILD_PATH"
+ rename
+ dist
+ fi
+}
+
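+# Dispatch: the first argument selects the function to run (dist, install or build).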
+$1
diff --git a/ansible_collections/ovirt/ovirt/changelogs/README.md b/ansible_collections/ovirt/ovirt/changelogs/README.md
new file mode 100644
index 000000000..0296e19e9
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/changelogs/README.md
@@ -0,0 +1,28 @@
+# Fragments
+
+## Content of a fragment
+
+### Example
+
+```yaml
+---
+minor_changes:
+ - ovirt_disk - Add backup (https://github.com/oVirt/ovirt-ansible-collection/pull/57).
+
+```
+
+### Types
+
+- major_changes
+- minor_changes
+- bugfixes
+- breaking_changes
+- deprecated_features
+- removed_features
+- security_fixes
+
+## Commands
+
+`antsibull-changelog lint`
+
+`antsibull-changelog release`
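+
+A sketch of the full fragment workflow; the fragment file name and PR URL are
+illustrative:
+
+```bash
+# Add a fragment, then validate it; `release` is run when cutting a version.
+cat > changelogs/fragments/57-ovirt_disk-add-backup.yaml <<'EOF'
+---
+minor_changes:
+  - ovirt_disk - Add backup (https://github.com/oVirt/ovirt-ansible-collection/pull/57).
+EOF
+antsibull-changelog lint
+antsibull-changelog release
+```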
diff --git a/ansible_collections/ovirt/ovirt/changelogs/changelog.yaml b/ansible_collections/ovirt/ovirt/changelogs/changelog.yaml
new file mode 100644
index 000000000..e4b508409
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/changelogs/changelog.yaml
@@ -0,0 +1,897 @@
+ancestor: null
+releases:
+ 1.0.0:
+ changes:
+ bugfixes:
+ - ovirt_snapshot - Disk id was incorrectly set as disk_snapshot_id (https://github.com/oVirt/ovirt-ansible-collection/pull/5).
+ - ovirt_storage_domain - Fix update_check warning_low_space (https://github.com/oVirt/ovirt-ansible-collection/pull/10).
+ - ovirt_vm - Remove deprecated warning of boot params (https://github.com/oVirt/ovirt-ansible-collection/pull/3).
+ minor_changes:
+ - ovirt_cluster - Add migration_encrypted option (https://github.com/oVirt/ovirt-ansible-collection/pull/17).
+ - ovirt_vm - Add bios_type (https://github.com/oVirt/ovirt-ansible-collection/pull/15).
+ fragments:
+ - 10-ovirt_storage_domain-fix-update_check-warning_low_space.yaml
+ - 15-ovirt_vm-add-bios_type.yaml
+ - 17-ovirt_cluster-add-migration_encrypted.yaml
+ - 3-ovirt_vm-remove-deprecated-warning-boot-params.yaml
+ - 5-ovirt_snapshot-disk id-was-incorrectly set-as-disk_snapshot_id.yaml
+ modules:
+ - description: Module to manage affinity groups in oVirt/RHV
+ name: ovirt_affinity_group
+ namespace: ''
+ - description: Module to manage affinity labels in oVirt/RHV
+ name: ovirt_affinity_label
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV affinity labels
+ name: ovirt_affinity_label_info
+ namespace: ''
+ - description: Retrieve information about the oVirt/RHV API
+ name: ovirt_api_info
+ namespace: ''
+ - description: Module to manage authentication to oVirt/RHV
+ name: ovirt_auth
+ namespace: ''
+ - description: Module to manage clusters in oVirt/RHV
+ name: ovirt_cluster
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV clusters
+ name: ovirt_cluster_info
+ namespace: ''
+ - description: Module to manage data centers in oVirt/RHV
+ name: ovirt_datacenter
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV datacenters
+ name: ovirt_datacenter_info
+ namespace: ''
+ - description: Module to manage Virtual Machine and floating disks in oVirt/RHV
+ name: ovirt_disk
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV disks
+ name: ovirt_disk_info
+ namespace: ''
+ - description: Create or delete an event in oVirt/RHV
+ name: ovirt_event
+ namespace: ''
+ - description: This module can be used to retrieve information about one or more
+ oVirt/RHV events
+ name: ovirt_event_info
+ namespace: ''
+ - description: Module to manage external providers in oVirt/RHV
+ name: ovirt_external_provider
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV external providers
+ name: ovirt_external_provider_info
+ namespace: ''
+ - description: Module to manage groups in oVirt/RHV
+ name: ovirt_group
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV groups
+ name: ovirt_group_info
+ namespace: ''
+ - description: Module to manage hosts in oVirt/RHV
+ name: ovirt_host
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV hosts
+ name: ovirt_host_info
+ namespace: ''
+ - description: Module to manage host networks in oVirt/RHV
+ name: ovirt_host_network
+ namespace: ''
+ - description: Module to manage power management of hosts in oVirt/RHV
+ name: ovirt_host_pm
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV HostStorages (applicable
+ only for block storage)
+ name: ovirt_host_storage_info
+ namespace: ''
+ - description: Module to manage Instance Types in oVirt/RHV
+ name: ovirt_instance_type
+ namespace: ''
+ - description: Module to manage jobs in oVirt/RHV
+ name: ovirt_job
+ namespace: ''
+ - description: Module to manage MAC pools in oVirt/RHV
+ name: ovirt_mac_pool
+ namespace: ''
+ - description: Module to manage logical networks in oVirt/RHV
+ name: ovirt_network
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV networks
+ name: ovirt_network_info
+ namespace: ''
+ - description: Module to manage network interfaces of Virtual Machines in oVirt/RHV
+ name: ovirt_nic
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV virtual machine
+ network interfaces
+ name: ovirt_nic_info
+ namespace: ''
+ - description: Module to manage permissions of users/groups in oVirt/RHV
+ name: ovirt_permission
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV permissions
+ name: ovirt_permission_info
+ namespace: ''
+ - description: Module to manage datacenter quotas in oVirt/RHV
+ name: ovirt_quota
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV quotas
+ name: ovirt_quota_info
+ namespace: ''
+ - description: Module to manage roles in oVirt/RHV
+ name: ovirt_role
+ namespace: ''
+ - description: Retrieve information about one or more oVirt scheduling policies
+ name: ovirt_scheduling_policy_info
+ namespace: ''
+ - description: Module to manage Virtual Machine Snapshots in oVirt/RHV
+ name: ovirt_snapshot
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV virtual machine
+ snapshots
+ name: ovirt_snapshot_info
+ namespace: ''
+ - description: Module to manage storage connections in oVirt
+ name: ovirt_storage_connection
+ namespace: ''
+ - description: Module to manage storage domains in oVirt/RHV
+ name: ovirt_storage_domain
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV storage domains
+ name: ovirt_storage_domain_info
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV templates relate
+ to a storage domain.
+ name: ovirt_storage_template_info
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV virtual machines
+ relate to a storage domain.
+ name: ovirt_storage_vm_info
+ namespace: ''
+ - description: Module to manage tags in oVirt/RHV
+ name: ovirt_tag
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV tags
+ name: ovirt_tag_info
+ namespace: ''
+ - description: Module to manage virtual machine templates in oVirt/RHV
+ name: ovirt_template
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV templates
+ name: ovirt_template_info
+ namespace: ''
+ - description: Module to manage users in oVirt/RHV
+ name: ovirt_user
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV users
+ name: ovirt_user_info
+ namespace: ''
+ - description: Module to manage Virtual Machines in oVirt/RHV
+ name: ovirt_vm
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV virtual machines
+ name: ovirt_vm_info
+ namespace: ''
+ - description: Module to manage VM pools in oVirt/RHV
+ name: ovirt_vmpool
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV vmpools
+ name: ovirt_vmpool_info
+ namespace: ''
+ - description: Module to manage vNIC profile of network in oVirt/RHV
+ name: ovirt_vnic_profile
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV vnic profiles
+ name: ovirt_vnic_profile_info
+ namespace: ''
+ plugins:
+ inventory:
+ - description: oVirt inventory source
+ name: ovirt
+ namespace: null
+ release_date: '2020-04-09'
+ 1.1.0:
+ changes:
+ bugfixes:
+ - ovirt_disk - Fix activate (https://github.com/oVirt/ovirt-ansible-collection/pull/61).
+ - ovirt_host_network - Fix custom_properties default value (https://github.com/oVirt/ovirt-ansible-collection/pull/65).
+ - ovirt_quota - Fix vcpu_limit (https://github.com/oVirt/ovirt-ansible-collection/pull/44).
+ - ovirt_vm - Fix cd_iso get all disks from storage domains (https://github.com/oVirt/ovirt-ansible-collection/pull/66).
+ - ovirt_vm - Fix cd_iso search by name (https://github.com/oVirt/ovirt-ansible-collection/pull/51).
+ major_changes:
+ - ovirt_disk - Add backup (https://github.com/oVirt/ovirt-ansible-collection/pull/57).
+ - ovirt_disk - Support direct upload/download (https://github.com/oVirt/ovirt-ansible-collection/pull/35).
+ - ovirt_host - Add ssh_port (https://github.com/oVirt/ovirt-ansible-collection/pull/60).
+ - ovirt_vm_os_info - Creation of module (https://github.com/oVirt/ovirt-ansible-collection/pull/26).
+ minor_changes:
+ - ovirt inventory - Add creation_time (https://github.com/oVirt/ovirt-ansible-collection/pull/34).
+ - ovirt inventory - Set inventory plugin insecure if no cafile defined (https://github.com/oVirt/ovirt-ansible-collection/pull/58).
+ - ovirt_disk - Add upload image warning for correct format (https://github.com/oVirt/ovirt-ansible-collection/pull/22).
+ - ovirt_disk - Force wait when uploading disk (https://github.com/oVirt/ovirt-ansible-collection/pull/43).
+ - ovirt_disk - Upload_image_path autodetect size (https://github.com/oVirt/ovirt-ansible-collection/pull/19).
+ - ovirt_network - Add support of removing vlan_tag (https://github.com/oVirt/ovirt-ansible-collection/pull/21).
+ - ovirt_vm - Add documentation for custom_script under sysprep (https://github.com/oVirt/ovirt-ansible-collection/pull/52).
+ - ovirt_vm - Hard code nic on_boot to true (https://github.com/oVirt/ovirt-ansible-collection/pull/45).
+ fragments:
+ - 19-ovirt_disk-upload_image_path-autodetect-size.yaml
+ - 21-ovirt_network-add-support-of-removing-vlan_tag.yaml
+ - 22-ovirt_disk-add-upload-image-warning-for-correct-format.yaml
+ - 26-add-ovirt_vm_os_info.yaml
+ - 34-ovirt-inventory-add-creation_time.yaml
+ - 35-ovirt_disk-support-direct-upload-download.yaml
+ - 43-ovirt_disk-force-wait-when-uploading-disk.yaml
+ - 44-ovirt_quota-fix-vcpu_limit-type.yaml
+ - 45-ovirt_vm-hard-code-nic-on_boot-to-true.yaml
+ - 51-ovirt_vm-fix-cd_iso-search-by-name.yaml
+ - 52-ovirt_vm-add-documentation-for-custom_script-under-sysprep.yaml
+ - 57-ovirt_disk-add-backup.yaml
+ - 58-ovirt-inventory-insecure-if-no-cafile-defined.yaml
+ - 60-ovirt_host-add-ssh_port.yaml
+ - 61-ovirt_disk-fix-activate.yaml
+ - 65-ovirt_host_network-fix-custom_properties-default-value.yaml
+ - 66-ovirt_vm-fix-cd_iso-get-all-disks-from-storage-domains.yaml
+ modules:
+ - description: Retrieve information on all supported oVirt/RHV operating systems
+ name: ovirt_vm_os_info
+ namespace: ''
+ release_date: '2020-08-12'
+ 1.1.1:
+ changes:
+ minor_changes:
+ - ovirt_permission - Fix FQCN documentation (https://github.com/oVirt/ovirt-ansible-collection/pull/63).
+ release_date: '2020-08-12'
+ 1.1.2:
+ release_date: '2020-08-17'
+ 1.2.0:
+ changes:
+ bugfixes:
+ - 01_create_target_hosted_engine_vm - Force basic authentication (https://github.com/oVirt/ovirt-ansible-collection/pull/131).
+ - hosted_engine_setup - Allow uppercase characters in mac address (https://github.com/oVirt/ovirt-ansible-collection/pull/150).
+ - hosted_engine_setup - set custom bios type of hosted-engine VM to Q35+SeaBIOS
+ (https://github.com/oVirt/ovirt-ansible-collection/pull/129).
+ - hosted_engine_setup - use zcat instead of gzip (https://github.com/oVirt/ovirt-ansible-collection/pull/130).
+ - ovirt inventory - Add close of connection at the end (https://github.com/oVirt/ovirt-ansible-collection/pull/122).
+    - ovirt_disk - don't move disk when already in storage_domain (https://github.com/oVirt/ovirt-ansible-collection/pull/135)
+ - ovirt_disk - fix upload when direct upload fails (https://github.com/oVirt/ovirt-ansible-collection/pull/120).
+ - ovirt_vm - Fix template search (https://github.com/oVirt/ovirt-ansible-collection/pull/132).
+ - ovirt_vm - Rename q35_sea to q35_sea_bios (https://github.com/oVirt/ovirt-ansible-collection/pull/111).
+ major_changes:
+ - cluster_upgrade - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/94).
+ - disaster_recovery - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/134).
+ - engine_setup - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/69).
+ - hosted_engine_setup - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/106).
+ - image_template - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/95).
+ - infra - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/92).
+ - manageiq - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/97).
+ - repositories - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/96).
+ - shutdown_env - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/112).
+ - vm_infra - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/93).
+ minor_changes:
+ - Add GPL license (https://github.com/oVirt/ovirt-ansible-collection/pull/101).
+ - hosted_engine_setup - Add compatibility_version (https://github.com/oVirt/ovirt-ansible-collection/pull/125).
+ - ovirt_disk - ignore move of HE disks (https://github.com/oVirt/ovirt-ansible-collection/pull/162).
+ - ovirt_nic - Add template_version (https://github.com/oVirt/ovirt-ansible-collection/pull/145).
+ - ovirt_nic_info - Add template (https://github.com/oVirt/ovirt-ansible-collection/pull/146).
+ - ovirt_vm_info - Add current_cd (https://github.com/oVirt/ovirt-ansible-collection/pull/144).
+ fragments:
+ - add-cluster_upgrade-role.yml
+ - add-disaster_recovery-role.yml
+ - add-engine_setup-role.yml
+ - add-gpl-license.yml
+ - add-hosted_engine_setup-role.yml
+ - add-image_template-role.yml
+ - add-infra-role.yml
+ - add-manageiq-role.yml
+ - add-repositories-role.yml
+ - add-shutdown_env-role.yml
+ - add-vm_infra-role.yml
+ - basic_auth-fix_create_target_hosted_engine_vm.yml
+ - hosted_engine_setup-add-compatibility_version.yml
+ - hosted_engine_setup-allow-uppercase-in-mac-address.yml
+ - hosted_engine_setup-set-custom-bios-type.yml
+ - hosted_engine_setup-use-zcat-instead-of-gzip.yml
+ - ovirt-inventory-add-connection-close.yml
+ - ovirt_disk-fix-move.yml
+ - ovirt_disk-fix-upload-when-direct-upload-fails.yml
+ - ovirt_disk-ignore-he-disk-move.yml
+ - ovirt_nic-add-template_version.yml
+ - ovirt_nic_info-add-template.yml
+ - ovirt_vm-fix-template-search.yml
+ - ovirt_vm-rename-q35_sea.yml
+ - ovirt_vm_info-add-current_cd.yml
+ release_date: '2020-10-27'
+ 1.2.1:
+ changes:
+ bugfixes:
+ - disaster_recovery - Fix multiple configuration issues like paths, "~" support,
+ user input messages, etc. (https://github.com/oVirt/ovirt-ansible-collection/pull/160).
+ fragments:
+ - disaster_recovery-fix-configuration-issues.yml
+ release_date: '2020-11-02'
+ 1.2.2:
+ changes:
+ bugfixes:
+ - hosted_engine_setup - Clean VNC encryption config (https://github.com/oVirt/ovirt-ansible-collection/pull/175/).
+ - inventory plugin - Fix timestamp for Python 2 (https://github.com/oVirt/ovirt-ansible-collection/pull/173).
+ fragments:
+ - hosted_engine_setup-clean-vnc-encryption-config.yml
+ - inventory-plugin-fix-python2-timestamp-issue.yml
+ release_date: '2020-11-12'
+ 1.2.3:
+ changes:
+ minor_changes:
+ - engine_setup - Add missing restore task file and vars file (https://github.com/oVirt/ovirt-ansible-collection/pull/180).
+ - hosted_engine_setup - Add after_add_host hook (https://github.com/oVirt/ovirt-ansible-collection/pull/181).
+ fragments:
+ - engine_setup-add-missing-restore-file.yml
+ - he_add-after_add_host-hook.yml
+ release_date: '2020-11-30'
+ 1.2.4:
+ changes:
+ minor_changes:
+    - infra - don't require password for user (https://github.com/oVirt/ovirt-ansible-collection/pull/195).
+ - inventory - correct os_type name (https://github.com/oVirt/ovirt-ansible-collection/pull/194).
+ - ovirt_disk - automatically detect virtual size of qcow image (https://github.com/oVirt/ovirt-ansible-collection/pull/183).
+ fragments:
+ - 183-ovirt_disk-fix-upload-detection.yml
+ - 194-inventory-correct-name-of-os_type.yml
+ - 195-infra-dont-require-password-for-user.yml
+ release_date: '2020-12-14'
+ 1.3.0:
+ changes:
+ major_changes:
+ - ovirt_system_option_info - Add new module (https://github.com/oVirt/ovirt-ansible-collection/pull/206).
+ minor_changes:
+ - ansible-builder - Update bindep (https://github.com/oVirt/ovirt-ansible-collection/pull/197).
+ - hosted_engine_setup - Collect all engine /var/log (https://github.com/oVirt/ovirt-ansible-collection/pull/202).
+ - hosted_engine_setup - Use ovirt_system_option_info instead of REST API (https://github.com/oVirt/ovirt-ansible-collection/pull/209).
+ - ovirt_disk - Add install warning (https://github.com/oVirt/ovirt-ansible-collection/pull/208).
+ - ovirt_info - Fragment add auth suboptions to documentation (https://github.com/oVirt/ovirt-ansible-collection/pull/205).
+ fragments:
+ - 197-update-bindep.yml
+ - 202-hosted_engine_setup-collect-all-engine-log.yml
+ - 205-ovirt_info-fragment-add-auth-suboptions-docs.yml
+ - 206-add-ovirt_system_option_info.yml
+ - 208-ovirt_disk-add-install-warning.yml
+ - 209-hosted_engine_setup-use-ovirt_system_option_info.yml
+ release_date: '2021-01-28'
+ 1.3.1:
+ changes:
+ minor_changes:
+ - hosted_engine_setup - Disable reboot_after_installation (https://github.com/oVirt/ovirt-ansible-collection/pull/218).
+ - ovirt_host - Add reboot_after_installation option (https://github.com/oVirt/ovirt-ansible-collection/pull/217).
+ fragments:
+ - hosted_engine_setup-disable-reboot-after-install.yml
+ - ovirt_host-add-reboot_after_installation.yml
+ release_date: '2021-02-10'
+ 1.4.0:
+ changes:
+ bugfixes:
+    - Set ``auth`` options in the argument spec definition so Ansible will validate
+      the user options
+    - Set ``no_log`` on ``password`` and ``token`` in the ``auth`` dict so the values
+      are not exposed in the invocation log
+ minor_changes:
+ - cluster_upgrade - Add correlation-id header (https://github.com/oVirt/ovirt-ansible-collection/pull/222).
+ - engine_setup - Add skip renew pki confirm (https://github.com/oVirt/ovirt-ansible-collection/pull/228).
+ - examples - Add recipe for removing DM device (https://github.com/oVirt/ovirt-ansible-collection/pull/233).
+ - hosted_engine_setup - Filter devices with unsupported bond mode (https://github.com/oVirt/ovirt-ansible-collection/pull/226).
+ - infra - Add reboot host parameters (https://github.com/oVirt/ovirt-ansible-collection/pull/231).
+ - ovirt_disk - Add SATA support (https://github.com/oVirt/ovirt-ansible-collection/pull/225).
+ - ovirt_user - Add ssh_public_key (https://github.com/oVirt/ovirt-ansible-collection/pull/232)
+ fragments:
+ - 222-cluster_upgrade-add-correlation-id.yml
+ - 225-ovirt_disk-add-sata.yml
+ - 226-hosted_engine_setup-filter-devices.yml
+ - 228-add-skip-renew-pki.yml
+ - 231-infra-add-reboot-params.yml
+ - 232-ovirt_user-add-ssh_public_key.yml
+ - 233-examples-add-recipe-for-removing-DM-device.yml
+ - auth_dict.yml
+ release_date: '2021-03-16'
+ 1.4.1:
+ changes:
+ bugfixes:
+ - hosted_engine_setup - Fix auth revoke (https://github.com/oVirt/ovirt-ansible-collection/pull/237).
+ fragments:
+ - 237-hosted_engine_setup-fix-auth_revoke.yml
+ release_date: '2021-03-22'
+ 1.4.2:
+ changes:
+ minor_changes:
+ - hosted_engine_setup - Add an error message for FIPS on CentOS (https://github.com/oVirt/ovirt-ansible-collection/pull/250).
+ - hosted_engine_setup - Fix the appliance distribution (https://github.com/oVirt/ovirt-ansible-collection/pull/249).
+ - infra - remove target from ovirt_storage_connection (https://github.com/oVirt/ovirt-ansible-collection/pull/252).
+ - ovirt_vm - Allow migration between clusters (https://github.com/oVirt/ovirt-ansible-collection/pull/236).
+ - repositories - Add host ppc (https://github.com/oVirt/ovirt-ansible-collection/pull/248).
+ - repositories - Remove ansible channels from RHV 4.4 (https://github.com/oVirt/ovirt-ansible-collection/pull/242).
+ - repositories - fix ppc repos (https://github.com/oVirt/ovirt-ansible-collection/pull/254).
+ fragments:
+ - 236-ovirt_vm-allow-cluster-migration.yml
+ - 242-repositories-remove-ansible-channels-from-RHV-4.4.yml
+ - 248-repositories-add-host-ppc.yml
+ - 249-hosted_engine_setup-fix-the-appliance-distribution.yml
+ - 250-hosted_engine_setup-add-an-error-message-for-FIPS-on-centos.yml
+ - 252-infra-remove-target-from-ovirt_storage_connection.yml
+ - 254-repositories-fix-ppc-repos.yml
+ release_date: '2021-04-23'
+ 1.5.0:
+ changes:
+ bugfixes:
+ - hosted_engine_setup - Use ovirt_host module to discover iscsi (https://github.com/oVirt/ovirt-ansible-collection/pull/275).
+ - hosted_engine_setup - align with ansible-lint 5.0.0 (https://github.com/oVirt/ovirt-ansible-collection/pull/271).
+ minor_changes:
+ - disaster_recovery - Change conf paths (https://github.com/oVirt/ovirt-ansible-collection/pull/286).
+ - hosted_engine_setup - Add-pause-option-before-engine-setup (https://github.com/oVirt/ovirt-ansible-collection/pull/273).
+ - hosted_engine_setup - Remove leftover code and omit parameters (https://github.com/oVirt/ovirt-ansible-collection/pull/281).
+ - infra - Storage fix parameters typo (https://github.com/oVirt/ovirt-ansible-collection/pull/282).
+ - ovirt_host - Update iscsi target struct (https://github.com/oVirt/ovirt-ansible-collection/pull/274).
+ fragments:
+ - 274-ovirt_host-update-iscsi-target-struct.yml
+ - 282-infra-storage-fix-parameters-typo.yml
+ - 286-dr-change-conf-paths.yml
+ - hosted_engine_setup-add-pause-option-before-engine-setup.yml
+ - hosted_engine_setup-ansible-lint-5.0.0-alignment.yml
+ - hosted_engine_setup-remove-leftovers-code.yml
+ - hosted_engine_setup-use-ovirt_host-module-to-discover-iscsi.yml
+ release_date: '2021-06-04'
+ 1.5.1:
+ changes:
+ bugfixes:
+ - hosted_engine_setup - Filter VLAN devices with bad names (https://github.com/oVirt/ovirt-ansible-collection/pull/238)
+ - hosted_engine_setup - Remove cloud-init configuration (https://github.com/oVirt/ovirt-ansible-collection/pull/295).
+ - ovirt inventory plugin - allow several valid values for the `plugin` key (https://github.com/oVirt/ovirt-ansible-collection/pull/293).
+ minor_changes:
+ - hosted_engine_setup - use-ansible-host (https://github.com/oVirt/ovirt-ansible-collection/pull/277).
+ - infra role - Add external_provider parameter on networks role of infra role
+ (https://github.com/oVirt/ovirt-ansible-collection/pull/297)
+ - ovirt_vm - Add placement_policy_hosts (https://github.com/oVirt/ovirt-ansible-collection/pull/294).
+ fragments:
+ - 238-filter-vlan-devices-with-bad-names.yml
+ - 294-ovirt_vm-add-placement_policy_hosts.yml
+ - 297-ovirt_infra_networks-add-external-provider.yml
+ - hosted_engine_setup-add-host-use_IP.yml
+ - hosted_engine_setup-remove-cloud-init-config.yml
+ - inventory_plugin_key_choices.yml
+ release_date: '2021-06-17'
+ 1.5.2:
+ changes:
+ minor_changes:
+ - hosted_engine_setup - Do not try to sync at end of full_execution (https://github.com/oVirt/ovirt-ansible-collection/pull/305)
+ - ovirt_vm - Add default return value to check_placement_policy (https://github.com/oVirt/ovirt-ansible-collection/pull/301).
+ fragments:
+ - 301-ovirt_vm-add-default-to-check_placement_policy.yml
+ - 305-HE-do-not-sync-at-full-execution-end.yml
+ release_date: '2021-06-23'
+ 1.5.3:
+ changes:
+ minor_changes:
+ - Don't rely on safe_eval being able to do math/concat (https://github.com/oVirt/ovirt-ansible-collection/pull/307)
+ - hosted_engine_setup - Fix engine vm add_host for the target machine (https://github.com/oVirt/ovirt-ansible-collection/pull/311)
+ - hosted_engine_setup - Minor doc update (https://github.com/oVirt/ovirt-ansible-collection/pull/310)
+ fragments:
+ - 307-safe-eval-no-concat.yml
+ - 310-HE-minor-doc-update.yml
+ - 311-fix-add-host-target-vm.yml
+ release_date: '2021-06-25'
+ 1.5.4:
+ changes:
+ bugfixes:
+ - hosted_engine_setup - Use forward network during an IPv6 deployment (https://github.com/oVirt/ovirt-ansible-collection/pull/315)
+ - hosted_engine_setup - remove duplicate tasks (https://github.com/oVirt/ovirt-ansible-collection/pull/314)
+    - ovirt_permission - fix group search that has space in its name (https://github.com/oVirt/ovirt-ansible-collection/pull/318)
+ minor_changes:
+ - hosted_engine_setup - Allow FIPS on HE VM (https://github.com/oVirt/ovirt-ansible-collection/pull/313)
+ fragments:
+ - 313-allow-fips-on-HE-vm.yml
+ - 314-remove-duplicate-tasks.yml
+ - 315-use-forward-netrok-during-ipv6-deployment.yml
+ - 318-ovirt_permission-fix-group-search-that-has-space.yml
+ release_date: '2021-07-22'
+ 1.5.5:
+ changes:
+ bugfixes:
+ - ovirt_auth - Fix password and username requirements (https://github.com/oVirt/ovirt-ansible-collection/pull/325).
+ - ovirt_disk - Fix update_check with no VM (https://github.com/oVirt/ovirt-ansible-collection/pull/323).
+ major_changes:
+ - remove_stale_lun - Add role for removing stale LUN (https://bugzilla.redhat.com/1966873).
+ minor_changes:
+ - engine_setup - Wait for webserver up after engine-config reboot (https://github.com/oVirt/ovirt-ansible-collection/pull/324).
+ - hosted_engine_setup - Pause deployment on failure of `engine-backup --mode=restore`
+ (https://github.com/oVirt/ovirt-ansible-collection/pull/327).
+ - hosted_engine_setup - Text change - Consistently use 'bootstrap engine VM'
+ (https://github.com/oVirt/ovirt-ansible-collection/pull/328).
+ - hosted_engine_setup - Update Ansible requirements in README (https://github.com/oVirt/ovirt-ansible-collection/pull/321)
+ - readme - Update Ansible requirement (https://github.com/oVirt/ovirt-ansible-collection/pull/326).
+ fragments:
+ - 321-hosted_engine_setup-update-ansible-requirements-in-readme.yml
+ - 323-ovirt_disk-fix-update_check-with-no-vm.yml
+ - 324-engine_setup-wait-for-engine-config-reboot.yml
+ - 325-ovirt_auth-fix-passowrd-and-username-req.yml
+ - 326-readme-update-ansible-requirements.yml
+ - 327-pause-on-restore.yml
+ - 328-bootstrap-texts.yml
+ - add-remove_stale_lun-role.yml
+ release_date: '2021-08-11'
+ 1.6.1:
+ changes:
+ bugfixes:
+ - hosted_engine_setup - Use default bridge for IPv6 advertisements (https://github.com/oVirt/ovirt-ansible-collection/pull/331)
+ - ovirt_auth - Fix token no_log (https://github.com/oVirt/ovirt-ansible-collection/pull/332).
+ fragments:
+ - 331-use-default-bridge-for-ipv6-advertisements.yml
+ - 332-ovirt_auth-fix-token-no_log.yml
+ release_date: '2021-08-25'
+ 1.6.2:
+ changes:
+ minor_changes:
+    - remove_stale_lun - Fix example for `remove_stale_lun` role to be able to run
+ it from engine (https://github.com/oVirt/ovirt-ansible-collection/pull/334).
+ fragments:
+ - 334-remove_stale_lun-role.yml
+ release_date: '2021-08-26'
+ 2.0.0:
+ changes:
+ bugfixes:
+ - hosted_engine_setup - Add OpenSCAP security profile name parameter (https://github.com/oVirt/ovirt-ansible-collection/pull/411).
+    - hosted_engine_setup - Add an option to set the storage format when creating
+ a storage domain and use it (https://github.com/oVirt/ovirt-ansible-collection/pull/463).
+ - hosted_engine_setup - Adjust files permissions (https://github.com/oVirt/ovirt-ansible-collection/pull/409).
+ - hosted_engine_setup - Fix call to engine-psql for vds_spm_id (https://github.com/oVirt/ovirt-ansible-collection/pull/459).
+ - hosted_engine_setup - Fix cloud-init package removal in airgapped environment
+ (https://github.com/oVirt/ovirt-ansible-collection/pull/442)
+ - hosted_engine_setup - Remove SPICE graphic protocol (https://github.com/oVirt/ovirt-ansible-collection/pull/394).
+ - hosted_engine_setup - Replace xml community module (https://github.com/oVirt/ovirt-ansible-collection/pull/438).
+ - hosted_engine_setup - Support disa stig profile (https://github.com/oVirt/ovirt-ansible-collection/pull/426).
+ - hosted_engine_setup - Use cat command (https://github.com/oVirt/ovirt-ansible-collection/pull/443).
+ - hosted_engine_setup - Use tpgt in iscsi login (https://github.com/oVirt/ovirt-ansible-collection/pull/338)
+ - image_template - Remove static no - unsupported in ansible 2.12 (https://github.com/oVirt/ovirt-ansible-collection/pull/341).
+ - ovirt_host - Fix failed_state_after_reinstall condition (https://github.com/oVirt/ovirt-ansible-collection/pull/371).
+ - ovirt_template - Fix creating templates where the base template version number
+ is not 1 (https://github.com/oVirt/ovirt-ansible-collection/pull/370).
+ - repositories - Fix dnf module variable (https://github.com/oVirt/ovirt-ansible-collection/pull/454).
+ - repositories - fix force flag on subscription-manager (https://github.com/oVirt/ovirt-ansible-collection/pull/430).
+ major_changes:
+ - manageiq - role removed (https://github.com/oVirt/ovirt-ansible-collection/pull/375).
+ minor_changes:
+ - Add json_query filter (https://github.com/oVirt/ovirt-ansible-collection/pull/436).
+ - cluster_upgrade - Add progress tracking via event logs to the role (https://github.com/oVirt/ovirt-ansible-collection/pull/415)
+ - cluster_upgrade - Directly log progress to the cluster (https://github.com/oVirt/ovirt-ansible-collection/pull/449)
+ - engine_setup - Honor ovirt_engine_setup_offline variable (https://github.com/oVirt/ovirt-ansible-collection/pull/381).
+ - engine_setup - Prepare answer files and default values for 4.5 release (https://github.com/oVirt/ovirt-ansible-collection/pull/414).
+    - gluster_heal_info - Replacing the gluster module with the CLI to support RHV automation
+ hub (https://github.com/oVirt/ovirt-ansible-collection/pull/340).
+ - hosted_engine - Replace virt_net and xml with commands (https://github.com/oVirt/ovirt-ansible-collection/pull/359).
+ - hosted_engine_setup - Fix default gateway variable name (https://github.com/oVirt/ovirt-ansible-collection/pull/423).
+ - hosted_engine_setup - Fix default gateway variable name (https://github.com/oVirt/ovirt-ansible-collection/pull/423).
+ - hosted_engine_setup - Fix permissions on copied engine logs, needed for OpenSCAP
+ (https://github.com/oVirt/ovirt-ansible-collection/pull/404).
+ - hosted_engine_setup - Honor he_offline_deployment variable (https://github.com/oVirt/ovirt-ansible-collection/pull/380).
+ - hosted_engine_setup - Replace calls to psql as postgres with engine_psql.sh
+ (https://github.com/oVirt/ovirt-ansible-collection/pull/453).
+ - hosted_engine_setup - configured abrt initial files only when needed (https://github.com/oVirt/ovirt-ansible-collection/pull/397).
+ - info - Rename follows to follow parameter and add alias (https://github.com/oVirt/ovirt-ansible-collection/pull/367).
+ - info - bump deprecate version for fetch_nested and nested_attributes (https://github.com/oVirt/ovirt-ansible-collection/pull/378).
+ - info modules - Add follow link url to api model links_summary
+ - info modules - Enable follow parameter (https://github.com/oVirt/ovirt-ansible-collection/pull/355).
+ - manageiq - add deprecation info (https://github.com/oVirt/ovirt-ansible-collection/pull/384).
+ - ovirt_disk - Add warning for disk attachments (https://github.com/oVirt/ovirt-ansible-collection/pull/347).
+ - ovirt_disk - Use imageio client (https://github.com/oVirt/ovirt-ansible-collection/pull/358).
+ - ovirt_event - enable correlation_id on events (https://github.com/oVirt/ovirt-ansible-collection/pull/368).
+ - ovirt_host - Add enroll_certificate (https://github.com/oVirt/ovirt-ansible-collection/pull/439).
+ - ovirt_permission - add mac pool (https://github.com/oVirt/ovirt-ansible-collection/pull/353).
+ - ovirt_remove_stale_lun - Allow user to remove multiple LUNs (https://github.com/oVirt/ovirt-ansible-collection/pull/357).
+ - ovirt_remove_stale_lun - Retry "multipath -f" while removing the LUNs (https://github.com/oVirt/ovirt-ansible-collection/pull/382).
+ - ovirt_remove_stale_lun - Use add_host instead of delegate_to (https://github.com/oVirt/ovirt-ansible-collection/pull/390).
+ - ovirt_storage_template_info - fix docs (https://github.com/oVirt/ovirt-ansible-collection/pull/356).
+ - ovirt_storage_vm_info - fix docs (https://github.com/oVirt/ovirt-ansible-collection/pull/356).
+ - ovirt_template - Add ova import of template (https://github.com/oVirt/ovirt-ansible-collection/pull/304).
+ - ovirt_template - add boot_menu and bios_type https://github.com/oVirt/ovirt-ansible-collection/pull/465).
+ - ovirt_vm - Add display file_transfer_enabled and copy_paste_enabled (https://github.com/oVirt/ovirt-ansible-collection/pull/339).
+ - ovirt_vm - Add virtio_scsi_enabled and multi_queues_enabled (https://github.com/oVirt/ovirt-ansible-collection/pull/348).
+ - ovirt_vm - Add virtio_scsi_multi_queues (https://github.com/oVirt/ovirt-ansible-collection/pull/373).
+ - plugins - Remove unused imports (https://github.com/oVirt/ovirt-ansible-collection/pull/444).
+ - repositories - Add to the documentation variable priority (https://github.com/oVirt/ovirt-ansible-collection/pull/440).
+ - repositories - Replace redhat_subscription and rhsm_repository with command
+ (https://github.com/oVirt/ovirt-ansible-collection/pull/346).
+ - repositories - Update host and engine repositories to 4.4.9 (https://github.com/oVirt/ovirt-ansible-collection/pull/363).
+ - repositories - add no_log to register (https://github.com/oVirt/ovirt-ansible-collection/pull/350).
+    - repositories - add satellite support (https://github.com/oVirt/ovirt-ansible-collection/pull/431).
+ - vm_infra - Add no_log to Manage VMs state task (https://github.com/oVirt/ovirt-ansible-collection/pull/417).
+ fragments:
+ - 304-ovirt_template-add-ova-import.yml
+ - 338-use-tpgt-in-iscsi-login.yml
+ - 339-ovirt_vm-add-display-params.yml
+ - 340-replace-gluster-module-to-cli.yml
+ - 341-image_template-fix-include-tasks.yml
+ - 346-repositories-replace-redhat_subscription-and-rhsm_repository-with-command.yml
+ - 347-ovirt_disk-add-warning-for-disk-attachemtns.yml
+ - 348-ovirt_vm-add-virtio_scsi_enabled-and-multi_queues_enabled.yml
+ - 350-repositories-add-no_log-to-register.yml
+ - 351-ovirt_remove_stale_lun-allow-user-to-remove-multiple-luns.yml
+ - 352-ovirt_remove_stale_lun-Retry-Multipath-f-while-removing-the-luns.yml
+ - 353-ovirt_permission-add-mac-pool.yml
+ - 355-info-modules-enable-follow-parameter.yml
+ - 356-ovirt_storage_vm_info-and-ovirt_storage_template_info-fix-docs.yml
+ - 358-ovirt_disk-use-imageio-client.yml
+ - 359-he-replace-virt_net-and-xml-with-commands.yml
+ - 363-repositories-update-to-4.4.9.yml
+ - 367-info-rename-follows-to-follows.yml
+ - 368-ovirt_event-correlation_id.yml
+ - 370-ovirt_template-fix-template-version.yml
+ - 371-ovirt_host-fix-failed_state_after_reinstall-condition.yml
+ - 371-ovirt_vm-add-virtio_scsi_multi_queues.yml
+ - 375-manageiq-deprecate.yml
+ - 378-bump-deprecate-version.yml
+ - 380-hosted_engine_setup-honor-he_offline_deployment.yaml
+ - 381-engine_setup-honor-ovirt_engine_setup_offline.yaml
+ - 384-manageiq-add-deprecation-info.yaml
+ - 390-ovirt_remove_stale_lun-Use-add_host-instead-of-delegate_to.yml
+ - 394-hosted_engine_setup-remove-spice-graphic-protocol.yml
+ - 396-info-modules-add-follow-link-url.yml
+ - 397-he_engine_abrt_configuration.yaml
+ - 404_fix_permissions_openscap.yml
+ - 409-hosted_engine_setup-Adjust-files-permissions.yml
+ - 411-hosted_engine_setup-Add-security-profile-name-parameter.yml
+ - 414-prepare-for-4.5-release.yaml
+ - 415-cluster_upgrade-progress.yml
+ - 417-add-no_log-to-manage-vms-state-task.yml
+ - 423-hosted_engine_setup-fix-default-gateway-variable-name.yml
+ - 425-cluster_upgrade-shutdown-vms-only-on-pinned-hosts.yml
+ - 426-ovirt_hosted-engine_setup-support-disa-stig-profile.yml
+ - 430-repositories-fix-force-flag-on-subscription-manager.yml
+ - 431-repositories-add-satellite-support.yml
+ - 436-add-json_query-filter.yml
+ - 438-hosted_engine_setup-replace-xml-module.yml
+ - 439-ovirt_host-add-enroll_certificate.yml
+ - 440-repositories-update-docs.yml
+ - 442-cloud-init_fix_removal_in_airgapped_environment.yml
+ - 443-ovirt_hosted_engine_setup-use-cat-command.yml
+ - 444-plugins-remove_unused_imports.yml
+ - 449-cluster_upgrade-progress-log.yml
+ - 453-replace-calls-to-psql-as-postgres-with-engine_psql.yml
+ - 454-repositories-fix-dnf-module-variable.yml
+ - 459-hosted_engine_setup-fix-call-to-engine-psql-for-vds_spm_id.yml
+ - 465-ovirt_template-add-boot_menu-and-bios_type.yml
+ plugins:
+ callback:
+ - description: Output the log of ansible
+ name: stdout
+ namespace: null
+ release_date: '2022-04-03'
+ 2.0.1:
+ changes:
+ bugfixes:
+ - Make storage_format optional - do not fail if missing (https://github.com/oVirt/ovirt-ansible-collection/pull/471).
+ fragments:
+ - 471-make-storage-format-optional.yml
+ release_date: '2022-04-05'
+ 2.0.2:
+ changes:
+ bugfixes:
+ - Fix progress logging via REST (https://github.com/oVirt/ovirt-ansible-collection/pull/474).
+ fragments:
+ - 471-make-storage-format-optional.yml
+ - 474-cluster-update-log-progress-fix.yml
+ release_date: '2022-04-06'
+ 2.0.3:
+ changes:
+ bugfixes:
+    - inventory - Fix url address (https://github.com/oVirt/ovirt-ansible-collection/pull/482).
+ - ovirt_vm - Fix creating a RAW VM from a COW template (https://github.com/oVirt/ovirt-ansible-collection/pull/466).
+ minor_changes:
+ - ovirt_affinity_group - Add affinity labels (https://github.com/oVirt/ovirt-ansible-collection/pull/481).
+ fragments:
+ - 466-ovirt_vm-fix-creating-raw-vm-from-cow-template.yml
+ - 481-ovirt_affinity_group-add-affinity-labels.yml
+ - 482-inventory-fix-url-address.yml
+ release_date: '2022-04-13'
+ 2.0.4:
+ changes:
+ bugfixes:
+ - Fix the admin user name when using keycloak (https://github.com/oVirt/ovirt-ansible-collection/pull/488).
+ - Use cryptography < 37.0.0, as 37.0.0 emits a warning that fails testing. (https://github.com/oVirt/ovirt-ansible-collection/pull/492).
+ - Use rstcheck < 3.5.0, as 3.5.0 emits a warning that fails testing. (https://github.com/oVirt/ovirt-ansible-collection/pull/490).
+ - cluster_upgrade - fix wait_condition (https://github.com/oVirt/ovirt-ansible-collection/pull/510).
+ - hosted_engine_setup - Allocate 128MiB instead of 1GiB for he_metadata (https://github.com/oVirt/ovirt-ansible-collection/pull/489).
+ - hosted_engine_setup - Collect logs also on failures in 03_hosted_engine_final_tasks.yml
+ (https://github.com/oVirt/ovirt-ansible-collection/pull/504).
+ - hosted_engine_setup - Fix keycloak activation/checking (https://github.com/oVirt/ovirt-ansible-collection/pull/509).
+ - hosted_engine_setup - Require 'detail' to be 'Up' (https://github.com/oVirt/ovirt-ansible-collection/pull/498).
+ - hosted_engine_setup - fix archive ownership (https://github.com/oVirt/ovirt-ansible-collection/pull/501).
+ - infra - add warning for multiple storage connections (https://github.com/oVirt/ovirt-ansible-collection/pull/500).
+ fragments:
+ - 488-configure-keycloak.yml
+ - 489-ovirt_hosted_engine_setup-use-10mib-intead-of-1gib-for-metadata.yml
+ - 490-use-rstcheck-3.4.yml
+ - 492-use-cryptography-older-than-37.yml
+ - 498-require-detail-up.yml
+ - 500-role-infra-add-warning-for-multiple-storage-connections.yml
+ - 501-fix-archive-ownership.yml
+ - 504-collect-final-logs.yml
+ - 509-fix-keycloak-activation.yml
+ - 510-cluster-upgrade-fix-wait_condition.yml
+ release_date: '2022-06-03'
+ 2.1.0:
+ changes:
+ bugfixes:
+ - hosted_engine_setup - Fix "'ansible' ModuleNotFoundError" in Disaster Recovery
+ scripts (https://github.com/oVirt/ovirt-ansible-collection/pull/503).
+ - hosted_engine_setup - Use command instead of firewalld module (https://github.com/oVirt/ovirt-ansible-collection/pull/508).
+ - ovirt_vm - Fix parsing None arguments (https://github.com/oVirt/ovirt-ansible-collection/pull/486).
+ - ovirt_vm - check if the snapshot exists (https://github.com/oVirt/ovirt-ansible-collection/pull/525).
+ minor_changes:
+ - Add convert_to_bytes filter (https://github.com/oVirt/ovirt-ansible-collection/pull/515).
+ - automation - Use python38 on el8 with ansible-core 2.12 and python39 on el9
+ with ansible-core 2.13 (https://github.com/oVirt/ovirt-ansible-collection/pull/518).
+    - cloud.py - Sync with origin (https://github.com/oVirt/ovirt-ansible-collection/pull/519).
+ - engine_setup - Allow to disable cert validation (https://github.com/oVirt/ovirt-ansible-collection/pull/517).
+ - hosted_engine_setup - make vdsm config cleanup optional (https://github.com/oVirt/ovirt-ansible-collection/pull/521).
+ - ovirt - Remove deprecated distutils (https://github.com/oVirt/ovirt-ansible-collection/pull/516).
+ - ovirt_vm - add wait_after_lease (https://github.com/oVirt/ovirt-ansible-collection/pull/524).
+ fragments:
+ - 483-ovirt_vm-fix-None-args.yml
+ - 503-fix-ansible-ModuleNotFoundError.yml
+ - 508-ovirt_hosted_engine_setup-use-firewalld-command.yml
+ - 515-add-convert_to_bytes-filter.yml
+ - 516-remove-distutils-usage.yml
+ - 517-engien_setup-allow-to-disable-cert-validation.yml
+ - 518-ci-changes.yml
+ - 519-sync-cloud_py.yml
+ - 521-optional-cleanup.yml
+ - 524-ovirt_vm-add-wait_after_lease.yml
+ - 525-ovirt_vm-check-if-snap-exists.yml
+ release_date: '2022-06-09'
+ 2.2.0:
+ changes:
+ bugfixes:
+ - HE - Handle migration to hosts that use systemd-coredump (https://github.com/oVirt/ovirt-ansible-collection/pull/557).
+ - cluster_upgrade - Fix starting up pinned vms (https://github.com/oVirt/ovirt-ansible-collection/pull/532).
+ - he - Align role with ansible-lint-6.0 (https://github.com/oVirt/ovirt-ansible-collection/pull/545).
+ - hosted_engine - Specify fqcn for ovirt_system_option_info (https://github.com/oVirt/ovirt-ansible-collection/pull/536).
+ - hosted_engine_setup - Fix cleanup on el9 (https://github.com/oVirt/ovirt-ansible-collection/pull/533).
+ - image_template - Remove static (https://github.com/oVirt/ovirt-ansible-collection/pull/537).
+ - image_template - Remove static no - unsupported in ansible 2.12 (https://github.com/oVirt/ovirt-ansible-collection/pull/341).
+ - ovirt_host - Fix host wait (https://github.com/oVirt/ovirt-ansible-collection/pull/531).
+ - ovirt_host - Fix restarted wait condition (https://github.com/oVirt/ovirt-ansible-collection/pull/551).
+ - ovirt_storage_domain - Fix inaccessible exception (https://github.com/oVirt/ovirt-ansible-collection/pull/534).
+    - ovirt_vm - check if the user entered a graphical protocol (https://github.com/oVirt/ovirt-ansible-collection/pull/542).
+ - repositories - Move fips check to satellite CA install block (https://github.com/oVirt/ovirt-ansible-collection/pull/553).
+ - shutdown_env - Align role with ansible-lint-6.0 (https://github.com/oVirt/ovirt-ansible-collection/pull/544).
+ minor_changes:
+ - During he_setup, configure ovn with he_host_name for correct operation of
+ ovn (https://github.com/oVirt/ovirt-ansible-collection/pull/563).
+ - Fix "ansible-lint" version 6.0.0 "yaml" violations for "disaster_recovery"
+ role (https://github.com/oVirt/ovirt-ansible-collection/pull/543).
+ - Fix "ansible-lint" version 6.0.0 violations for "disaster_recovery" & "remove_stale_lun"
+ roles (https://github.com/oVirt/ovirt-ansible-collection/pull/554).
+ - Fix ansible-lint for basic roles (https://github.com/oVirt/ovirt-ansible-collection/pull/280).
+ - Updating the documentation - "vm_name" / "vm_id" and/or disk "id" parameter(s)
+ are required when extending disk with non-unique name (https://github.com/oVirt/ovirt-ansible-collection/pull/559).
+    - gluster_heal_info - Replacing the gluster module with the CLI to support RHV automation
+ hub (https://github.com/oVirt/ovirt-ansible-collection/pull/340).
+ - ovirt_disk - Add warning for disk attachments (https://github.com/oVirt/ovirt-ansible-collection/pull/347).
+ - ovirt_disk - Fix disk attachment to VM (https://github.com/oVirt/ovirt-ansible-collection/pull/361).
+ - ovirt_qos, ovirt_disk_profile, ovirt_disk - Add modules to allow for creation
+ and updating of disk_profiles (https://github.com/oVirt/ovirt-ansible-collection/pull/422).
+ - ovirt_snapshot - Add vm_id to select VM (https://github.com/oVirt/ovirt-ansible-collection/pull/550).
+ - ovirt_vm - Add reset of VM (https://github.com/oVirt/ovirt-ansible-collection/pull/538).
+ - ovirt_vm - Add virtio_scsi_enabled and multi_queues_enabled (https://github.com/oVirt/ovirt-ansible-collection/pull/348).
+ - ovirt_vm - add volatile (https://github.com/oVirt/ovirt-ansible-collection/pull/539).
+ - repositories - Add ovirt_repositories_rhsm_environment and FIPS fix (https://github.com/oVirt/ovirt-ansible-collection/pull/483).
+ - repositories - Replace redhat_subscription and rhsm_repository with command
+ (https://github.com/oVirt/ovirt-ansible-collection/pull/346).
+ fragments:
+ - 280-fix-ansible-lint-for-basic-roles.yml
+ - 340-replace-gluster-module-to-cli.yml
+ - 341-image_template-fix-include-tasks.yml
+ - 346-repositories-replace-redhat_subscription-and-rhsm_repository-with-command.yml
+ - 347-ovirt_disk-add-warning-for-disk-attachemtns.yml
+ - 348-ovirt_vm-add-virtio_scsi_enabled-and-multi_queues_enabled.yml
+ - 361-ovirt_disk-fix-disk-attachment.yml
+ - 422-add-qos-and-disk_profle.yml
+ - 483-repositories-add-rhsm_environment.yml
+ - 531-ovirt_host-fix-host-wait.yml
+ - 532-cluster_upgrade-fix-starting-up-pinned-vms.yml
+ - 533_fix_cleanup.yml
+ - 534-ovirt_storage_domain-fix-inaccessible-error.yml
+ - 536-he-specify-fqcn.yml
+ - 537-image_template-remove-static.yml
+ - 538-ovirt_vm-add-reset.yml
+ - 539-ovirt_vm-add-volatile.yml
+ - 542-ovirt_vm-check-graphical-protocol.yml
+ - 543-fix-ansible-lint-for_disaster_recovery.yml
+ - 544-shutdown_env-align-with-ansible-lint-6.0.yml
+ - 545-he-setup-align-with-ansible-lint-6.0.yml
+ - 550-ovirt_snapshot-add-vm_id.yml
+ - 551-ovirt_host-fix-restarted-wait-condition.yml
+ - 553-repositories-move-check-fips.yml
+ - 554-fix-ansible_lint-for-disaster_recovery-and-remove_stale_lun.yml
+ - 557-fix-he-migration-to-systemd-coredump-hosts.yml
+ - 559-update-documentation-ovirt_disk-requires-extra_params-when-extending-disk-with-non-unique-name.yml
+ - 563-he-setup-configure-ovn-with-he-host-fqdn.yml
+ release_date: '2022-07-25'
+ 2.2.1:
+ changes:
+ bugfixes:
+ - hosted_engine_setup - fix hosted-engine.conf permissions and ownership (https://github.com/oVirt/ovirt-ansible-collection/pull/569).
+ minor_changes:
+ - During he_setup, configure ovn with he_host_address (https://github.com/oVirt/ovirt-ansible-collection/pull/568).
+ fragments:
+ - 568-he-setup-configure-ovn-with-he-host-fqdn.yml
+ - 569-heconf-permissions.yml
+ release_date: '2022-08-03'
+ 2.2.2:
+ changes:
+ bugfixes:
+ - hosted_engine_setup - Detect hosted-engine-ha version using /usr/libexec/platform-python
+ (https://github.com/oVirt/ovirt-ansible-collection/pull/573).
+ - hosted_engine_setup - update ansible version in README (https://github.com/oVirt/ovirt-ansible-collection/pull/571).
+ - repositories - Add mod_auth_openidc:2.3 and nodejs:14 to dnf modules (https://github.com/oVirt/ovirt-ansible-collection/pull/578).
+ fragments:
+ - 571-hosted_engine_setup-update-ansible-version-in-readme.yml
+ - 573-hosted_engine_setup-fetch-hosted-engine-ha-version-by-rpm-command.yml
+ - 578-repositories-Add-mod_auth_openidc-and-nodejs-to-dnf-modules.yml
+ release_date: '2022-08-09'
+ 2.2.3:
+ changes:
+ bugfixes:
+ - cluster_upgrade - skip host upgrades without anything to update (https://github.com/oVirt/ovirt-ansible-collection/pull/580).
+ - hosted_engine_setup - restore - remove host also based on name (https://github.com/oVirt/ovirt-ansible-collection/pull/567).
+ - repositories - Fix example variable names (https://github.com/oVirt/ovirt-ansible-collection/pull/582).
+ minor_changes:
+ - hosted_engine_setup - fix ovirt-provider-ovn-driver broken link (https://github.com/oVirt/ovirt-ansible-collection/pull/581).
+ fragments:
+ - 567-hosted_engine_setup-remove-host-based-on-name.yml
+ - 580-cluster_upgrade-skip-host-upgrades-without-anything-to-update.yml
+ - 581-hosted_engine_setup-fix-broken-link.yml
+ - 582-repositories-fix-example-variable-names.yml
+ release_date: '2022-08-15'
+ 2.3.0:
+ changes:
+ bugfixes:
+ - Fix ovirtvmipsv4 when using attribute (https://github.com/oVirt/ovirt-ansible-collection/pull/596).
+ - he-setup - fix static ipv6 ifcfg setup (https://github.com/oVirt/ovirt-ansible-collection/pull/592).
+ - ovirt_host - Honor activate and reboot_after_installation when they are set
+ to false with reinstalled host state (https://github.com/oVirt/ovirt-ansible-collection/pull/587).
+ - repositories - RHV 4.4 SP1 is supported only on RHEL 8.6 EUS (https://github.com/oVirt/ovirt-ansible-collection/pull/576).
+ minor_changes:
+ - filters - Add documentation to all filters (https://github.com/oVirt/ovirt-ansible-collection/pull/603).
+ - ovirt_disk - Add read_only param for disk attachments (https://github.com/oVirt/ovirt-ansible-collection/pull/597).
+ - ovirt_disk - Fix disk attachment to VM (https://github.com/oVirt/ovirt-ansible-collection/pull/361).
+ fragments:
+ - 361-ovirt_disk-fix-disk-attachment.yml
+ - 576-repositories-rhv-4.4-sp1-is-supported-only-on-rhel-8.6-eus.yml
+ - 587-ovirt_host-honor-activate-and-reboot-params.yml
+ - 592-he-setup-static-ipv6.yml
+ - 596-fix-ovirtvmipsv4-when-using-attr.yml
+ - 597-ovirt_disk-add-read_only-param.yml
+ - 603-add-filter-docs.yml
+ release_date: '2022-10-13'
+ 2.3.1:
+ changes:
+ bugfixes:
+ - filters - Fix ovirtvmipsv4 with attribute and network (https://github.com/oVirt/ovirt-ansible-collection/pull/607).
+ - filters - Fix ovirtvmipsv4 with filter to list (https://github.com/oVirt/ovirt-ansible-collection/pull/609).
+ - ovirt_host - Fix kernel_params elements type (https://github.com/oVirt/ovirt-ansible-collection/pull/608).
+ fragments:
+ - 607-filters-fix-ovirtvmipsv4-with-atribute-and-network.yml
+ - 608-fix-ovirt_host-kernel_params-type.yml
+ - 609-filterip4-fix-filter-list.yml
+ release_date: '2022-10-27'
+ 2.4.0:
+ changes:
+ bugfixes:
+ - cluster_upgrade - Add default random uuid to engine_correlation_id (https://github.com/oVirt/ovirt-ansible-collection/pull/624).
+ - image_template - Add template_bios_type (https://github.com/oVirt/ovirt-ansible-collection/pull/620).
+ fragments:
+ - 625-cluster_upgrade-add-default-random-uuid-to-engine_correlation_id.yml
+ - 626-image_template-add-template_bios_type.yml
+ release_date: '2022-11-15'
+ 2.4.1:
+ changes:
+ bugfixes:
+ - cluster_upgrade - Fix the engine_correlation_id location (https://github.com/oVirt/ovirt-ansible-collection/pull/637).
+ fragments:
+ - 637-cluster_upgrade-fix-the-engine_correlation_id-location.yml
+ release_date: '2022-11-28'
diff --git a/ansible_collections/ovirt/ovirt/changelogs/config.yaml b/ansible_collections/ovirt/ovirt/changelogs/config.yaml
new file mode 100644
index 000000000..929a04004
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/changelogs/config.yaml
@@ -0,0 +1,31 @@
+changelog_filename_template: ../CHANGELOG.rst
+changelog_filename_version_depth: 0
+changes_file: changelog.yaml
+changes_format: combined
+ignore_other_fragment_extensions: true
+keep_fragments: false
+mention_ancestor: true
+new_plugins_after_name: removed_features
+notesdir: fragments
+prelude_section_name: release_summary
+prelude_section_title: Release Summary
+sections:
+- - major_changes
+ - Major Changes
+- - minor_changes
+ - Minor Changes
+- - breaking_changes
+ - Breaking Changes / Porting Guide
+- - deprecated_features
+ - Deprecated Features
+- - removed_features
+ - Removed Features (previously deprecated)
+- - security_fixes
+ - Security Fixes
+- - bugfixes
+ - Bugfixes
+- - known_issues
+ - Known Issues
+title: ovirt.ovirt
+trivial_section_name: trivial
+use_fqcn: true
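
With this config, each file under the `fragments` notesdir is a small YAML document whose top-level keys match the section names listed above. A minimal sketch of such a fragment (filename and PR number are hypothetical, for illustration only):

    # changelogs/fragments/999-ovirt_vm-fix-example.yml  (hypothetical)
    bugfixes:
      - ovirt_vm - fix an example issue (https://github.com/oVirt/ovirt-ansible-collection/pull/999).
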
diff --git a/ansible_collections/ovirt/ovirt/changelogs/fragments/.placeholder b/ansible_collections/ovirt/ovirt/changelogs/fragments/.placeholder
new file mode 100644
index 000000000..1a791779a
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/changelogs/fragments/.placeholder
@@ -0,0 +1 @@
+This file is a placeholder so that the fragments folder is not deleted when the maintainer runs a build.
diff --git a/ansible_collections/ovirt/ovirt/examples/filters/ovirtdiff.yml b/ansible_collections/ovirt/ovirt/examples/filters/ovirtdiff.yml
new file mode 100644
index 000000000..bd9c1ffde
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/examples/filters/ovirtdiff.yml
@@ -0,0 +1,33 @@
+- hosts: localhost
+ connection: local
+ vars_files:
+ # Contains encrypted `engine_password` variable using ansible-vault
+ - passwords.yml
+ vars:
+ myvm: centos7
+ tasks:
+ - name: Login
+ ovirt_auth:
+ url: "https://ovirt-engine.example.com/ovirt-engine/api"
+ password: "{{ engine_password | default(omit) }}"
+ username: "admin@internal"
+
+ - name: Get VM myvm
+ ovirt_vm_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: 'name={{ myvm }}'
+ next_run: false
+ register: vm
+
+ - name: Get next_run of VM myvm
+ ovirt_vm_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: 'name={{ myvm }}'
+ next_run: true
+ register: vm_next_run
+
+ - name: Print what will be changed in next run of the VM
+ debug:
+ msg: "{{ vm.ovirt_vms[0] | ovirt.ovirt.ovirtdiff(vm_next_run.ovirt_vms[0]) }}"
+ collections:
+ - ovirt.ovirt
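
Since the play loads `passwords.yml` as a vault-encrypted vars file, a typical invocation (illustrative) prompts for the vault password:

    ansible-playbook ovirtdiff.yml --ask-vault-pass
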
diff --git a/ansible_collections/ovirt/ovirt/examples/filters/vmips.yml b/ansible_collections/ovirt/ovirt/examples/filters/vmips.yml
new file mode 100644
index 000000000..24706c6bd
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/examples/filters/vmips.yml
@@ -0,0 +1,77 @@
+- hosts: localhost
+ connection: local
+ vars:
+ myvm: centos8*
+ tasks:
+ - name: Get VMs
+ ovirt_vm_info:
+ auth:
+ url: "https://ovirt-engine.example.com/ovirt-engine/api"
+ username: "admin@internal"
+ password: "123456"
+ insecure: true
+ pattern: 'name={{ myvm }}'
+ fetch_nested: true
+ nested_attributes: ips
+ register: vms
+ - name: Print VM IP
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmip }}"
+
+ - name: Print VM all IPs
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmips }}"
+
+ - name: Print VM IPv4
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipv4 }}"
+
+ - name: Print VM all IPv4
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipsv4 }}"
+
+ - name: Print VM all IPv4 from specific network
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipsv4(network_ip='192.168.2.0/24') }}"
+
+ - name: Print VM IPv6
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipv6 }}"
+
+ - name: Print VM all IPv6
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipsv6 }}"
+
+ # *********************************************************
+ # *********************************************************
+ - name: ----
+ debug:
+ msg: "-------------------------------------"
+ # *********************************************************
+ # Print VM IPs as dictionaries with name as key
+ # *********************************************************
+ - name: Print VM IP
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmip(attr='name') }}"
+
+ - name: Print VM all IPs
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmips(attr='name') }}"
+
+ - name: Print VM IPv4
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipv4(attr='name') }}"
+
+ - name: Print VM all IPv4
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipsv4(attr='name') }}"
+
+ - name: Print VM IPv6
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipv6(attr='name') }}"
+
+ - name: Print VM all IPv6
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipsv6(attr='name') }}"
+ collections:
+ - ovirt.ovirt
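
As the task names suggest, the singular filters (ovirtvmip, ovirtvmipv4, ovirtvmipv6) pick one address per VM while the plural variants list all of them, and passing attr='name' keys the result by VM name instead of returning a flat list. An illustrative (made-up addresses) shape of the two result forms:

    vms.ovirt_vms | ovirt.ovirt.ovirtvmips               # ["192.168.2.10", "192.168.2.11"]
    vms.ovirt_vms | ovirt.ovirt.ovirtvmips(attr='name')  # {"centos8-a": ["192.168.2.10"], "centos8-b": ["192.168.2.11"]}
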
diff --git a/ansible_collections/ovirt/ovirt/examples/ovirt_ansible_collections.yml b/ansible_collections/ovirt/ovirt/examples/ovirt_ansible_collections.yml
new file mode 100644
index 000000000..866aab150
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/examples/ovirt_ansible_collections.yml
@@ -0,0 +1,21 @@
+---
+- name: oVirt ansible collection
+ hosts: localhost
+ connection: local
+ vars_files:
+ # Contains encrypted `engine_password` variable using ansible-vault
+ - passwords.yml
+ tasks:
+ - name: Login
+ ovirt_auth:
+ url: "https://ovirt-engine.example.com/ovirt-engine/api"
+ password: "{{ engine_password | default(omit) }}"
+ username: "admin@internal"
+ - name: Create vm
+ ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ name: vm_name
+ state: present
+ cluster: Default
+ collections:
+ - ovirt.ovirt
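
The `passwords.yml` referenced above can be created with ansible-vault; a minimal sketch (the password value is a placeholder):

    $ ansible-vault create passwords.yml
    # file content entered in the editor:
    engine_password: ExamplePassword123

The playbook is then run with `--ask-vault-pass` (or `--vault-password-file`) so the variable can be decrypted at runtime.
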
diff --git a/ansible_collections/ovirt/ovirt/licenses/Apache-license.txt b/ansible_collections/ovirt/ovirt/licenses/Apache-license.txt
new file mode 100644
index 000000000..c4ea8b6f9
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/licenses/Apache-license.txt
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Red Hat, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/ansible_collections/ovirt/ovirt/licenses/GPL-license.txt b/ansible_collections/ovirt/ovirt/licenses/GPL-license.txt
new file mode 100644
index 000000000..94a9ed024
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/licenses/GPL-license.txt
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/ansible_collections/ovirt/ovirt/meta/execution-environment.yml b/ansible_collections/ovirt/ovirt/meta/execution-environment.yml
new file mode 100644
index 000000000..ccc0fdd06
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/meta/execution-environment.yml
@@ -0,0 +1,17 @@
+---
+version: 1
+dependencies:
+ system: bindep.txt
+ galaxy: requirements.yml
+ python: requirements.txt
+
+additional_build_steps:
+ prepend:
+ - RUN pip3 install --upgrade pip setuptools
+ # Workaround for https://bugzilla.redhat.com/2024629
+ # - RUN dnf copr enable -y ovirt/ovirt-master-snapshot
+ - RUN rpm --import https://download.copr.fedorainfracloud.org/results/ovirt/ovirt-master-snapshot/pubkey.gpg
+ - RUN dnf --repofrompath=ovirt-master-snapshot,https://download.copr.fedorainfracloud.org/results/ovirt/ovirt-master-snapshot/centos-stream-8-x86_64/ install -y ovirt-release-master
+ - RUN yum install ovirt-ansible-collection -y
+ append:
+ - RUN ls -la /usr/share/ansible/collections/ansible_collections/
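
This file follows the ansible-builder version 1 schema, so an execution environment image can be built from it roughly along these lines (the image tag is illustrative):

    ansible-builder build -f meta/execution-environment.yml -t ovirt-ee
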
diff --git a/ansible_collections/ovirt/ovirt/meta/requirements.yml b/ansible_collections/ovirt/ovirt/meta/requirements.yml
new file mode 100644
index 000000000..59d79b96f
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/meta/requirements.yml
@@ -0,0 +1,7 @@
+---
+collections:
+ - name: ansible.netcommon
+ version: <3.0.0
+ - name: ansible.posix
+ - name: ansible.utils
+ version: <2.6.0
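
These collection dependencies can be installed ahead of time with ansible-galaxy:

    ansible-galaxy collection install -r meta/requirements.yml
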
diff --git a/ansible_collections/ovirt/ovirt/meta/runtime.yml b/ansible_collections/ovirt/ovirt/meta/runtime.yml
new file mode 100644
index 000000000..1b0ce723c
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/meta/runtime.yml
@@ -0,0 +1,82 @@
+---
+requires_ansible: '>=2.9.10'
+action_groups:
+ ovirt:
+ - ovirt_affinity_group
+ - ovirt_affinity_label_facts
+ - ovirt_affinity_label_info
+ - ovirt_affinity_label
+ - ovirt_api_facts
+ - ovirt_api_info
+ - ovirt_auth
+ - ovirt_cluster_facts
+ - ovirt_cluster_info
+ - ovirt_cluster
+ - ovirt_datacenter_facts
+ - ovirt_datacenter_info
+ - ovirt_datacenter
+ - ovirt_disk_facts
+ - ovirt_disk_info
+ - ovirt_disk
+ - ovirt_event_facts
+ - ovirt_event_info
+ - ovirt_event
+ - ovirt_external_provider_facts
+ - ovirt_external_provider_info
+ - ovirt_external_provider
+ - ovirt_group_facts
+ - ovirt_group_info
+ - ovirt_group
+ - ovirt_host_facts
+ - ovirt_host_info
+ - ovirt_host_network
+ - ovirt_host_pm
+ - ovirt_host
+ - ovirt_host_storage_facts
+ - ovirt_host_storage_info
+ - ovirt_instance_type
+ - ovirt_job
+ - ovirt_mac_pool
+ - ovirt_network_facts
+ - ovirt_network_info
+ - ovirt_network
+ - ovirt_nic_facts
+ - ovirt_nic_info
+ - ovirt_nic
+ - ovirt_permission_facts
+ - ovirt_permission_info
+ - ovirt_permission
+ - ovirt_quota_facts
+ - ovirt_quota_info
+ - ovirt_quota
+ - ovirt_role
+ - ovirt_scheduling_policy_facts
+ - ovirt_scheduling_policy_info
+ - ovirt_snapshot_facts
+ - ovirt_snapshot_info
+ - ovirt_snapshot
+ - ovirt_storage_connection
+ - ovirt_storage_domain_facts
+ - ovirt_storage_domain_info
+ - ovirt_storage_domain
+ - ovirt_storage_template_facts
+ - ovirt_storage_template_info
+ - ovirt_storage_vm_facts
+ - ovirt_storage_vm_info
+ - ovirt_tag_facts
+ - ovirt_tag_info
+ - ovirt_tag
+ - ovirt_template_facts
+ - ovirt_template_info
+ - ovirt_template
+ - ovirt_user_facts
+ - ovirt_user_info
+ - ovirt_user
+ - ovirt_vm_facts
+ - ovirt_vm_info
+ - ovirt_vmpool_facts
+ - ovirt_vmpool_info
+ - ovirt_vmpool
+ - ovirt_vm
+ - ovirt_vnic_profile_info
+ - ovirt_vnic_profile
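The `ovirt` action group above lets playbooks set shared parameters once for every listed module through `module_defaults`. A minimal sketch, assuming an `ovirt_auth` variable has already been registered by the ovirt.ovirt.ovirt_auth module:

- hosts: localhost
  module_defaults:
    group/ovirt.ovirt.ovirt:
      auth: "{{ ovirt_auth }}"
  tasks:
    - name: List clusters without repeating the auth parameter
      ovirt.ovirt.ovirt_cluster_info:
      register: clusters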
diff --git a/ansible_collections/ovirt/ovirt/ovirt-ansible-collection-2.4.1.tar.gz b/ansible_collections/ovirt/ovirt/ovirt-ansible-collection-2.4.1.tar.gz
new file mode 100644
index 000000000..3f84beee1
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/ovirt-ansible-collection-2.4.1.tar.gz
Binary files differ
diff --git a/ansible_collections/ovirt/ovirt/ovirt-ansible-collection.spec b/ansible_collections/ovirt/ovirt/ovirt-ansible-collection.spec
new file mode 100644
index 000000000..94d20c5b4
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/ovirt-ansible-collection.spec
@@ -0,0 +1,409 @@
+%global namespace ovirt
+%global collectionname ovirt
+%global ansible_collections_dir ansible/collections/ansible_collections
+
+Name: ovirt-ansible-collection
+Summary: Ansible collection to manage all ovirt modules and inventory
+Version: 2.4.1
+Release: 1%{?release_suffix}%{?dist}
+Source0: http://resources.ovirt.org/pub/src/ovirt-ansible-collection/ovirt-ansible-collection-2.4.1.tar.gz
+License: ASL 2.0 and GPLv3+
+BuildArch: noarch
+Url: http://www.ovirt.org
+
+BuildRequires: ansible-core
+BuildRequires: ansible-test
+%if 0%{?rhel} > 7 && 0%{?rhel} < 9
+BuildRequires: glibc-langpack-en
+%endif
+
+Requires: ansible-core >= 2.12.0
+Requires: ovirt-imageio-client
+Requires: python3-ovirt-engine-sdk4 >= 4.5.0
+Requires: python3-netaddr
+Requires: python3-jmespath
+Requires: python3-passlib
+Requires: ansible-collection-ansible-netcommon
+Requires: ansible-collection-ansible-posix
+Requires: ansible-collection-ansible-utils
+Requires: qemu-img
+
+%if 0%{?rhel} < 9
+Requires: python38-ovirt-imageio-client
+Requires: python38-ovirt-engine-sdk4 >= 4.5.0
+Requires: python38-netaddr
+Requires: python38-jmespath
+Requires: python38-passlib
+%endif
+
+Obsoletes: ansible < 2.10.0
+Obsoletes: ovirt-ansible-cluster-upgrade
+Obsoletes: ovirt-ansible-disaster-recovery
+Obsoletes: ovirt-ansible-engine-setup
+Obsoletes: ovirt-ansible-hosted-engine-setup
+Obsoletes: ovirt-ansible-image-template
+Obsoletes: ovirt-ansible-infra
+Obsoletes: ovirt-ansible-manageiq
+Obsoletes: ovirt-ansible-repositories
+Obsoletes: ovirt-ansible-roles
+Obsoletes: ovirt-ansible-shutdown-env
+Obsoletes: ovirt-ansible-vm-infra
+
+Provides: ovirt-ansible-cluster-upgrade
+Provides: ovirt-ansible-disaster-recovery
+Provides: ovirt-ansible-engine-setup
+Provides: ovirt-ansible-hosted-engine-setup
+Provides: ovirt-ansible-image-template
+Provides: ovirt-ansible-infra
+Provides: ovirt-ansible-manageiq
+Provides: ovirt-ansible-repositories
+Provides: ovirt-ansible-roles
+Provides: ovirt-ansible-shutdown-env
+Provides: ovirt-ansible-vm-infra
+
+%description
+This Ansible collection contains all oVirt modules and the oVirt inventory plugin
+
+%prep
+%setup -c -q
+
+%build
+
+%install
+export PKG_DATA_DIR_ORIG=%{_datadir}/%{ansible_collections_dir}
+export PKG_DATA_DIR=%{buildroot}$PKG_DATA_DIR_ORIG
+export PKG_DOC_DIR=%{buildroot}%{_pkgdocdir}
+sh build.sh install %{collectionname}
+
+%files
+%{_datadir}/%{ansible_collections_dir}/%{namespace}
+%if "%{collectionname}" == "rhv"
+%{_datadir}/%{ansible_collections_dir}/ovirt
+%endif
+
+%doc README.md
+%doc examples/
+
+%license licenses
+
+%changelog
+* Tue Nov 15 2022 Martin Necas <mnecas@redhat.com> - 2.4.0-1
+- cluster_upgrade - Add default random uuid to engine_correlation_id
+- image_template - Add template_bios_type
+
+* Thu Oct 27 2022 Martin Necas <mnecas@redhat.com> - 2.3.1-1
+- filters - Fix ovirtvmipsv4 with attribute and network
+- filters - Fix ovirtvmipsv4 with filter to list
+- ovirt_host - Fix kernel_params elements type
+
+* Thu Oct 13 2022 Martin Necas <mnecas@redhat.com> - 2.3.0-1
+- ovirt_host - Honor activate and reboot_after_installation when they are set to false with reinstalled host state
+- ovirt_disk - Add read_only param for disk attachments
+- ovirt_disk - Fix disk attachment to VM
+- filters - Add documentation to all filters
+- filters - Fix ovirtvmipsv4 when using attribute
+- he-setup - Fix static ipv6 ifcfg setup
+- repositories - RHV 4.4 SP1 is supported only on RHEL 8.6 EUS
+
+* Mon Aug 15 2022 Martin Necas <mnecas@redhat.com> - 2.2.3-1
+- cluster_upgrade - Skip host upgrades without anything to update
+- hosted_engine_setup - Fix ovirt-provider-ovn-driver broken link
+- hosted_engine_setup - restore - Remove host also based on name
+- repositories - Fix example variable names
+
+* Tue Aug 9 2022 Martin Necas <mnecas@redhat.com> - 2.2.2-1
+- hosted_engine_setup - Detect hosted-engine-ha version using /usr/libexec/platform-python
+- hosted_engine_setup - update ansible version in README
+- repositories - Add mod_auth_openidc:2.3 and nodejs:14 to dnf modules
+
+* Wed Aug 3 2022 Martin Necas <mnecas@redhat.com> - 2.2.1-1
+- hosted_engine_setup - Fix hosted-engine.conf permissions and ownership
+- hosted_engine_setup - During he_setup, configure ovn with he_host_address
+
+* Mon Jul 25 2022 Martin Necas <mnecas@redhat.com> - 2.2.0-1
+- cluster_upgrade - Fix starting up pinned vms
+- disaster_recovery - Fix ansible-lint version 6.0.0 violations
+- fix ansible-lint for basic roles(infra, vm_infra, engine_setup, repositories, cluster_upgrade)
+- gluster_heal_info - Replacing gluster module to CLI to support RHV automation hub
+- image_template - Remove static no - unsupported in ansible 2.12
+- hosted_engine - During he_setup, configure ovn with he_host_name for correct operation of ovn
+- hosted_engine - Handle migration to hosts that use systemd-coredump
+- hosted_engine - Specify fqcn for ovirt_system_option_info
+- hosted_engine - Align role with ansible-lint-6.0
+- hosted_engine - Fix cleanup on el9
+- ovirt_disk - Add warning for disk attachments
+- ovirt_disk - Fix disk attachment to VM
+- ovirt_disk - Updating the documentation - vm_name/vm_id and/or disk id parameter(s) are required when extending disk with non-unique name
+- ovirt_host - Fix host wait
+- ovirt_host - Fix restarted wait condition
+- ovirt_qos, ovirt_disk_profile, ovirt_disk - Add modules to allow for creation and updating of disk_profiles
+- ovirt_snapshot - Add vm_id to select VM
+- ovirt_storage_domain - Fix inaccessible exception
+- ovirt_vm - Add reset of VM
+- ovirt_vm - Add virtio_scsi_enabled and multi_queues_enabled
+- ovirt_vm - Add volatile
+- ovirt_vm - Check if user inputted a graphical protocol
+- remove_stale_lun - Fix ansible-lint version 6.0.0 violations
+- repositories - Add ovirt_repositories_rhsm_environment and FIPS fix
+- repositories - Replace redhat_subscription and rhsm_repository with command
+- repositories - Move fips check to satellite CA install block
+- shutdown_env - Align role with ansible-lint-6.0.0
+
+* Thu Jun 9 2022 Martin Necas <mnecas@redhat.com> - 2.1.0-1
+- Add convert_to_bytes filter
+- automation - Use python38 on el8 with ansible-core 2.12 and python39 on el9 with ansible-core 2.13
+- engine_setup - Allow to disable cert validation
+- ovirt - Remove deprecated distutils
+- ovirt_vm - add wait_after_lease
+- ovirt_vm - Fix parsing None arguments
+- ovirt_vm - check if the snapshot exists
+- hosted_engine_setup - make vdsm config cleanup optional
+- hosted_engine_setup - Fix "'ansible' ModuleNotFoundError" in Disaster Recovery scripts
+- hosted_engine_setup - Use command instead of firewalld module
+
+* Fri Jun 3 2022 Martin Necas <mnecas@redhat.com> - 2.0.4-1
+- Fix the admin user name when using keycloak
+- Use cryptography < 37.0.0, as 37.0.0 emits a warning that fails testing
+- Use rstcheck < 3.5.0, as 3.5.0 emits a warning that fails testing
+- cluster_upgrade - fix wait_condition
+- hosted_engine_setup - Allocate 128MiB instead of 1GiB for he_metadata
+- hosted_engine_setup - Collect logs also on failures in 03_hosted_engine_final_tasks.yml
+- hosted_engine_setup - Fix keycloak activation/checking
+- hosted_engine_setup - Require 'detail' to be 'Up'
+- hosted_engine_setup - fix archive ownership
+- infra - add warning for multiple storage connections
+
+* Wed Apr 13 2022 Martin Necas <mnecas@redhat.com> - 2.0.3-1
+- spec: Obsolete ansible < 2.10.0
+- ovirt_vm - Fix creating a RAW VM from a COW template
+- ovirt_affinity_group - Add affinity labels
+- inventory - Fix URL address
+
+* Wed Apr 6 2022 Martin Necas <mnecas@redhat.com> - 2.0.2-1
+- cluster_upgrade: fix upgrade progress log_progress task
+
+* Tue Apr 5 2022 Martin Necas <mnecas@redhat.com> - 2.0.1-1
+- ovirt_storage_domain: make storage_format optional
+
+* Mon Apr 4 2022 Martin Necas <mnecas@redhat.com> - 2.0.0-1
+- ovirt_template: add boot_menu and bios_type
+- roles: hosted_engine_setup - Add an option to set the storage format when creating a storage domain and use it
+- spec: Add python38-ovirt-imageio-client requirement
+
+* Fri Mar 25 2022 Martin Necas <mnecas@redhat.com> - 2.0.0-0.9.BETA
+- roles: hosted_engine_setup: Fix call to engine-psql for vds_spm_id
+
+* Fri Mar 25 2022 Martin Necas <mnecas@redhat.com> - 2.0.0-0.8.BETA
+- roles: cluster_upgrade: Directly log progress to the cluster
+- spec: Add collections requirements
+
+* Thu Mar 24 2022 Martin Necas <mnecas@redhat.com> - 2.0.0-0.7.BETA
+- roles: hosted_engine_setup: Replace calls to psql as postgres with engine_psql.sh
+- spec: Add python38 requirements
+
+* Tue Mar 8 2022 Martin Necas <mnecas@redhat.com> - 2.0.0-0.6.BETA
+- roles: hosted_engine_setup: Make cloud-init removal airgapped compatible
+- roles: hosted_engine_setup: Replace xml community module
+- roles: hosted_engine_setup: Support disa stig profile
+- roles: hosted_engine_setup: Use cat command instead of lookup
+- roles: repositories: Add satellite support
+- plugins: Remove unused imports
+- ovirt_host: Add enroll_certificate
+
+* Tue Feb 15 2022 Martin Necas <mnecas@redhat.com> - 2.0.0-0.5.BETA
+- spec: Remove ansible requirements
+- roles: cluster_upgrade: Shutdown vms only on pinned to upgrade host
+- roles: hosted_engine_setup: Fix default gateway variable name
+
+* Tue Jan 25 2022 Martin Necas <mnecas@redhat.com> - 2.0.0-0.4.BETA
+- roles: cluster_upgrade: Add progress tracking/reporting
+- roles: hosted_engine_setup: Adjust files permissions
+- roles: hosted_engine_setup: Add an option to define OpenSCAP security profile name
+- roles: engine_setup: Prepare answer files and default values for 4.5 release
+- info - Add follow link url to api model links_summary
+
+* Thu Dec 16 2021 Martin Perina <mperina@redhat.com> - 2.0.0-0.3.BETA
+- roles: hosted_engine_setup: Set ownership of copied engine logs
+- roles: hosted_engine_setup: Remove SPICE from graphic protocols
+
+* Wed Dec 8 2021 Martin Perina <mperina@redhat.com> - 2.0.0-0.2.BETA
+- Fix ovirt_storage_domain entity
+- roles: hosted_engine_setup: check if abrt config files exist on HE deploy
+- manageiq: deprecate role
+- Fix remove_stale_lun whitespace
+- ovirt_remove_stale_lun: Use add_host instead of delegate_to
+- manageiq: add deprecation info
+- ovirt_remove_stale_lun: Retry "multipath -f" while removing the LUNs
+- engine_setup: skip pkg install in offline mode
+- add virtio_scsi_multi_queues parameter to ovirt_vm
+- Fix offline deployment
+- ovirt_host: fix failed_state_after_reinstall condition
+
+* Fri Dec 3 2021 Martin Necas <mnecas@redhat.com> - 2.0.0-0.1.BETA
+- ovirt_disk - Use imageio client
+
+* Fri Nov 26 2021 Martin Necas <mnecas@redhat.com> - 1.6.6-1
+- ovirt_remove_stale_lun - Allow user to remove multiple LUNs
+- ovirt_remove_stale_lun - Retry "multipath -f" while removing the LUNs
+- manageiq - Add deprecation info
+- info - Enable follow parameter
+- info - bump deprecate version for fetch_nested and nested_attributes
+- info - Rename follows to follow parameter and add alias
+
+* Tue Oct 19 2021 Martin Necas <mnecas@redhat.com> - 1.6.5-1
+- repositories - Update host and engine repositories to 4.4.9
+
+* Mon Sep 27 2021 Martin Necas <mnecas@redhat.com> - 1.6.4-1
+- repositories - Add no_log to redhat_subscription
+
+* Tue Sep 21 2021 Martin Necas <mnecas@redhat.com> - 1.6.3-1
+- repositories - Replace redhat_subscription and rhsm_repository with command
+- gluster_heal_info - Replacing gluster module to CLI to support RHV automation hub
+- image_template - Remove static no - unsupported in ansible 2.12
+
+* Thu Aug 26 2021 Martin Necas <mnecas@redhat.com> - 1.6.2-1
+- remove_stale_lun - Fix example for `remove_stale_lun` role to be able to run it from engine
+
+* Wed Aug 25 2021 Martin Necas <mnecas@redhat.com> - 1.6.1-1
+- ovirt_auth - Fix no_log token issue
+- hosted_engine_setup - Use default bridge for IPv6 advertisements
+
+* Wed Aug 11 2021 Martin Necas <mnecas@redhat.com> - 1.6.0-1
+- remove_stale_lun - Add role for removing stale LUN
+- readme - Update Ansible requirement
+- ovirt_disk - Fix update_check with no VM
+- ovirt_auth - Fix password and username requirements
+- engine_setup - Wait for webserver up after engine-config reboot
+- hosted_engine_setup - Update Ansible requirements in README
+- hosted_engine_setup - Pause deployment on failure of 'engine-backup --mode=restore'
+- hosted_engine_setup - Text change - Consistently use 'bootstrap engine VM'
+- hosted_engine_setup - Align with ansible-lint 5.0.0
+
+* Thu Jul 22 2021 Martin Necas <mnecas@redhat.com> - 1.5.4-1
+- hosted_engine_setup - Allow FIPS on HE VM
+- hosted_engine_setup - remove duplicate tasks
+- hosted_engine_setup - Use forward network during an IPv6 deployment
+- ovirt_permission - fix group search that has space in its name
+
+* Fri Jun 25 2021 Martin Necas <mnecas@redhat.com> - 1.5.3-1
+- disaster_recovery - Don't rely on safe_eval being able to do math/concat
+- hosted_engine_setup - Minor doc update
+- hosted_engine_setup - Fix engine vm add_host for the target machine
+
+* Wed Jun 23 2021 Martin Necas <mnecas@redhat.com> - 1.5.2-1
+- ovirt_vm - Add default return value to check_placement_policy.
+- hosted_engine_setup - Do not try to sync at end of full_execution.
+
+* Thu Jun 17 2021 Martin Necas <mnecas@redhat.com> - 1.5.1-1
+- hosted_engine_setup - Filter VLAN devices with bad names
+- ovirt_vm - Add placement_policy_hosts
+- infra - Add external_provider parameter on networks role of infra role
+- hosted_engine_setup - use-ansible-host
+- hosted_engine_setup - Remove cloud-init configuration
+- ovirt inventory plugin - allow several valid values for the `plugin` key
+
+* Fri Jun 4 2021 Martin Necas <mnecas@redhat.com> - 1.5.0-1
+- ovirt_host - Update iscsi target struct
+- infra - Storage fix parameters typo
+- disaster_recovery - Change conf paths to relative paths
+- hosted_engine_setup - Add pause option before engine-setup
+- hosted_engine_setup - Align with ansible-lint 5.0.0
+- hosted_engine_setup - Remove leftover code and omit parameters
+- hosted_engine_setup - Use ovirt_host module to discover iscsi
+
+* Fri Apr 23 2021 Martin Necas <mnecas@redhat.com> - 1.4.2-1
+- repositories - Add ppc host
+- repositories - Remove ansible channels from RHV 4.4
+- infra - Remove storage connection target usage
+- hosted_engine_setup - Fix the appliance distribution
+- hosted_engine_setup - Add an error message for FIPS on CentOS
+- ovirt_vm - Allow cluster migration
+
+* Mon Mar 22 2021 Martin Necas <mnecas@redhat.com> - 1.4.1-1
+- hosted_engine_setup - Fix auth revoke
+
+* Tue Mar 16 2021 Martin Necas <mnecas@redhat.com> - 1.4.0-1
+- cluster_upgrade - Add correlation-id header
+- engine_setup - Add skip renew pki confirm
+- examples - Add recipe for removing DM device
+- hosted_engine_setup - Filter devices with unsupported bond mode
+- infra - Add reboot host parameters
+- ovirt_disk - Add SATA support
+- ovirt_user - Add ssh_public_key
+- Set auth options into argument spec definition
+
+* Wed Feb 10 2021 Martin Necas <mnecas@redhat.com> - 1.3.1-1
+- ovirt_host - Add reboot_after_installation option
+- hosted_engine_setup - Disable reboot_after_installation
+
+* Thu Jan 28 2021 Martin Necas <mnecas@redhat.com> - 1.3.0-1
+- ovirt_system_option_info - Add new module
+- ansible-builder - Update bindep
+- hosted_engine_setup - Collect all engine /var/log
+- hosted_engine_setup - Use ovirt_system_option_info instead of REST API
+- ovirt_disk - Add install warning
+- ovirt_info - Fragment add auth suboptions to documentation
+
+* Mon Dec 14 2020 Martin Necas <mnecas@redhat.com> - 1.2.4-1
+- infra - Allow remove of user without password
+- inventory plugin - Correct os_type name
+- ovirt_disk - automatically detect virtual size of qcow image
+
+* Mon Nov 30 2020 Martin Necas <mnecas@redhat.com> - 1.2.3-1
+- Add hosted_engine_setup after_add_host hook
+- Add engine_setup restore files
+
+* Thu Nov 12 2020 Martin Perina <mperina@redhat.com> - 1.2.2-1
+- inventory plugin - Fix Python 2 timestamp issue
+- hosted_engine_setup - Clean VNC encryption config
+- RPM packaging - Add Provides to previous oVirt Ansible roles RPMs to
+ minimize upgrade issues
+
+* Mon Nov 2 2020 Martin Necas <mnecas@redhat.com> - 1.2.1-1
+- Split README for build and GitHub
+- Add ovirt_repositories_disable_gpg_check to repositories
+
+* Tue Oct 27 2020 Martin Necas <mnecas@redhat.com> - 1.2.0-1
+- Fix ovirt_disk ignore moving of hosted engine disks
+- Obsolete old roles
+
+* Mon Oct 12 2020 Martin Necas <mnecas@redhat.com> - 1.2.0-0.2
+- Add role disaster_recovery
+- Fix engine_setup yum.conf
+- Fix hosted_engine_setup - Allow uppercase characters in mac address
+
+* Mon Oct 12 2020 Martin Necas <mnecas@redhat.com> - 1.2.0-0.2
+- Add ovirt_vm_info current_cd
+- Add ovirt_nic_info template
+- Add ovirt_nic template_version
+- Fix ovirt_disk move
+- Fix ovirt inventory connection close
+- Fix ovirt_vm rename q35_sea to q35_sea_bios
+- Fix ovirt_vm template search
+
+* Wed Sep 16 2020 Martin Necas <mnecas@redhat.com> - 1.2.0-0.1
+- Add role cluster_upgrade
+- Add role engine_setup
+- Add role vm_infra
+- Add role infra
+- Add role manageiq
+- Add role hosted_engine_setup
+- Add role image_template
+- Add role shutdown_env
+
+* Mon Aug 17 2020 Martin Necas <mnecas@redhat.com> - 1.1.2-1
+- Add ansible changelogs
+
+* Wed Aug 12 2020 Martin Necas <mnecas@redhat.com> - 1.1.1-1
+- Fix ovirt_permission FQCNs
+
+* Wed Aug 12 2020 Martin Necas <mnecas@redhat.com> - 1.1.0-1
+- Add ovirt_vm_os_info module
+- Add ovirt_disk backup
+- Add ovirt_disk autodetect size when uploading
+- Add ovirt_host add ssh_port
+- Add ovirt_network support of removing vlan_tag
+- Fix ovirt_disk upload
+
+* Thu Apr 9 2020 Martin Necas <mnecas@redhat.com> - 1.0.0-1
+- Initial release
diff --git a/ansible_collections/ovirt/ovirt/ovirt-ansible-collection.spec.in b/ansible_collections/ovirt/ovirt/ovirt-ansible-collection.spec.in
new file mode 100644
index 000000000..0d8d5849e
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/ovirt-ansible-collection.spec.in
@@ -0,0 +1,409 @@
+%global namespace ovirt
+%global collectionname ovirt
+%global ansible_collections_dir ansible/collections/ansible_collections
+
+Name: @PACKAGE_NAME@
+Summary: Ansible collection to manage all ovirt modules and inventory
+Version: @RPM_VERSION@
+Release: @RPM_RELEASE@%{?release_suffix}%{?dist}
+Source0: http://resources.ovirt.org/pub/src/@PACKAGE_NAME@/@PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz
+License: ASL 2.0 and GPLv3+
+BuildArch: noarch
+Url: http://www.ovirt.org
+
+BuildRequires: ansible-core
+BuildRequires: ansible-test
+%if 0%{?rhel} > 7 && 0%{?rhel} < 9
+BuildRequires: glibc-langpack-en
+%endif
+
+Requires: ansible-core >= 2.12.0
+Requires: ovirt-imageio-client
+Requires: python3-ovirt-engine-sdk4 >= 4.5.0
+Requires: python3-netaddr
+Requires: python3-jmespath
+Requires: python3-passlib
+Requires: ansible-collection-ansible-netcommon
+Requires: ansible-collection-ansible-posix
+Requires: ansible-collection-ansible-utils
+Requires: qemu-img
+
+%if 0%{?rhel} < 9
+Requires: python38-ovirt-imageio-client
+Requires: python38-ovirt-engine-sdk4 >= 4.5.0
+Requires: python38-netaddr
+Requires: python38-jmespath
+Requires: python38-passlib
+%endif
+
+Obsoletes: ansible < 2.10.0
+Obsoletes: ovirt-ansible-cluster-upgrade
+Obsoletes: ovirt-ansible-disaster-recovery
+Obsoletes: ovirt-ansible-engine-setup
+Obsoletes: ovirt-ansible-hosted-engine-setup
+Obsoletes: ovirt-ansible-image-template
+Obsoletes: ovirt-ansible-infra
+Obsoletes: ovirt-ansible-manageiq
+Obsoletes: ovirt-ansible-repositories
+Obsoletes: ovirt-ansible-roles
+Obsoletes: ovirt-ansible-shutdown-env
+Obsoletes: ovirt-ansible-vm-infra
+
+Provides: ovirt-ansible-cluster-upgrade
+Provides: ovirt-ansible-disaster-recovery
+Provides: ovirt-ansible-engine-setup
+Provides: ovirt-ansible-hosted-engine-setup
+Provides: ovirt-ansible-image-template
+Provides: ovirt-ansible-infra
+Provides: ovirt-ansible-manageiq
+Provides: ovirt-ansible-repositories
+Provides: ovirt-ansible-roles
+Provides: ovirt-ansible-shutdown-env
+Provides: ovirt-ansible-vm-infra
+
+%description
+This Ansible collection contains all oVirt modules and the oVirt inventory plugin
+
+%prep
+%setup -c -q
+
+%build
+
+%install
+export PKG_DATA_DIR_ORIG=%{_datadir}/%{ansible_collections_dir}
+export PKG_DATA_DIR=%{buildroot}$PKG_DATA_DIR_ORIG
+export PKG_DOC_DIR=%{buildroot}%{_pkgdocdir}
+sh build.sh install %{collectionname}
+
+%files
+%{_datadir}/%{ansible_collections_dir}/%{namespace}
+%if "%{collectionname}" == "rhv"
+%{_datadir}/%{ansible_collections_dir}/ovirt
+%endif
+
+%doc README.md
+%doc examples/
+
+%license licenses
+
+%changelog
+* Tue Nov 15 2022 Martin Necas <mnecas@redhat.com> - 2.4.0-1
+- cluster_upgrade - Add default random uuid to engine_correlation_id
+- image_template - Add template_bios_type
+
+* Thu Oct 27 2022 Martin Necas <mnecas@redhat.com> - 2.3.1-1
+- filters - Fix ovirtvmipsv4 with attribute and network
+- filters - Fix ovirtvmipsv4 with filter to list
+- ovirt_host - Fix kernel_params elements type
+
+* Thu Oct 13 2022 Martin Necas <mnecas@redhat.com> - 2.3.0-1
+- ovirt_host - Honor activate and reboot_after_installation when they are set to false with reinstalled host state
+- ovirt_disk - Add read_only param for disk attachments
+- ovirt_disk - Fix disk attachment to VM
+- filters - Add documentation to all filters
+- filters - Fix ovirtvmipsv4 when using attribute
+- he-setup - Fix static ipv6 ifcfg setup
+- repositories - RHV 4.4 SP1 is supported only on RHEL 8.6 EUS
+
+* Mon Aug 15 2022 Martin Necas <mnecas@redhat.com> - 2.2.3-1
+- cluster_upgrade - Skip host upgrades without anything to update
+- hosted_engine_setup - Fix ovirt-provider-ovn-driver broken link
+- hosted_engine_setup - restore - Remove host also based on name
+- repositories - Fix example variable names
+
+* Tue Aug 9 2022 Martin Necas <mnecas@redhat.com> - 2.2.2-1
+- hosted_engine_setup - Detect hosted-engine-ha version using /usr/libexec/platform-python
+- hosted_engine_setup - update ansible version in README
+- repositories - Add mod_auth_openidc:2.3 and nodejs:14 to dnf modules
+
+* Wed Aug 3 2022 Martin Necas <mnecas@redhat.com> - 2.2.1-1
+- hosted_engine_setup - Fix hosted-engine.conf permissions and ownership
+- hosted_engine_setup - During he_setup, configure ovn with he_host_address
+
+* Mon Jul 25 2022 Martin Necas <mnecas@redhat.com> - 2.2.0-1
+- cluster_upgrade - Fix starting up pinned vms
+- disaster_recovery - Fix ansible-lint version 6.0.0 violations
+- fix ansible-lint for basic roles(infra, vm_infra, engine_setup, repositories, cluster_upgrade)
+- gluster_heal_info - Replacing gluster module to CLI to support RHV automation hub
+- image_template - Remove static no - unsupported in ansible 2.12
+- hosted_engine - During he_setup, configure ovn with he_host_name for correct operation of ovn
+- hosted_engine - Handle migration to hosts that use systemd-coredump
+- hosted_engine - Specify fqcn for ovirt_system_option_info
+- hosted_engine - Align role with ansible-lint-6.0
+- hosted_engine - Fix cleanup on el9
+- ovirt_disk - Add warning for disk attachments
+- ovirt_disk - Fix disk attachment to VM
+- ovirt_disk - Updating the documentation - vm_name/vm_id and/or disk id parameter(s) are required when extending disk with non-unique name
+- ovirt_host - Fix host wait
+- ovirt_host - Fix restarted wait condition
+- ovirt_qos, ovirt_disk_profile, ovirt_disk - Add modules to allow for creation and updating of disk_profiles
+- ovirt_snapshot - Add vm_id to select VM
+- ovirt_storage_domain - Fix inaccessible exception
+- ovirt_vm - Add reset of VM
+- ovirt_vm - Add virtio_scsi_enabled and multi_queues_enabled
+- ovirt_vm - Add volatile
+- ovirt_vm - Check if user inputted a graphical protocol
+- remove_stale_lun - Fix ansible-lint version 6.0.0 violations
+- repositories - Add ovirt_repositories_rhsm_environment and FIPS fix
+- repositories - Replace redhat_subscription and rhsm_repository with command
+- repositories - Move fips check to satellite CA install block
+- shutdown_env - Align role with ansible-lint-6.0.0
+
+* Thu Jun 9 2022 Martin Necas <mnecas@redhat.com> - 2.1.0-1
+- Add convert_to_bytes filter
+- automation - Use python38 on el8 with ansible-core 2.12 and python39 on el9 with ansible-core 2.13
+- engine_setup - Allow to disable cert validation
+- ovirt - Remove deprecated distutils
+- ovirt_vm - add wait_after_lease
+- ovirt_vm - Fix parsing None arguments
+- ovirt_vm - check if the snapshot exists
+- hosted_engine_setup - make vdsm config cleanup optional
+- hosted_engine_setup - Fix "'ansible' ModuleNotFoundError" in Disaster Recovery scripts
+- hosted_engine_setup - Use command instead of firewalld module
+
+* Fri Jun 3 2022 Martin Necas <mnecas@redhat.com> - 2.0.4-1
+- Fix the admin user name when using keycloak
+- Use cryptography < 37.0.0, as 37.0.0 emits a warning that fails testing
+- Use rstcheck < 3.5.0, as 3.5.0 emits a warning that fails testing
+- cluster_upgrade - fix wait_condition
+- hosted_engine_setup - Allocate 128MiB instead of 1GiB for he_metadata
+- hosted_engine_setup - Collect logs also on failures in 03_hosted_engine_final_tasks.yml
+- hosted_engine_setup - Fix keycloak activation/checking
+- hosted_engine_setup - Require 'detail' to be 'Up'
+- hosted_engine_setup - fix archive ownership
+- infra - add warning for multiple storage connections
+
+* Wed Apr 13 2022 Martin Necas <mnecas@redhat.com> - 2.0.3-1
+- spec: Obsolete ansible < 2.10.0
+- ovirt_vm - Fix creating a RAW VM from a COW template
+- ovirt_affinity_group - Add affinity labels
+- inventory - Fix URL address
+
+* Wed Apr 6 2022 Martin Necas <mnecas@redhat.com> - 2.0.2-1
+- cluster_upgrade: fix upgrade progress log_progress task
+
+* Tue Apr 5 2022 Martin Necas <mnecas@redhat.com> - 2.0.1-1
+- ovirt_storage_domain: make storage_format optional
+
+* Mon Apr 4 2022 Martin Necas <mnecas@redhat.com> - 2.0.0-1
+- ovirt_template: add boot_menu and bios_type
+- roles: hosted_engine_setup - Add an option to set the storage format when creating a storage domain and use it
+- spec: Add python38-ovirt-imageio-client requirement
+
+* Fri Mar 25 2022 Martin Necas <mnecas@redhat.com> - 2.0.0-0.9.BETA
+- roles: hosted_engine_setup: Fix call to engine-psql for vds_spm_id
+
+* Fri Mar 25 2022 Martin Necas <mnecas@redhat.com> - 2.0.0-0.8.BETA
+- roles: cluster_upgrade: Directly log progress to the cluster
+- spec: Add collections requirements
+
+* Thu Mar 24 2022 Martin Necas <mnecas@redhat.com> - 2.0.0-0.7.BETA
+- roles: hosted_engine_setup: Replace calls to psql as postgres with engine_psql.sh
+- spec: Add python38 requirements
+
+* Tue Mar 8 2022 Martin Necas <mnecas@redhat.com> - 2.0.0-0.6.BETA
+- roles: hosted_engine_setup: Make cloud-init removal airgapped compatible
+- roles: hosted_engine_setup: Replace xml community module
+- roles: hosted_engine_setup: Support disa stig profile
+- roles: hosted_engine_setup: Use cat command instead of lookup
+- roles: repositories: Add satellite support
+- plugins: Remove unused imports
+- ovirt_host: Add enroll_certificate
+
+* Tue Feb 15 2022 Martin Necas <mnecas@redhat.com> - 2.0.0-0.5.BETA
+- spec: Remove ansible requirements
+- roles: cluster_upgrade: Shutdown vms only on pinned to upgrade host
+- roles: hosted_engine_setup: Fix default gateway variable name
+
+* Tue Jan 25 2022 Martin Necas <mnecas@redhat.com> - 2.0.0-0.4.BETA
+- roles: cluster_upgrade: Add progress tracking/reporting
+- roles: hosted_engine_setup: Adjust files permissions
+- roles: hosted_engine_setup: Add an option to define OpenSCAP security profile name
+- roles: engine_setup: Prepare answer files and default values for 4.5 release
+- info - Add follow link url to api model links_summary
+
+* Thu Dec 16 2021 Martin Perina <mperina@redhat.com> - 2.0.0-0.3.BETA
+- roles: hosted_engine_setup: Set ownership of copied engine logs
+- roles: hosted_engine_setup: Remove SPICE from graphic protocols
+
+* Wed Dec 8 2021 Martin Perina <mperina@redhat.com> - 2.0.0-0.2.BETA
+- Fix ovirt_storage_domain entity
+- roles: hosted_engine_setup: check if abrt config files exist on HE deploy
+- manageiq: deprecate role
+- Fix remove_stale_lun whitespace
+- ovirt_remove_stale_lun: Use add_host instead of delegate_to
+- manageiq: add deprecation info
+- ovirt_remove_stale_lun: Retry "multipath -f" while removing the LUNs
+- engine_setup: skip pkg install in offline mode
+- add virtio_scsi_multi_queues parameter to ovirt_vm
+- Fix offline deployment
+- ovirt_host: fix failed_state_after_reinstall condition
+
+* Fri Dec 3 2021 Martin Necas <mnecas@redhat.com> - 2.0.0-0.1.BETA
+- ovirt_disk - Use imageio client
+
+* Fri Nov 26 2021 Martin Necas <mnecas@redhat.com> - 1.6.6-1
+- ovirt_remove_stale_lun - Allow user to remove multiple LUNs
+- ovirt_remove_stale_lun - Retry "multipath -f" while removing the LUNs
+- manageiq - Add deprecation info
+- info - Enable follow parameter
+- info - bump deprecate version for fetch_nested and nested_attributes
+- info - Rename follows to follow parameter and add alias
+
+* Tue Oct 19 2021 Martin Necas <mnecas@redhat.com> - 1.6.5-1
+- repositories - Update host and engine repositories to 4.4.9
+
+* Mon Sep 27 2021 Martin Necas <mnecas@redhat.com> - 1.6.4-1
+- repositories - Add no_log to redhat_subscription
+
+* Tue Sep 21 2021 Martin Necas <mnecas@redhat.com> - 1.6.3-1
+- repositories - Replace redhat_subscription and rhsm_repository with command
+- gluster_heal_info - Replacing gluster module to CLI to support RHV automation hub
+- image_template - Remove static no - unsupported in ansible 2.12
+
+* Thu Aug 26 2021 Martin Necas <mnecas@redhat.com> - 1.6.2-1
+- remove_stale_lun - Fix example for `remove_stale_lun` role to be able to run it from engine
+
+* Wed Aug 25 2021 Martin Necas <mnecas@redhat.com> - 1.6.1-1
+- ovirt_auth - Fix no_log token issue
+- hosted_engine_setup - Use default bridge for IPv6 advertisements
+
+* Wed Aug 11 2021 Martin Necas <mnecas@redhat.com> - 1.6.0-1
+- remove_stale_lun - Add role for removing stale LUN
+- readme - Update Ansible requirement
+- ovirt_disk - Fix update_check with no VM
+- ovirt_auth - Fix password and username requirements
+- engine_setup - Wait for webserver up after engine-config reboot
+- hosted_engine_setup - Update Ansible requirements in README
+- hosted_engine_setup - Pause deployment on failure of 'engine-backup --mode=restore'
+- hosted_engine_setup - Text change - Consistently use 'bootstrap engine VM'
+- hosted_engine_setup - Align with ansible-lint 5.0.0
+
+* Thu Jul 22 2021 Martin Necas <mnecas@redhat.com> - 1.5.4-1
+- hosted_engine_setup - Allow FIPS on HE VM
+- hosted_engine_setup - remove duplicate tasks
+- hosted_engine_setup - Use forward network during an IPv6 deployment
+- ovirt_permission - fix group search that has space in its name
+
+* Fri Jun 25 2021 Martin Necas <mnecas@redhat.com> - 1.5.3-1
+- disaster_recovery - Don't rely on safe_eval being able to do math/concat
+- hosted_engine_setup - Minor doc update
+- hosted_engine_setup - Fix engine vm add_host for the target machine
+
+* Wed Jun 23 2021 Martin Necas <mnecas@redhat.com> - 1.5.2-1
+- ovirt_vm - Add default return value to check_placement_policy.
+- hosted_engine_setup - Do not try to sync at end of full_execution.
+
+* Thu Jun 17 2021 Martin Necas <mnecas@redhat.com> - 1.5.1-1
+- hosted_engine_setup - Filter VLAN devices with bad names
+- ovirt_vm - Add placement_policy_hosts
+- infra - Add external_provider parameter on networks role of infra role
+- hosted_engine_setup - use-ansible-host
+- hosted_engine_setup - Remove cloud-init configuration
+- ovirt inventory plugin - allow several valid values for the `plugin` key
+
+* Fri Jun 4 2021 Martin Necas <mnecas@redhat.com> - 1.5.0-1
+- ovirt_host - Update iscsi target struct
+- infra - Storage fix parameters typo
+- disaster_recovery - Change conf paths to relative paths
+- hosted_engine_setup - Add pause option before engine-setup
+- hosted_engine_setup - Align with ansible-lint 5.0.0
+- hosted_engine_setup - Remove leftover code and omit parameters
+- hosted_engine_setup - Use ovirt_host module to discover iscsi
+
+* Fri Apr 23 2021 Martin Necas <mnecas@redhat.com> - 1.4.2-1
+- repositories - Add ppc host
+- repositories - Remove ansible channels from RHV 4.4
+- infra - Remove storage connection target usage
+- hosted_engine_setup - Fix the appliance distribution
+- hosted_engine_setup - Add an error message for FIPS on CentOS
+- ovirt_vm - Allow cluster migration
+
+* Mon Mar 22 2021 Martin Necas <mnecas@redhat.com> - 1.4.1-1
+- hosted_engine_setup - Fix auth revoke
+
+* Tue Mar 16 2021 Martin Necas <mnecas@redhat.com> - 1.4.0-1
+- cluster_upgrade - Add correlation-id header
+- engine_setup - Add skip renew pki confirm
+- examples - Add recipe for removing DM device
+- hosted_engine_setup - Filter devices with unsupported bond mode
+- infra - Add reboot host parameters
+- ovirt_disk - Add SATA support
+- ovirt_user - Add ssh_public_key
+- Set auth options into argument spec definition
+
+* Wed Feb 10 2021 Martin Necas <mnecas@redhat.com> - 1.3.1-1
+- ovirt_host - Add reboot_after_installation option
+- hosted_engine_setup - Disable reboot_after_installation
+
+* Thu Jan 28 2021 Martin Necas <mnecas@redhat.com> - 1.3.0-1
+- ovirt_system_option_info - Add new module
+- ansible-builder - Update bindep
+- hosted_engine_setup - Collect all engine /var/log
+- hosted_engine_setup - Use ovirt_system_option_info instead of REST API
+- ovirt_disk - Add install warning
+- ovirt_info - Fragment add auth suboptions to documentation
+
+* Mon Dec 14 2020 Martin Necas <mnecas@redhat.com> - 1.2.4-1
+- infra - Allow remove of user without password
+- inventory plugin - Correct os_type name
+- ovirt_disk - automatically detect virtual size of qcow image
+
+* Mon Nov 30 2020 Martin Necas <mnecas@redhat.com> - 1.2.3-1
+- Add hosted_engine_setup after_add_host hook
+- Add engine_setup restore files
+
+* Thu Nov 12 2020 Martin Perina <mperina@redhat.com> - 1.2.2-1
+- inventory plugin - Fix Python 2 timestamp issue
+- hosted_engine_setup - Clean VNC encryption config
+- RPM packaging - Add Provides to previous oVirt Ansible roles RPMs to
+ minimize upgrade issues
+
+* Mon Nov 2 2020 Martin Necas <mnecas@redhat.com> - 1.2.1-1
+- Split README for build and GitHub
+- Add ovirt_repositories_disable_gpg_check to repositories
+
+* Tue Oct 27 2020 Martin Necas <mnecas@redhat.com> - 1.2.0-1
+- Fix ovirt_disk ignore moving of hosted engine disks
+- Obsolete old roles
+
+* Mon Oct 12 2020 Martin Necas <mnecas@redhat.com> - 1.2.0-0.2
+- Add role disaster_recovery
+- Fix engine_setup yum.conf
+- Fix hosted_engine_setup - Allow uppercase characters in mac address
+
+* Mon Oct 12 2020 Martin Necas <mnecas@redhat.com> - 1.2.0-0.2
+- Add ovirt_vm_info current_cd
+- Add ovirt_nic_info template
+- Add ovirt_nic template_version
+- Fix ovirt_disk move
+- Fix ovirt inventory connection close
+- Fix ovirt_vm rename q35_sea to q35_sea_bios
+- Fix ovirt_vm template search
+
+* Wed Sep 16 2020 Martin Necas <mnecas@redhat.com> - 1.2.0-0.1
+- Add role cluster_upgrade
+- Add role engine_setup
+- Add role vm_infra
+- Add role infra
+- Add role manageiq
+- Add role hosted_engine_setup
+- Add role image_template
+- Add role shutdown_env
+
+* Mon Aug 17 2020 Martin Necas <mnecas@redhat.com> - 1.1.2-1
+- Add ansible changelogs
+
+* Wed Aug 12 2020 Martin Necas <mnecas@redhat.com> - 1.1.1-1
+- Fix ovirt_permission FQCNs
+
+* Wed Aug 12 2020 Martin Necas <mnecas@redhat.com> - 1.1.0-1
+- Add ovirt_vm_os_info module
+- Add ovirt_disk backup
+- Add ovirt_disk autodetect size when uploading
+- Add ovirt_host add ssh_port
+- Add ovirt_network support of removing vlan_tag
+- Fix ovirt_disk upload
+
+* Thu Apr 9 2020 Martin Necas <mnecas@redhat.com> - 1.0.0-1
+- Initial release
diff --git a/ansible_collections/ovirt/ovirt/plugins/callback/stdout.py b/ansible_collections/ovirt/ovirt/plugins/callback/stdout.py
new file mode 100644
index 000000000..c502dec5f
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/callback/stdout.py
@@ -0,0 +1,57 @@
+#!/usr/bin/python3
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.callback import CallbackBase
+
+# Not only visible to ansible-doc, it also 'declares' the options the plugin
+# requires and how to configure them.
+# TODO Fix DOCUMENTATION to pass the ansible-test validate-modules
+DOCUMENTATION = '''
+ callback: stdout
+ callback_type: aggregate
+    short_description: Output the log of Ansible
+    version_added: "2.0"
+    description:
+      - This callback outputs the log of Ansible play tasks.
+'''
+
+
+class CallbackModule(CallbackBase):
+ """
+    This callback module outputs information in a specific style.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'stdout'
+
+    # only needed if you ship it and don't want it enabled by default
+ CALLBACK_NEEDS_WHITELIST = False
+
+ def __init__(self):
+
+ # make sure the expected objects are present, calling the base's
+ # __init__
+ super(CallbackModule, self).__init__()
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ self._display.display('FAILED: %s %s' % (host, res))
+
+ def runner_on_ok(self, host, res):
+ self._display.display('OK: %s %s' % (host, res))
+
+ def runner_on_skipped(self, host, item=None):
+ self._display.display('SKIPPED: %s' % host)
+
+ def runner_on_unreachable(self, host, res):
+ self._display.display('UNREACHABLE: %s %s' % (host, res))
+
+ def runner_on_async_failed(self, host, res, jid):
+ self._display.display('ASYNC_FAILED: %s %s %s' % (host, res, jid))
+
+ def playbook_on_import_for_host(self, host, imported_file):
+ self._display.display('IMPORTED: %s %s' % (host, imported_file))
+
+ def playbook_on_not_import_for_host(self, host, missing_file):
+ self._display.display('NOTIMPORTED: %s %s' % (host, missing_file))
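Despite its name, this plugin is an aggregate callback (CALLBACK_TYPE = 'aggregate'), not a stdout replacement, so it runs alongside the configured stdout callback. Assuming a standard setup, it can be switched on by listing it under `callbacks_enabled` (formerly `callback_whitelist`) in the `[defaults]` section of ansible.cfg, e.g. `callbacks_enabled = ovirt.ovirt.stdout`.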
diff --git a/ansible_collections/ovirt/ovirt/plugins/doc_fragments/ovirt.py b/ansible_collections/ovirt/ovirt/plugins/doc_fragments/ovirt.py
new file mode 100644
index 000000000..5fee494db
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/doc_fragments/ovirt.py
@@ -0,0 +1,111 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard oVirt documentation fragment
+ DOCUMENTATION = r'''
+options:
+ wait:
+ description:
+ - "C(yes) if the module should wait for the entity to get into desired state."
+ type: bool
+ default: yes
+ fetch_nested:
+ description:
+ - "If I(True) the module will fetch additional data from the API."
+ - "It will fetch IDs of the VMs disks, snapshots, etc. User can configure to fetch other
+ attributes of the nested entities by specifying C(nested_attributes)."
+ type: bool
+ default: false
+ nested_attributes:
+ description:
+ - "Specifies list of the attributes which should be fetched from the API."
+ - "This parameter apply only when C(fetch_nested) is I(true)."
+ type: list
+ elements: str
+ auth:
+ description:
+ - "Dictionary with values needed to create HTTP/HTTPS connection to oVirt:"
+ suboptions:
+ username:
+ description:
+ - The name of the user, something like I(admin@internal).
+ - Default value is set by C(OVIRT_USERNAME) environment variable.
+ type: str
+ password:
+ description:
+ - The password of the user.
+ - Default value is set by C(OVIRT_PASSWORD) environment variable.
+ type: str
+ url:
+ description:
+ - A string containing the API URL of the server, usually something like `I(https://server.example.com/ovirt-engine/api)`.
+ - Default value is set by C(OVIRT_URL) environment variable.
+ - Either C(url) or C(hostname) is required.
+ type: str
+ hostname:
+ description:
+ - A string containing the hostname of the server, usually something like `I(server.example.com)`.
+ - Default value is set by C(OVIRT_HOSTNAME) environment variable.
+ - Either C(url) or C(hostname) is required.
+ type: str
+ token:
+ description:
+ - Token to be used instead of login with username/password.
+ - Default value is set by C(OVIRT_TOKEN) environment variable.
+ type: str
+ insecure:
+ description:
+ - A boolean flag that indicates if the server TLS certificate and host name should be checked.
+ type: bool
+ default: false
+ ca_file:
+ description:
+ - A PEM file containing the trusted CA certificates.
+ - The certificate presented by the server will be verified using these CA certificates.
+ - If C(ca_file) parameter is not set, system wide CA certificate store is used.
+ - Default value is set by C(OVIRT_CAFILE) environment variable.
+ type: str
+ kerberos:
+ description:
+ - A boolean flag indicating if Kerberos authentication should be used instead of the default basic authentication.
+ type: bool
+ headers:
+ description:
+ - Dictionary of HTTP headers to be added to each API call.
+ type: dict
+ timeout:
+ description: Number of seconds to wait for response.
+ type: int
+ compress:
+ description: Flag indicating if compression is used for connection.
+ type: bool
+ default: true
+ type: dict
+ required: true
+ timeout:
+ description:
+ - "The amount of time in seconds the module should wait for the instance to
+         get into the desired state."
+ type: int
+ default: 180
+ poll_interval:
+ description:
+ - "Number of the seconds the module waits until another poll request on entity status is sent."
+ type: int
+ default: 3
+requirements:
+ - python >= 2.7
+ - ovirt-engine-sdk-python >= 4.4.0
+notes:
+ - "In order to use this module you have to install oVirt Python SDK.
+ To ensure it's installed with correct version you can create the following task:
+ I(pip: name=ovirt-engine-sdk-python version=4.4.0)"
+'''
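In practice the `auth` dictionary documented above is produced by the ovirt.ovirt.ovirt_auth module, which registers an `ovirt_auth` fact that later tasks pass through. A short sketch with placeholder URL and credentials:

- name: Obtain an SSO token
  ovirt.ovirt.ovirt_auth:
    url: https://engine.example.com/ovirt-engine/api
    username: admin@internal
    password: "{{ engine_password }}"
    insecure: true

- name: Reuse the token in any other module of the collection
  ovirt.ovirt.ovirt_vm_info:
    auth: "{{ ovirt_auth }}"
  register: vm_info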
diff --git a/ansible_collections/ovirt/ovirt/plugins/doc_fragments/ovirt_info.py b/ansible_collections/ovirt/ovirt/plugins/doc_fragments/ovirt_info.py
new file mode 100644
index 000000000..8a84a577d
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/doc_fragments/ovirt_info.py
@@ -0,0 +1,98 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # info standard oVirt documentation fragment
+ DOCUMENTATION = r'''
+options:
+ fetch_nested:
+ description:
+ - If I(yes) the module will fetch additional data from the API.
+      - It will fetch only the IDs of nested entities; it doesn't fetch multiple levels of nested attributes,
+        only the attributes of the current entity. Users can configure fetching other
+        attributes of the nested entities by specifying C(nested_attributes).
+ - This parameter is deprecated and replaced by C(follow).
+ type: bool
+ default: false
+ nested_attributes:
+ description:
+ - Specifies list of the attributes which should be fetched from the API.
+      - This parameter applies only when C(fetch_nested) is I(true).
+ - This parameter is deprecated and replaced by C(follow).
+ type: list
+ elements: str
+ auth:
+ description:
+ - "Dictionary with values needed to create HTTP/HTTPS connection to oVirt:"
+ suboptions:
+ username:
+ description:
+ - The name of the user, something like I(admin@internal).
+ - Default value is set by C(OVIRT_USERNAME) environment variable.
+ type: str
+ password:
+ description:
+ - The password of the user.
+ - Default value is set by C(OVIRT_PASSWORD) environment variable.
+ type: str
+ url:
+ description:
+ - A string containing the API URL of the server, usually something like `I(https://server.example.com/ovirt-engine/api)`.
+ - Default value is set by C(OVIRT_URL) environment variable.
+ - Either C(url) or C(hostname) is required.
+ type: str
+ hostname:
+ description:
+ - A string containing the hostname of the server, usually something like `I(server.example.com)`.
+ - Default value is set by C(OVIRT_HOSTNAME) environment variable.
+ - Either C(url) or C(hostname) is required.
+ type: str
+ token:
+ description:
+ - Token to be used instead of login with username/password.
+ - Default value is set by C(OVIRT_TOKEN) environment variable.
+ type: str
+ insecure:
+ description:
+ - A boolean flag that indicates if the server TLS certificate and host name should be checked.
+ type: bool
+ default: false
+ ca_file:
+ description:
+ - A PEM file containing the trusted CA certificates.
+ - The certificate presented by the server will be verified using these CA certificates.
+ - If C(ca_file) parameter is not set, system wide CA certificate store is used.
+ - Default value is set by C(OVIRT_CAFILE) environment variable.
+ type: str
+ kerberos:
+ description:
+ - A boolean flag indicating if Kerberos authentication should be used instead of the default basic authentication.
+ type: bool
+ headers:
+ description:
+ - Dictionary of HTTP headers to be added to each API call.
+ type: dict
+ timeout:
+ description: Number of seconds to wait for response.
+ type: int
+ compress:
+ description: Flag indicating if compression is used for connection.
+ type: bool
+ default: true
+ type: dict
+ required: true
+requirements:
+ - python >= 2.7
+ - ovirt-engine-sdk-python >= 4.4.0
+notes:
+ - "In order to use this module you have to install oVirt Python SDK.
+ To ensure it's installed with correct version you can create the following task:
+ pip: name=ovirt-engine-sdk-python version=4.4.0"
+'''
diff --git a/ansible_collections/ovirt/ovirt/plugins/filter/convert_to_bytes.py b/ansible_collections/ovirt/ovirt/plugins/filter/convert_to_bytes.py
new file mode 100644
index 000000000..52d76a389
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/filter/convert_to_bytes.py
@@ -0,0 +1,21 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import convert_to_bytes
+
+
+class FilterModule(object):
+
+ def filters(self):
+ 'Define filters'
+ return {
+ 'convert_to_bytes': self.convert_to_bytes,
+ }
+
+ def convert_to_bytes(self, param):
+ """
+        Filter to convert values with IEC-standard units to bytes.
+
+ :param param: value to be converted
+ """
+ return convert_to_bytes(param)
diff --git a/ansible_collections/ovirt/ovirt/plugins/filter/convert_to_bytes.yml b/ansible_collections/ovirt/ovirt/plugins/filter/convert_to_bytes.yml
new file mode 100644
index 000000000..db0abf081
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/filter/convert_to_bytes.yml
@@ -0,0 +1,22 @@
+DOCUMENTATION:
+ name: convert_to_bytes
+ author: Martin Necas (@mnecas)
+ # version_added: 'historical'
+ short_description: Convert units to bytes
+ description:
+    - This filter converts values with IEC-standard units (KiB, MiB, ...) to bytes
+ positional: _input
+ options:
+ _input:
+ description: Value to be converted
+ type: string
+ required: true
+
+EXAMPLES: |
+ - name: Get number of bytes
+ ansible.builtin.set_fact:
+ disk_size: "{{ '1KiB' | ovirt.ovirt.convert_to_bytes }}"
+RETURN:
+ _value:
+ description: Amount of bytes
+ type: int
diff --git a/ansible_collections/ovirt/ovirt/plugins/filter/filtervalue.yml b/ansible_collections/ovirt/ovirt/plugins/filter/filtervalue.yml
new file mode 100644
index 000000000..889fc9a7f
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/filter/filtervalue.yml
@@ -0,0 +1,30 @@
+DOCUMENTATION:
+ name: filtervalue
+ author: Martin Necas (@mnecas)
+  short_description: Filter to find all occurrences of a value in a list of dicts
+  description:
+    - Filter to find all occurrences of a value in a list of dicts
+ positional: _input, attr, value
+ options:
+ _input:
+ description: List of VMs
+ type: list
+ required: true
+ attr:
+      description: Name of the attribute to match on
+      type: string
+ required: true
+ value:
+      description: Value the attribute must have
+      type: string
+ required: true
+
+EXAMPLES: |
+  - name: Set filtered ovirt_vms
+ ansible.builtin.set_fact:
+ ovirt_vms: "{{ vms | ovirt.ovirt.filtervalue('name', item.name) }}"
+
+RETURN:
+ _value:
+    description: Filtered VMs
+ type: list
diff --git a/ansible_collections/ovirt/ovirt/plugins/filter/get_network_xml_to_dict.yml b/ansible_collections/ovirt/ovirt/plugins/filter/get_network_xml_to_dict.yml
new file mode 100644
index 000000000..7d2e3373f
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/filter/get_network_xml_to_dict.yml
@@ -0,0 +1,22 @@
+DOCUMENTATION:
+ name: get_network_xml_to_dict
+ author: Martin Necas (@mnecas)
+  short_description: Get the network bridge and UUID as a dict
+  description:
+    - Get the network bridge and UUID as a dict
+ positional: _input
+ options:
+ _input:
+      description: XML definition of the network
+ type: string
+ required: true
+
+EXAMPLES: |
+ - name: Set network_dict from default_net_xml
+ ansible.builtin.set_fact:
+ network_dict: "{{ default_net_xml['stdout'] | ovirt.ovirt.get_network_xml_to_dict }}"
+
+RETURN:
+ _value:
+ description: Dict of network
+ type: dict
diff --git a/ansible_collections/ovirt/ovirt/plugins/filter/get_ovf_disk_size.py b/ansible_collections/ovirt/ovirt/plugins/filter/get_ovf_disk_size.py
new file mode 100644
index 000000000..9a38b3d04
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/filter/get_ovf_disk_size.py
@@ -0,0 +1,31 @@
+"Module to create filter to find ovf disk size from xml"
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleFilterError
+
+import xml.etree.ElementTree as ET
+
+
+def get_ovf_disk_size(data):
+ try:
+ root = ET.fromstring(data)
+ for child in root:
+ for element in child:
+ if element.tag == "Disk":
+ return element.attrib.get(
+ "{http://schemas.dmtf.org/ovf/envelope/1/}size"
+ )
+ except Exception as e:
+ raise AnsibleFilterError(
+ "Error in get_ovf_disk_size filter plugin:\n%s" % e
+ )
+
+
+class FilterModule(object):
+ """OVF disk size filter"""
+
+ def filters(self):
+ return {
+ "get_ovf_disk_size": get_ovf_disk_size
+ }
diff --git a/ansible_collections/ovirt/ovirt/plugins/filter/get_ovf_disk_size.yml b/ansible_collections/ovirt/ovirt/plugins/filter/get_ovf_disk_size.yml
new file mode 100644
index 000000000..95dc52f37
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/filter/get_ovf_disk_size.yml
@@ -0,0 +1,25 @@
+DOCUMENTATION:
+ name: get_ovf_disk_size
+ author: Asaf Rachmani (@arachmani)
+ # version_added: 'historical'
+ short_description: Get OVF disk size
+ description:
+ - Get OVF disk size.
+ positional: _input
+ options:
+ _input:
+ description: OVF data
+ type: string
+ required: true
+
+EXAMPLES: |
+ - name: Get ovf data
+ ansible.builtin.command: cat "{{ path }}"
+ register: ovf_data
+ - name: Get disk size from ovf data
+ ansible.builtin.set_fact:
+ disk_size: "{{ ovf_data['stdout'] | ovirt.ovirt.get_ovf_disk_size }}"
+RETURN:
+ _value:
+ description: OVF disk size
+ type: string
diff --git a/ansible_collections/ovirt/ovirt/plugins/filter/json_query.py b/ansible_collections/ovirt/ovirt/plugins/filter/json_query.py
new file mode 100644
index 000000000..9c835e8c7
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/filter/json_query.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# (c) 2015, Filipe Niero Felisbino <filipenf@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError, AnsibleFilterError
+
+try:
+ import jmespath
+ HAS_LIB = True
+except ImportError:
+ HAS_LIB = False
+
+
+def json_query(data, expr):
+ '''Query data using jmespath query language ( http://jmespath.org ). Example:
+    - ansible.builtin.debug: msg="{{ instance | json_query('tagged_instances[*].block_device_mapping.*.volume_id') }}"
+ '''
+ if not HAS_LIB:
+ raise AnsibleError('You need to install "jmespath" prior to running '
+ 'json_query filter')
+
+ # Hack to handle Ansible Unsafe text, AnsibleMapping and AnsibleSequence
+ # See issue: https://github.com/ansible-collections/community.general/issues/320
+ jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + ('AnsibleUnicode', 'AnsibleUnsafeText', )
+ jmespath.functions.REVERSE_TYPES_MAP['array'] = jmespath.functions.REVERSE_TYPES_MAP['array'] + ('AnsibleSequence', )
+ jmespath.functions.REVERSE_TYPES_MAP['object'] = jmespath.functions.REVERSE_TYPES_MAP['object'] + ('AnsibleMapping', )
+ try:
+ return jmespath.search(expr, data)
+ except jmespath.exceptions.JMESPathError as e:
+ raise AnsibleFilterError('JMESPathError in json_query filter plugin:\n%s' % e)
+ except Exception as e:
+ # For older jmespath, we can get ValueError and TypeError without much info.
+ raise AnsibleFilterError('Error in jmespath.search in json_query filter plugin:\n%s' % e)
+
+
+class FilterModule(object):
+ ''' Query filter '''
+
+ def filters(self):
+ return {
+ 'json_query': json_query
+ }
diff --git a/ansible_collections/ovirt/ovirt/plugins/filter/json_query.yml b/ansible_collections/ovirt/ovirt/plugins/filter/json_query.yml
new file mode 100644
index 000000000..91d942811
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/filter/json_query.yml
@@ -0,0 +1,20 @@
+DOCUMENTATION:
+ name: json_query
+ short_description: Copy of community.general.json_query
+ description:
+ - Copy of community.general.json_query used internally in the collection to ease RPM packaging, so we don't need to package/release/support the whole community.general collection for RHV customers
+ - The original can be found at link https://github.com/ansible-collections/community.general/blob/main/plugins/filter/json_query.py
+ positional: _input
+ options:
+ _input:
+      description: The data structure to query
+ type: string
+ required: true
+
+EXAMPLES: |
+  Query data using the JMESPath query language ( http://jmespath.org ). Example:
+  - ansible.builtin.debug: msg="{{ instance | json_query('tagged_instances[*].block_device_mapping.*.volume_id') }}"
+RETURN:
+ _value:
+    description: The query result
+ type: string
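Since the filter is a thin wrapper around jmespath.search, any JMESPath expression can be applied to registered module output. A sketch over made-up sample data:

- name: Collect the names of all down hosts (sample data, illustrative only)
  vars:
    hosts_info:
      ovirt_hosts:
        - name: host1
          status: up
        - name: host2
          status: down
    down_query: "ovirt_hosts[?status=='down'].name"
  ansible.builtin.debug:
    msg: "{{ hosts_info | ovirt.ovirt.json_query(down_query) }}"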
diff --git a/ansible_collections/ovirt/ovirt/plugins/filter/ovirtdiff.yml b/ansible_collections/ovirt/ovirt/plugins/filter/ovirtdiff.yml
new file mode 100644
index 000000000..057de0744
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/filter/ovirtdiff.yml
@@ -0,0 +1,35 @@
+DOCUMENTATION:
+ name: ovirtdiff
+ author: Martin Necas (@mnecas)
+  short_description: Show what will be changed on the next run of the VM
+  description:
+    - Show what will be changed on the next run of the VM
+ positional: _input
+ options:
+ _input:
+ description: VM
+ type: dict
+ required: true
+
+EXAMPLES: |
+ - name: Get VM myvm
+ ovirt_vm_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: 'name={{ myvm }}'
+ next_run: false
+ register: vm
+
+ - name: Get next_run of VM myvm
+ ovirt_vm_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: 'name={{ myvm }}'
+ next_run: true
+ register: vm_next_run
+
+ - name: Print what will be changed in next run of the VM
+ debug:
+ msg: "{{ vm.ovirt_vms[0] | ovirt.ovirt.ovirtdiff(vm_next_run.ovirt_vms[0]) }}"
+RETURN:
+ _value:
+ description: Dictionary with C(before) and C(after) keys describing what will change in the next run of the VM
+ type: dict
diff --git a/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmip.py b/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmip.py
new file mode 100644
index 000000000..41d06d702
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmip.py
@@ -0,0 +1,158 @@
+'Module to create filter to find IP addresses in VMs'
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from xml.etree import ElementTree
+
+
+class FilterModule(object):
+ 'Filter for IP addresses on newly created VMs'
+
+ def filters(self):
+ 'Define filters'
+ return {
+ 'ovirtvmip': self.ovirtvmip,
+ 'ovirtvmips': self.ovirtvmips,
+ 'ovirtvmipv4': self.ovirtvmipv4,
+ 'ovirtvmipsv4': self.ovirtvmipsv4,
+ 'ovirtvmipv6': self.ovirtvmipv6,
+ 'ovirtvmipsv6': self.ovirtvmipsv6,
+ 'filtervalue': self.filtervalue,
+ 'removesensitivevmdata': self.removesensitivevmdata,
+ 'ovirtdiff': self.ovirtdiff,
+ 'get_network_xml_to_dict': self.get_network_xml_to_dict,
+ }
+
+ def ovirtdiff(self, vm1, vm2):
+ """
+ This filter takes dictionaries of two different resources and compares
+ them. It returns a dictionary with keys 'before' and 'after', where 'before'
+ contains the old values of the resource and 'after' contains the new values.
+ It is mainly useful for comparing the current VM object with the next-run VM
+ object to see what will change on the next run.
+ """
+ before = []
+ after = []
+ if vm1.get('next_run_configuration_exists'):
+ keys = [
+ key for key in set(list(vm1.keys()) + list(vm2.keys()))
+ if (key in vm1 and (key not in vm2 or vm2[key] != vm1[key])) or (key in vm2 and (key not in vm1 or vm1[key] != vm2[key]))
+ ]
+ for key in keys:
+ before.append((key, vm1.get(key)))
+ after.append((key, vm2.get(key, vm1.get(key))))
+
+ return {
+ 'before': dict(before),
+ 'after': dict(after),
+ }
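+
+ # Illustrative sketch (hypothetical values): when vm1 reports
+ # next_run_configuration_exists=True and the two dicts differ only in
+ # 'memory', the filter returns
+ # {'before': {'memory': 1073741824}, 'after': {'memory': 2147483648}}.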
+
+ def filtervalue(self, data, attr, value):
+ """ Filter to findall occurance of some value in dict """
+ items = []
+ for item in data:
+ if item[attr] == value:
+ items.append(item)
+ return items
+
+ def ovirtvmip(self, ovirt_vms, attr=None, network_ip=None):
+ 'Return first IP'
+ return self.__get_first_ip(self.ovirtvmips(ovirt_vms, attr))
+
+ def ovirtvmips(self, ovirt_vms, attr=None, network_ip=None):
+ 'Return list of IPs'
+ return self._parse_ips(ovirt_vms, attr=attr)
+
+ def ovirtvmipv4(self, ovirt_vms, attr=None, network_ip=None):
+ 'Return first IPv4 IP'
+ return self.__get_first_ip(self.ovirtvmipsv4(ovirt_vms, attr, network_ip))
+
+ def ovirtvmipsv4(self, ovirt_vms, attr=None, network_ip=None):
+ 'Return list of IPv4 IPs'
+ ips = self._parse_ips(ovirt_vms, lambda version: version == 'v4', attr)
+ if attr:
+ return dict((k, list(filter(lambda x: self.__address_in_network(x, network_ip), v))) for k, v in ips.items())
+ return list(filter(lambda x: self.__address_in_network(x, network_ip), ips))
+
+ def ovirtvmipv6(self, ovirt_vms, attr=None, network_ip=None):
+ 'Return first IPv6 IP'
+ return self.__get_first_ip(self.ovirtvmipsv6(ovirt_vms, attr))
+
+ def ovirtvmipsv6(self, ovirt_vms, attr=None, network_ip=None):
+ 'Return list of IPv6 IPs'
+ return self._parse_ips(ovirt_vms, lambda version: version == 'v6', attr)
+
+ def _parse_ips(self, ovirt_vms, version_condition=lambda version: True, attr=None):
+ if not isinstance(ovirt_vms, list):
+ ovirt_vms = [ovirt_vms]
+
+ if attr is None:
+ return self._parse_ips_aslist(ovirt_vms, version_condition)
+ else:
+ return self._parse_ips_asdict(ovirt_vms, version_condition, attr)
+
+ @staticmethod
+ def _parse_ips_asdict(ovirt_vms, version_condition=lambda version: True, attr=None):
+ vm_ips = {}
+ for ovirt_vm in ovirt_vms:
+ ips = []
+ for device in ovirt_vm.get('reported_devices', []):
+ for curr_ip in device.get('ips', []):
+ if version_condition(curr_ip.get('version')):
+ ips.append(curr_ip.get('address'))
+ vm_ips[ovirt_vm.get(attr)] = ips
+ return vm_ips
+
+ @staticmethod
+ def _parse_ips_aslist(ovirt_vms, version_condition=lambda version: True):
+ ips = []
+ for ovirt_vm in ovirt_vms:
+ for device in ovirt_vm.get('reported_devices', []):
+ for curr_ip in device.get('ips', []):
+ if version_condition(curr_ip.get('version')):
+ ips.append(curr_ip.get('address'))
+ return ips
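+
+ # Input shape sketch (values hypothetical): a VM dict such as
+ # {'reported_devices': [{'ips': [{'version': 'v4', 'address': '10.0.0.5'}]}]}
+ # yields ['10.0.0.5'].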
+
+ @staticmethod
+ def __get_first_ip(res):
+ return res[0] if isinstance(res, list) and res else res
+
+ def __address_in_network(self, ip, net):
+ "Return boolean if IP is in network."
+ if net:
+ ipaddr = int(''.join(['%02x' % int(x) for x in ip.split('.')]), 16)
+ netstr, bits = net.split('/')
+ netaddr = int(''.join(['%02x' % int(x)
+ for x in netstr.split('.')]), 16)
+ mask = (0xffffffff << (32 - int(bits))) & 0xffffffff
+ return (ipaddr & mask) == (netaddr & mask)
+ return True
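+
+ # For example (hypothetical addresses):
+ # __address_in_network('10.0.0.5', '10.0.0.0/24') -> True
+ # __address_in_network('10.0.1.5', '10.0.0.0/24') -> False
+ # __address_in_network('10.0.0.5', None) -> True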
+
+ def removesensitivevmdata(self, data, key_to_remove='root_password'):
+ for value in data:
+ if key_to_remove in value:
+ value[key_to_remove] = "******"
+ if 'cloud_init' in value and key_to_remove in value['cloud_init']:
+ value['cloud_init'][key_to_remove] = "******"
+ if 'sysprep' in value and key_to_remove in value['sysprep']:
+ value['sysprep'][key_to_remove] = "******"
+ if 'profile' in value:
+ profile = value['profile']
+ if key_to_remove in profile:
+ profile[key_to_remove] = "******"
+ if 'cloud_init' in profile and key_to_remove in profile['cloud_init']:
+ profile['cloud_init'][key_to_remove] = "******"
+ if 'sysprep' in profile and key_to_remove in profile['sysprep']:
+ profile['sysprep'][key_to_remove] = "******"
+ return data
+
+ def get_network_xml_to_dict(self, data):
+ tree = ElementTree.fromstring(data)
+ resp = {}
+ for child in tree:
+ if child.tag == 'bridge':
+ resp['bridge'] = child.attrib
+ if child.tag == 'uuid':
+ resp['uuid'] = child.text
+ return resp
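+
+ # Sketch of the expected shape (libvirt-style network XML, values
+ # hypothetical):
+ # '<network><uuid>abc</uuid><bridge name="br0"/></network>'
+ # -> {'uuid': 'abc', 'bridge': {'name': 'br0'}}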
diff --git a/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmip.yml b/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmip.yml
new file mode 100644
index 000000000..f9620c4da
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmip.yml
@@ -0,0 +1,31 @@
+DOCUMENTATION:
+ name: ovirtvmip
+ author: Martin Necas (@mnecas)
+ short_description: Return first IP
+ description:
+ - Return first IP
+ positional: _input, attr, network_ip
+ options:
+ _input:
+ description: List of VMs
+ type: list
+ required: true
+ attr:
+ description: VM attribute (for example C(name)) used to key the result; when set, a dictionary mapping that attribute to the IPs is returned.
+ type: string
+ network_ip:
+ description: Filter the IPs by network address
+ type: string
+
+EXAMPLES: |
+ - name: Print VM IP
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmip }}"
+ - name: Print VM IP
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmip(attr='name') }}"
+
+RETURN:
+ _value:
+ description: First IP address of the VM(s); a dictionary keyed by C(attr) when C(attr) is set
+ type: string
diff --git a/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmips.yml b/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmips.yml
new file mode 100644
index 000000000..faa84048f
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmips.yml
@@ -0,0 +1,31 @@
+DOCUMENTATION:
+ name: ovirtvmips
+ author: Martin Necas (@mnecas)
+ short_description: All IPs of the VM(s)
+ description:
+ - Return all IPs of the VM(s)
+ positional: _input
+ options:
+ _input:
+ description: List of VMs
+ type: list
+ required: true
+ attr:
+ description: VM attribute (for example C(name)) used to key the result; when set, a dictionary mapping that attribute to the IPs is returned.
+ type: string
+ network_ip:
+ description: Filter the IPs by network address
+ type: string
+
+EXAMPLES: |
+ - name: Print VM all IPs
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmips }}"
+ - name: Print VM all IPs
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmips(attr='name') }}"
+
+RETURN:
+ _value:
+ description: All IPs
+ type: list
diff --git a/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmipsv4.yml b/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmipsv4.yml
new file mode 100644
index 000000000..f233d9d7b
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmipsv4.yml
@@ -0,0 +1,31 @@
+DOCUMENTATION:
+ name: ovirtvmipsv4
+ author: Martin Necas (@mnecas)
+ short_description: All IPv4 addresses of the VM(s)
+ description:
+ - Return all IPv4 addresses of the VM(s)
+ positional: _input
+ options:
+ _input:
+ description: List of VMs
+ type: list
+ required: true
+ attr:
+ description: VM attribute (for example C(name)) used to key the result; when set, a dictionary mapping that attribute to the IPs is returned.
+ type: string
+ network_ip:
+ description: Filter the IPs by network address
+ type: string
+
+EXAMPLES: |
+ - name: Print VM all IPv4
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipsv4 }}"
+ - name: Print VM all IPv4
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipsv4(attr='name') }}"
+
+RETURN:
+ _value:
+ description: All IPv4 addresses of the VM(s)
+ type: list
diff --git a/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmipsv6.yml b/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmipsv6.yml
new file mode 100644
index 000000000..a735dc9ae
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmipsv6.yml
@@ -0,0 +1,31 @@
+DOCUMENTATION:
+ name: ovirtvmipsv6
+ author: Martin Necas (@mnecas)
+ short_description: All IPv6 addresses of the VM(s)
+ description:
+ - Return all IPv6 addresses of the VM(s)
+ positional: _input
+ options:
+ _input:
+ description: List of VMs
+ type: list
+ required: true
+ attr:
+ description: VM attribute (for example C(name)) used to key the result; when set, a dictionary mapping that attribute to the IPs is returned.
+ type: string
+ network_ip:
+ description: Filter the IPs by network address
+ type: string
+
+EXAMPLES: |
+ - name: Print VM all IPv6
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipsv6 }}"
+ - name: Print VM all IPv6
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipsv6(attr='name') }}"
+
+RETURN:
+ _value:
+ description: All IPv6 addresses of the VM(s)
+ type: list
diff --git a/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmipv4.yml b/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmipv4.yml
new file mode 100644
index 000000000..15431dc03
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmipv4.yml
@@ -0,0 +1,31 @@
+DOCUMENTATION:
+ name: ovirtvmipv4
+ author: Martin Necas (@mnecas)
+ short_description: First IPv4 address of the VM(s)
+ description:
+ - Return the first IPv4 address of the VM(s)
+ positional: _input
+ options:
+ _input:
+ description: List of VMs
+ type: list
+ required: true
+ attr:
+ description: VM attribute (for example C(name)) used to key the result; when set, a dictionary mapping that attribute to the IPs is returned.
+ type: string
+ network_ip:
+ description: Filter the IPs by network address
+ type: string
+
+EXAMPLES: |
+ - name: Print VM IPv4
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipv4 }}"
+ - name: Print VM IPv4
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipv4(attr='name') }}"
+
+RETURN:
+ _value:
+ description: First IPv4 address of the VM(s)
+ type: string
diff --git a/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmipv6.yml b/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmipv6.yml
new file mode 100644
index 000000000..a88a966f7
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmipv6.yml
@@ -0,0 +1,31 @@
+DOCUMENTATION:
+ name: ovirtvmipv6
+ author: Martin Necas (@mnecas)
+ short_description: First IPv6 address of the VM(s)
+ description:
+ - Return the first IPv6 address of the VM(s)
+ positional: _input
+ options:
+ _input:
+ description: List of VMs
+ type: list
+ required: true
+ attr:
+ description: VM attribute (for example C(name)) used to key the result; when set, a dictionary mapping that attribute to the IPs is returned.
+ type: string
+ network_ip:
+ description: Filter the IPs by network address
+ type: string
+
+EXAMPLES: |
+ - name: Print VM IPv6
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipv6 }}"
+ - name: Print VM IPv6
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipv6(attr='name') }}"
+
+RETURN:
+ _value:
+ description: First IPv6 address of the VM(s)
+ type: string
diff --git a/ansible_collections/ovirt/ovirt/plugins/filter/removesensitivevmdata.yml b/ansible_collections/ovirt/ovirt/plugins/filter/removesensitivevmdata.yml
new file mode 100644
index 000000000..4f6211565
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/filter/removesensitivevmdata.yml
@@ -0,0 +1,22 @@
+DOCUMENTATION:
+ name: removesensitivevmdata
+ author: Martin Necas (@mnecas)
+ short_description: removesensitivevmdata internal filter
+ description:
+ - removesensitivevmdata internal filter
+ positional: _input
+ options:
+ _input:
+ description: List of VMs
+ type: list
+ required: true
+
+EXAMPLES: |
+ - name: Print VM
+ debug:
+ msg: "{{ vms | ovirt.ovirt.removesensitivevmdata }}"
+
+RETURN:
+ _value:
+ description: List of VMs
+ type: list
diff --git a/ansible_collections/ovirt/ovirt/plugins/inventory/ovirt.py b/ansible_collections/ovirt/ovirt/plugins/inventory/ovirt.py
new file mode 100644
index 000000000..1d621241b
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/inventory/ovirt.py
@@ -0,0 +1,272 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# TODO Fix DOCUMENTATION to pass the ansible-test validate-modules
+DOCUMENTATION = '''
+ name: ovirt
+ plugin_type: inventory
+ short_description: oVirt inventory source
+ version_added: "1.0.0"
+ author: Bram Verschueren (@bverschueren)
+ requirements:
+ - ovirt-engine-sdk-python >= 4.2.4
+ extends_documentation_fragment:
+ - inventory_cache
+ - constructed
+ description:
+ - Get inventory hosts from the ovirt service.
+ - Requires a YAML file ending in 'ovirt.yml', 'ovirt4.yml', 'ovirt.yaml', 'ovirt4.yaml'.
+ options:
+ plugin:
+ description: The name of this plugin. It should always be set to 'ovirt' for this plugin to recognise it as its own.
+ required: True
+ choices: ['ovirt', 'ovirt.ovirt.ovirt', 'redhat.rhv.ovirt']
+ ovirt_url:
+ description: URL to ovirt-engine API.
+ required: True
+ env:
+ - name: OVIRT_URL
+ ovirt_username:
+ description: ovirt authentication user.
+ required: True
+ env:
+ - name: OVIRT_USERNAME
+ ovirt_password:
+ description: ovirt authentication password.
+ required: True
+ env:
+ - name: OVIRT_PASSWORD
+ ovirt_cafile:
+ description: path to ovirt-engine CA file. If C(ovirt_cafile) parameter is not set and C(ovirt_insecure) is not True, system wide CA certificate store\
+ is used.
+ required: False
+ ovirt_insecure:
+ description: A boolean flag that indicates if the server TLS certificate and host name should be checked.
+ required: False
+ ovirt_query_filter:
+ required: False
+ description: dictionary of filter key-values to query VM's. See U(https://ovirt.github.io/ovirt-engine-sdk/master/services.m.html#ovirtsdk4\
+.services.VmsService.list) for filter parameters.
+ ovirt_hostname_preference:
+ required: False
+ description:
+ - List of options that describe the ordering for which hostnames should be assigned.
+ - See U(https://ovirt.github.io/ovirt-engine-api-model/master/#types/vm) for available attributes.
+ default: ['fqdn', 'name']
+ type: list
+'''
+
+EXAMPLES = '''
+# Ensure the CA is available:
+# $ wget "https://engine/ovirt-engine/services/pki-resource?resource=ca-certificate&format=X509-PEM-CA" -O /path/to/ca.pem
+# Sample content of ovirt.yml:
+plugin: ovirt.ovirt.ovirt
+ovirt_url: https://engine/ovirt-engine/api
+ovirt_cafile: /path/to/ca.pem
+ovirt_username: ansible-tester
+ovirt_password: secure
+ovirt_query_filter:
+ search: 'name=myvm AND cluster=mycluster'
+ case_sensitive: no
+ max: 15
+keyed_groups:
+ - key: cluster
+ prefix: 'cluster'
+groups:
+ dev: "'dev' in tags"
+compose:
+ ansible_host: devices["eth0"][0]
+'''
+
+import sys
+
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.errors import AnsibleError, AnsibleParserError
+
+HAS_OVIRT_LIB = False
+
+try:
+ import ovirtsdk4 as sdk
+ HAS_OVIRT_LIB = True
+except ImportError:
+ HAS_OVIRT_LIB = False
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+
+ NAME = 'ovirt.ovirt.ovirt'
+
+ def _get_dict_of_struct(self, vm):
+ ''' Transform SDK Vm Struct type to Python dictionary.
+ :param vm: host struct of which to create dict
+ :return dict of vm struct type
+ '''
+
+ vms_service = self.connection.system_service().vms_service()
+ clusters_service = self.connection.system_service().clusters_service()
+ vm_service = vms_service.vm_service(vm.id)
+ devices = vm_service.reported_devices_service().list()
+ tags = vm_service.tags_service().list()
+ stats = vm_service.statistics_service().list()
+ labels = vm_service.affinity_labels_service().list()
+ groups = clusters_service.cluster_service(
+ vm.cluster.id
+ ).affinity_groups_service().list()
+
+ return {
+ 'id': vm.id,
+ 'name': vm.name,
+ 'host': self.connection.follow_link(vm.host).name if vm.host else None,
+ 'cluster': self.connection.follow_link(vm.cluster).name,
+ 'status': str(vm.status),
+ 'description': vm.description,
+ 'fqdn': vm.fqdn,
+ 'os': vm.os.type,
+ 'template': self.connection.follow_link(vm.template).name,
+ 'creation_time': str(vm.creation_time),
+ 'creation_time_timestamp': float(vm.creation_time.strftime("%s.%f")),
+ 'tags': [tag.name for tag in tags],
+ 'affinity_labels': [label.name for label in labels],
+ 'affinity_groups': [
+ group.name for group in groups
+ if vm.name in [vm.name for vm in self.connection.follow_link(group.vms)]
+ ],
+ 'statistics': dict(
+ (stat.name, stat.values[0].datum if stat.values else None) for stat in stats
+ ),
+ 'devices': dict(
+ (device.name, [ip.address for ip in device.ips]) for device in devices if device.ips
+ ),
+ }
+
+ def _query(self, query_filter=None):
+ '''
+ :param query_filter: dictionary of filter parameter/values
+ :return dict of oVirt vm dicts
+ '''
+ return [self._get_dict_of_struct(host) for host in self._get_hosts(query_filter=query_filter)]
+
+ def _get_hosts(self, query_filter=None):
+ '''
+ :param filter: dictionary of vm filter parameter/values
+ :return list of oVirt vm structs
+ '''
+
+ vms_service = self.connection.system_service().vms_service()
+ if query_filter is not None:
+ return vms_service.list(**query_filter)
+ return vms_service.list()
+
+ def _get_query_options(self, param_dict):
+ ''' Get filter parameters and cast these to comply with sdk VmsService.list param types
+ :param param_dict: dictionary of filter parameters and values
+ :return dictionary with casted parameter/value
+ '''
+ if param_dict is None:
+ return None
+
+ FILTER_MAPPING = {
+ 'all_content': bool,
+ 'case_sensitive': bool,
+ 'filter': bool,
+ 'follow': str,
+ 'max': int,
+ 'search': str
+ }
+
+ casted_dict = {}
+
+ for (param, value) in param_dict.items():
+ try:
+ casted_dict[param] = FILTER_MAPPING[param](value)
+ except KeyError:
+ raise AnsibleError("Unknown filter option '{0}'".format(param))
+
+ return casted_dict
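+
+ # For example, an inventory config value of
+ # ovirt_query_filter: {search: 'name=myvm', max: '10'}
+ # is cast here to {'search': 'name=myvm', 'max': 10} before being passed
+ # to VmsService.list().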
+
+ def _get_hostname(self, host):
+ '''
+ Get the host's hostname based on the preferred attribute
+ :param host: dict representation of oVirt VmStruct
+ :return: preferred hostname for the host
+ '''
+ hostname_preference = self.get_option('ovirt_hostname_preference')
+ if not hostname_preference:
+ raise AnsibleParserError('Invalid value for option ovirt_hostname_preference: {0}'.format(hostname_preference))
+ hostname = None
+
+ for preference in hostname_preference:
+ hostname = host.get(preference)
+ if hostname is not None:
+ return hostname
+
+ raise AnsibleParserError("No valid name found for host id={0}".format(host.get('id')))
+
+ def _populate_from_source(self, source_data):
+
+ for host in source_data:
+
+ hostname = self._get_hostname(host)
+
+ self.inventory.add_host(hostname)
+
+ for fact, value in host.items():
+ self.inventory.set_variable(hostname, fact, value)
+
+ strict = self.get_option('strict')
+ self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)
+ self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)
+
+ def verify_file(self, path):
+
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('ovirt.yml', 'ovirt4.yml', 'ovirt.yaml', 'ovirt4.yaml')):
+ valid = True
+ return valid
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ if not HAS_OVIRT_LIB:
+ raise AnsibleError('oVirt inventory plugin requires ovirt-engine-sdk-python >= 4.2.4')
+
+ super(InventoryModule, self).parse(inventory, loader, path, cache)
+
+ config = self._read_config_data(path)
+
+ self.connection = sdk.Connection(
+ url=self.get_option('ovirt_url'),
+ username=self.get_option('ovirt_username'),
+ password=self.get_option('ovirt_password'),
+ ca_file=self.get_option('ovirt_cafile'),
+ insecure=self.get_option('ovirt_insecure') if self.get_option('ovirt_insecure') is not None else not self.get_option('ovirt_cafile'),
+ )
+
+ query_filter = self._get_query_options(self.get_option('ovirt_query_filter', None))
+
+ cache_key = self.get_cache_key(path)
+ source_data = None
+
+ user_cache_setting = self.get_option('cache')
+ attempt_to_read_cache = user_cache_setting and cache
+ cache_needs_update = user_cache_setting and not cache
+
+ if attempt_to_read_cache:
+ try:
+ source_data = self._cache[cache_key]
+ except KeyError:
+ cache_needs_update = True
+
+ if source_data is None:
+ source_data = self._query(query_filter=query_filter)
+
+ if cache_needs_update:
+ self._cache[cache_key] = source_data
+
+ self._populate_from_source(source_data)
+ self.connection.close()
diff --git a/ansible_collections/ovirt/ovirt/plugins/module_utils/__init__.py b/ansible_collections/ovirt/ovirt/plugins/module_utils/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/module_utils/__init__.py
diff --git a/ansible_collections/ovirt/ovirt/plugins/module_utils/cloud.py b/ansible_collections/ovirt/ovirt/plugins/module_utils/cloud.py
new file mode 100644
index 000000000..c845cfdc7
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/module_utils/cloud.py
@@ -0,0 +1,209 @@
+# -*- coding: utf-8 -*-
+#
+# (c) 2016 Allen Sanabria, <asanabria@linuxdynasty.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+"""
+This module adds shared support for generic cloud modules
+
+In order to use this module, include it as part of a custom
+module as shown below.
+
+from ansible.module_utils.cloud import CloudRetry
+
+The 'cloud' module provides the following common classes:
+
+ * CloudRetry
+ - The base class to be used by other cloud providers, in order to
+ provide a backoff/retry decorator based on status codes.
+
+ - Example using the AWSRetry class which inherits from CloudRetry.
+
+ @AWSRetry.exponential_backoff(retries=10, delay=3)
+ get_ec2_security_group_ids_from_names()
+
+ @AWSRetry.jittered_backoff()
+ get_ec2_security_group_ids_from_names()
+
+"""
+import random
+from functools import wraps
+import syslog
+import time
+
+
+def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60):
+ """ Customizable exponential backoff strategy.
+ Args:
+ retries (int): Maximum number of times to retry a request.
+ delay (float): Initial (base) delay.
+ backoff (float): base of the exponent to use for exponential
+ backoff.
+ max_delay (int): Optional. If provided each delay generated is capped
+ at this amount. Defaults to 60 seconds.
+ Returns:
+ Callable that returns a generator. This generator yields durations in
+ seconds to be used as delays for an exponential backoff strategy.
+ Usage:
+ >>> backoff = _exponential_backoff()
+ >>> backoff
+ <function backoff_backoff at 0x7f0d939facf8>
+ >>> list(backoff())
+ [2, 4, 8, 16, 32, 60, 60, 60, 60, 60]
+ """
+ def backoff_gen():
+ for retry in range(0, retries):
+ sleep = delay * backoff ** retry
+ yield sleep if max_delay is None else min(sleep, max_delay)
+ return backoff_gen
+
+
+def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random):
+ """ Implements the "Full Jitter" backoff strategy described here
+ https://www.awsarchitectureblog.com/2015/03/backoff.html
+ Args:
+ retries (int): Maximum number of times to retry a request.
+ delay (float): Approximate number of seconds to sleep for the first
+ retry.
+ max_delay (int): The maximum number of seconds to sleep for any retry.
+ _random (random.Random or None): Makes this generator testable by
+ allowing developers to explicitly pass in the a seeded Random.
+ Returns:
+ Callable that returns a generator. This generator yields durations in
+ seconds to be used as delays for a full jitter backoff strategy.
+ Usage:
+ >>> backoff = _full_jitter_backoff(retries=5)
+ >>> backoff
+ <function backoff_backoff at 0x7f0d939facf8>
+ >>> list(backoff())
+ [3, 6, 5, 23, 38]
+ >>> list(backoff())
+ [2, 1, 6, 6, 31]
+ """
+ def backoff_gen():
+ for retry in range(0, retries):
+ yield _random.randint(0, min(max_delay, delay * 2 ** retry))
+ return backoff_gen
+
+
+class CloudRetry(object):
+ """ CloudRetry can be used by any cloud provider, in order to implement a
+ backoff algorithm/retry effect based on Status Code from Exceptions.
+ """
+ # This is the base class of the exception.
+ # AWS Example botocore.exceptions.ClientError
+ base_class = None
+
+ @staticmethod
+ def status_code_from_exception(error):
+ """ Return the status code from the exception object
+ Args:
+ error (object): The exception itself.
+ """
+ pass
+
+ @staticmethod
+ def found(response_code, catch_extra_error_codes=None):
+ """ Return True if the Response Code to retry on was found.
+ Args:
+ response_code (str): This is the Response Code that is being matched against.
+ """
+ pass
+
+ @classmethod
+ def _backoff(cls, backoff_strategy, catch_extra_error_codes=None):
+ """ Retry calling the Cloud decorated function using the provided
+ backoff strategy.
+ Args:
+ backoff_strategy (callable): Callable that returns a generator. The
+ generator should yield sleep times for each retry of the decorated
+ function.
+ """
+ def deco(f):
+ @wraps(f)
+ def retry_func(*args, **kwargs):
+ for delay in backoff_strategy():
+ try:
+ return f(*args, **kwargs)
+ except Exception as e:
+ if isinstance(e, cls.base_class):
+ response_code = cls.status_code_from_exception(e)
+ if cls.found(response_code, catch_extra_error_codes):
+ msg = "{0}: Retrying in {1} seconds...".format(str(e), delay)
+ syslog.syslog(syslog.LOG_INFO, msg)
+ time.sleep(delay)
+ else:
+ # Re-raise when the error code is not one we retry on
+ raise e
+ else:
+ # Re-raise when the exception is not the provider's base error class
+ raise e
+ return f(*args, **kwargs)
+
+ return retry_func # true decorator
+
+ return deco
+
+ @classmethod
+ def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using an exponential backoff.
+
+ Kwargs:
+ retries (int): Number of times to retry a failed request before giving up
+ default=10
+ delay (int or float): Initial delay between retries in seconds
+ default=3
+ backoff (int or float): backoff multiplier e.g. value of 2 will
+ double the delay each retry
+ default=2
+ max_delay (int or None): maximum amount of time to wait between retries.
+ default=60
+ """
+ return cls._backoff(_exponential_backoff(
+ retries=retries, delay=delay, backoff=backoff, max_delay=max_delay), catch_extra_error_codes)
+
+ @classmethod
+ def jittered_backoff(cls, retries=10, delay=3, max_delay=60, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using a jittered backoff
+ strategy. More on this strategy here:
+
+ https://www.awsarchitectureblog.com/2015/03/backoff.html
+
+ Kwargs:
+ retries (int): Number of times to retry a failed request before giving up
+ default=10
+ delay (int): Initial delay between retries in seconds
+ default=3
+ max_delay (int): maximum amount of time to wait between retries.
+ default=60
+ """
+ return cls._backoff(_full_jitter_backoff(
+ retries=retries, delay=delay, max_delay=max_delay), catch_extra_error_codes)
+
+ @classmethod
+ def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using an exponential backoff.
+
+ Compatibility for the original implementation of CloudRetry.backoff that
+ did not provide configurable backoff strategies. Developers should use
+ CloudRetry.exponential_backoff instead.
+
+ Kwargs:
+ tries (int): Number of times to try (not retry) before giving up
+ default=10
+ delay (int or float): Initial delay between retries in seconds
+ default=3
+ backoff (int or float): backoff multiplier e.g. value of 2 will
+ double the delay each retry
+ default=1.1
+ """
+ return cls.exponential_backoff(
+ retries=tries - 1, delay=delay, backoff=backoff, max_delay=None, catch_extra_error_codes=catch_extra_error_codes)
diff --git a/ansible_collections/ovirt/ovirt/plugins/module_utils/ovirt.py b/ansible_collections/ovirt/ovirt/plugins/module_utils/ovirt.py
new file mode 100644
index 000000000..6699d0532
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/module_utils/ovirt.py
@@ -0,0 +1,919 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import inspect
+import os
+import time
+
+from abc import ABCMeta, abstractmethod
+from datetime import datetime
+
+from ansible_collections.ovirt.ovirt.plugins.module_utils.cloud import CloudRetry
+from ansible_collections.ovirt.ovirt.plugins.module_utils.version import ComparableVersion
+from ansible.module_utils.basic import env_fallback
+from ansible.module_utils.common._collections_compat import Mapping
+
+try:
+ from enum import Enum # enum is a ovirtsdk4 requirement
+ import ovirtsdk4 as sdk
+ import ovirtsdk4.version as sdk_version
+ import ovirtsdk4.types as otypes
+ HAS_SDK = ComparableVersion(sdk_version.VERSION) >= ComparableVersion('4.4.0')
+except ImportError:
+ HAS_SDK = False
+
+
+BYTES_MAP = {
+ 'kib': 2**10,
+ 'mib': 2**20,
+ 'gib': 2**30,
+ 'tib': 2**40,
+ 'pib': 2**50,
+}
+
+
+def check_sdk(module):
+ if not HAS_SDK:
+ module.fail_json(
+ msg='ovirtsdk4 version 4.4.0 or higher is required for this module'
+ )
+
+
+def remove_underscore(val):
+ # Strip all leading underscores from SDK attribute names.
+ if val.startswith('_'):
+ val = remove_underscore(val[1:])
+ return val
+
+
+def get_dict_of_struct_follow(struct, filter_keys):
+ if isinstance(struct, sdk.Struct):
+ res = {}
+ for key, value in struct.__dict__.items():
+ if value is None:
+ continue
+ key = remove_underscore(key)
+ if filter_keys is None or key in filter_keys:
+ res[key] = get_dict_of_struct_follow(value, filter_keys)
+ return res
+ elif isinstance(struct, Enum) or isinstance(struct, datetime):
+ return str(struct)
+ elif isinstance(struct, list) or isinstance(struct, sdk.List):
+ return [get_dict_of_struct_follow(i, filter_keys) for i in struct]
+ return struct
+
+
+def get_dict_of_struct(struct, connection=None, fetch_nested=False, attributes=None, filter_keys=None, follow=None):
+ """
+ Convert SDK Struct type into dictionary.
+ """
+ if follow:
+ return get_dict_of_struct_follow(struct, filter_keys)
+
+ res = {}
+
+ def resolve_href(value):
+ # Fetch nested values of struct:
+ try:
+ value = connection.follow_link(value)
+ except sdk.Error:
+ value = None
+ nested_obj = dict(
+ (attr, convert_value(getattr(value, attr)))
+ for attr in attributes if getattr(value, attr, None) is not None
+ )
+ nested_obj['id'] = getattr(value, 'id', None)
+ nested_obj['href'] = getattr(value, 'href', None)
+ return nested_obj
+
+ def convert_value(value):
+ nested = False
+
+ if isinstance(value, sdk.Struct):
+ if not fetch_nested or not value.href:
+ return get_dict_of_struct(value)
+ return resolve_href(value)
+
+ elif isinstance(value, Enum) or isinstance(value, datetime):
+ return str(value)
+ elif isinstance(value, list) or isinstance(value, sdk.List):
+ if isinstance(value, sdk.List) and fetch_nested and value.href:
+ try:
+ value = connection.follow_link(value)
+ nested = True
+ except sdk.Error:
+ value = []
+
+ ret = []
+ for i in value:
+ if isinstance(i, sdk.Struct):
+ if not nested and fetch_nested and i.href:
+ ret.append(resolve_href(i))
+ elif not nested:
+ ret.append(get_dict_of_struct(i))
+ else:
+ nested_obj = dict(
+ (attr, convert_value(getattr(i, attr)))
+ for attr in attributes if getattr(i, attr, None)
+ )
+ nested_obj['id'] = getattr(i, 'id', None)
+ ret.append(nested_obj)
+ elif isinstance(i, Enum):
+ ret.append(str(i))
+ else:
+ ret.append(i)
+ return ret
+ else:
+ return value
+
+ if struct is not None:
+ for key, value in struct.__dict__.items():
+ if value is None:
+ continue
+
+ key = remove_underscore(key)
+ if filter_keys is None:
+ res[key] = convert_value(value)
+ elif key in filter_keys:
+ res[key] = convert_value(value)
+
+ return res
+
+
+def engine_version(connection):
+ """
+ Return string representation of oVirt engine version.
+ """
+ engine_api = connection.system_service().get()
+ engine_version = engine_api.product_info.version
+ return '%s.%s' % (engine_version.major, engine_version.minor)
+
+
+def create_connection(auth):
+ """
+ Create a Python SDK connection from the task's `auth` parameter.
+ If the user doesn't have an SSO token, the `auth` dictionary must contain:
+ url, username, password
+
+ If the user has an SSO token, the `auth` dictionary must contain:
+ url, token
+
+ The `ca_file` parameter is mandatory for a secure connection; for an
+ insecure connection, insecure=True must be sent instead.
+
+ :param auth: dictionary which contains needed values for connection creation
+ :return: Python SDK connection
+ """
+
+ url = auth.get('url')
+ if url is None and auth.get('hostname') is not None:
+ url = 'https://{0}/ovirt-engine/api'.format(auth.get('hostname'))
+
+ return sdk.Connection(
+ url=url,
+ username=auth.get('username'),
+ password=auth.get('password'),
+ ca_file=auth.get('ca_file', None),
+ insecure=auth.get('insecure', False),
+ token=auth.get('token', None),
+ kerberos=auth.get('kerberos', None),
+ headers=auth.get('headers', None),
+ )
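+
+
+# Minimal usage sketch (endpoint and credentials are hypothetical):
+#
+# connection = create_connection({
+# 'url': 'https://engine.example.com/ovirt-engine/api',
+# 'username': 'admin@internal',
+# 'password': 'secret',
+# 'insecure': True,
+# })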
+
+
+def convert_to_bytes(param):
+ """
+ This method converts IEC units (KiB, MiB, GiB, TiB, PiB) to bytes.
+
+ :param param: value to be converted
+ """
+ if param is None:
+ return None
+
+ # Get rid of whitespaces:
+ param = ''.join(param.split())
+
+ # Convert to bytes:
+ if len(param) > 3 and param[-3].lower() in ['k', 'm', 'g', 't', 'p']:
+ return int(param[:-3]) * BYTES_MAP.get(param[-3:].lower(), 1)
+ elif param.isdigit():
+ return int(param) * 2**10
+ else:
+ raise ValueError(
+ "Unsupported value(IEC supported): '{value}'".format(value=param)
+ )
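+
+
+# Behaviour sketch: convert_to_bytes('1 GiB') == 2**30,
+# convert_to_bytes('512MiB') == 512 * 2**20, and a bare number is treated
+# as KiB, so convert_to_bytes('4') == 4 * 2**10.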
+
+
+def follow_link(connection, link):
+ """
+ This method returns the entity of the element which link points to.
+
+ :param connection: connection to the Python SDK
+ :param link: link of the entity
+ :return: entity which link points to
+ """
+
+ if link:
+ return connection.follow_link(link)
+ else:
+ return None
+
+
+def get_link_name(connection, link):
+ """
+ This method returns the name of the element which link points to.
+
+ :param connection: connection to the Python SDK
+ :param link: link of the entity
+ :return: name of the entity, which link points to
+ """
+
+ if link:
+ return connection.follow_link(link).name
+ else:
+ return None
+
+
+def equal(param1, param2, ignore_case=False):
+ """
+ Compare two parameters and return whether they are equal.
+ The comparison is skipped if the first parameter is None, so no
+ comparison is run when the user doesn't specify the parameter in
+ their task.
+
+ :param param1: user inputted parameter
+ :param param2: value of entity parameter
+ :return: True if parameters are equal or first parameter is None, otherwise False
+ """
+ if param1 is not None:
+ if ignore_case:
+ return param1.lower() == param2.lower()
+ return param1 == param2
+ return True
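+
+
+# For example: equal(None, 'anything') is True (the user didn't specify the
+# parameter), equal('myvm', 'myvm') is True, and
+# equal('MyVM', 'myvm', ignore_case=True) is also True.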
+
+
+def search_by_attributes(service, list_params=None, **kwargs):
+ """
+ Search for the entity by attributes. Nested entities don't support search
+ via REST, so when searching for a nested entity we list all entities
+ and filter them by the specified attributes.
+ """
+ list_params = list_params or {}
+ # Check if the 'list' method supports search (look for the search parameter):
+ if 'search' in inspect.getargspec(service.list)[0]:
+ res = service.list(
+ # There must be double quotes around the name, because some oVirt resources can be created with a space in the name.
+ search=' and '.join('{0}="{1}"'.format(k, v) for k, v in kwargs.items()),
+ **list_params
+ )
+ else:
+ res = [
+ e for e in service.list(**list_params) if len([
+ k for k, v in kwargs.items() if getattr(e, k, None) == v
+ ]) == len(kwargs)
+ ]
+
+ res = res or [None]
+ return res[0]
+
+
+def search_by_name(service, name, **kwargs):
+ """
+ Search for the entity by its name. Nested entities don't support search
+ via REST, so when searching for a nested entity we list all entities
+ and filter them by name.
+
+ :param service: service of the entity
+ :param name: name of the entity
+ :return: Entity object returned by Python SDK
+ """
+ # Check if the 'list' method supports search (look for the search parameter):
+ if 'search' in inspect.getargspec(service.list)[0]:
+ res = service.list(
+ # There must be double quotes around the name, because some oVirt resources can be created with a space in the name.
+ search='name="{name}"'.format(name=name)
+ )
+ else:
+ res = [e for e in service.list() if e.name == name]
+
+ if kwargs:
+ res = [
+ e for e in service.list() if len([
+ k for k, v in kwargs.items() if getattr(e, k, None) == v
+ ]) == len(kwargs)
+ ]
+
+ res = res or [None]
+ return res[0]
+
+
+def get_entity(service, get_params=None):
+ """
+ Ignore SDK Error in case of getting an entity from service.
+ """
+ entity = None
+ try:
+ if get_params is not None:
+ entity = service.get(**get_params)
+ else:
+ entity = service.get()
+ except sdk.Error:
+ # We can get a 404 here; we should ignore it, for example
+ # when removing an entity.
+ pass
+ return entity
+
+
+def get_id_by_name(service, name, raise_error=True, ignore_case=False):
+ """
+ Search for an entity ID by its name.
+ """
+ entity = search_by_name(service, name)
+
+ if entity is not None:
+ return entity.id
+
+ if raise_error:
+ raise Exception("Entity '%s' was not found." % name)
+
+
+def wait(
+ service,
+ condition,
+ fail_condition=lambda e: False,
+ timeout=180,
+ wait=True,
+ poll_interval=3,
+):
+ """
+ Wait until entity fulfill expected condition.
+
+ :param service: service of the entity
+ :param condition: condition to be fulfilled
+ :param fail_condition: if this condition is true, raise Exception
+ :param timeout: max time to wait in seconds
+ :param wait: if True wait for condition, if False don't wait
+ :param poll_interval: Number of seconds we should wait until next condition check
+ """
+ # Wait until the desired state of the entity:
+ if wait:
+ start = time.time()
+ while time.time() < start + timeout:
+ # Exit if the condition of entity is valid:
+ entity = get_entity(service)
+ if condition(entity):
+ return
+ elif fail_condition(entity):
+ raise Exception("Error while waiting on result state of the entity.")
+
+ # Sleep for `poll_interval` seconds if none of the conditions apply:
+ time.sleep(float(poll_interval))
+
+ raise Exception("Timeout exceed while waiting on result state of the entity.")
+
+
+def __get_auth_dict():
+ return dict(
+ type='dict',
+ apply_defaults=True,
+ required=True,
+ required_one_of=[['hostname', 'url']],
+ options=dict(
+ url=dict(
+ type='str',
+ fallback=(env_fallback, ['OVIRT_URL']),
+ ),
+ hostname=dict(
+ type='str',
+ fallback=(env_fallback, ['OVIRT_HOSTNAME']),
+ ),
+ username=dict(
+ type='str',
+ fallback=(env_fallback, ['OVIRT_USERNAME']),
+ ),
+ password=dict(
+ type='str',
+ fallback=(env_fallback, ['OVIRT_PASSWORD']),
+ no_log=True,
+ ),
+ insecure=dict(
+ type='bool',
+ default=False,
+ ),
+ token=dict(
+ type='str',
+ fallback=(env_fallback, ['OVIRT_TOKEN']),
+ no_log=False,
+ ),
+ ca_file=dict(
+ type='str',
+ fallback=(env_fallback, ['OVIRT_CAFILE']),
+ ),
+ compress=dict(
+ type='bool',
+ default=True
+ ),
+ timeout=dict(
+ type='int',
+ default=0
+ ),
+ kerberos=dict(type='bool'),
+ headers=dict(type='dict')
+ )
+ )
+
+
+def ovirt_info_full_argument_spec(**kwargs):
+ """
+ Extend parameters of info module with parameters which are common to all
+ oVirt info modules.
+
+ :param kwargs: kwargs to be extended
+ :return: extended dictionary with common parameters
+ """
+ spec = dict(
+ auth=__get_auth_dict(),
+ fetch_nested=dict(default=False, type='bool'),
+ nested_attributes=dict(type='list', default=list(), elements='str'),
+ follow=dict(default=list(), type='list', elements='str', aliases=['follows']),
+ )
+ spec.update(kwargs)
+ return spec
+
+
+# Left for third-party module compatibility
+def ovirt_facts_full_argument_spec(**kwargs):
+ """
+ This is deprecated. Please use ovirt_info_full_argument_spec instead!
+
+ :param kwargs: kwargs to be extended
+ :return: extended dictionary with common parameters
+ """
+ return ovirt_info_full_argument_spec(**kwargs)
+
+
+def ovirt_full_argument_spec(**kwargs):
+ """
+ Extend parameters of module with parameters which are common to all oVirt modules.
+
+ :param kwargs: kwargs to be extended
+ :return: extended dictionary with common parameters
+ """
+ spec = dict(
+ auth=__get_auth_dict(),
+ timeout=dict(default=180, type='int'),
+ wait=dict(default=True, type='bool'),
+ poll_interval=dict(default=3, type='int'),
+ fetch_nested=dict(default=False, type='bool'),
+ nested_attributes=dict(type='list', default=list(), elements='str'),
+ )
+ spec.update(kwargs)
+ return spec
+
+
+def check_params(module):
+ """
+ Most modules must have either `name` or `id` specified.
+ """
+ if module.params.get('name') is None and module.params.get('id') is None:
+ module.fail_json(msg='"name" or "id" is required')
+
+
+def engine_supported(connection, version):
+ return ComparableVersion(engine_version(connection)) >= ComparableVersion(version)
+
+
+def check_support(version, connection, module, params):
+ """
+ Check if parameters used by user are supported by oVirt Python SDK
+ and oVirt engine.
+ """
+ api_version = ComparableVersion(engine_version(connection))
+ version = ComparableVersion(version)
+ for param in params:
+ if module.params.get(param) is not None:
+ return ComparableVersion(sdk_version.VERSION) >= version and api_version >= version
+
+ return True
+
+
+class BaseModule(object):
+ """
+ This is the base class for oVirt modules. oVirt modules should inherit from
+ this class and override its methods to customize the specific needs of the
+ module. The only abstract method of this class is `build_entity`, which
+ must be implemented in the child class.
+ """
+ __metaclass__ = ABCMeta
+
+ def __init__(self, connection, module, service, changed=False):
+ self._connection = connection
+ self._module = module
+ self._service = service
+ self._changed = changed
+ self._diff = {'after': dict(), 'before': dict()}
+
+ @property
+ def changed(self):
+ return self._changed
+
+ @changed.setter
+ def changed(self, changed):
+ if not self._changed:
+ self._changed = changed
+
+ @abstractmethod
+ def build_entity(self):
+ """
+ This method should return oVirt Python SDK type, which we want to
+ create or update, initialized by values passed by Ansible module.
+
+ For example if we want to create VM, we will return following:
+ types.Vm(name=self._module.params['vm_name'])
+
+ :return: Specific instance of sdk.Struct.
+ """
+ pass
+
+ def param(self, name, default=None):
+ """
+ Return a module parameter specified by its name.
+ """
+ return self._module.params.get(name, default)
+
+ def update_check(self, entity):
+ """
+ This method checks whether the entity values are the same as the values
+ passed to the Ansible module. By default we don't compare any values.
+
+ :param entity: Entity we want to compare with Ansible module values.
+ :return: True if values are same, so we don't need to update the entity.
+ """
+ return True
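+
+ # Override sketch (hypothetical module): compare the user-supplied
+ # 'description' with the current entity's value using the equal() helper:
+ #
+ # def update_check(self, entity):
+ # return equal(self._module.params.get('description'), entity.description)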
+
+ def pre_create(self, entity):
+ """
+ This method is called right before entity is created.
+
+ :param entity: Entity to be created or updated.
+ """
+ pass
+
+ def post_create(self, entity):
+ """
+ This method is called right after entity is created.
+
+ :param entity: Entity which was created.
+ """
+ pass
+
+ def post_update(self, entity):
+ """
+ This method is called right after entity is updated.
+
+ :param entity: Entity which was updated.
+ """
+ pass
+
+ def diff_update(self, after, update):
+ for k, v in update.items():
+ if isinstance(v, Mapping):
+ after[k] = self.diff_update(after.get(k, dict()), v)
+ else:
+ after[k] = update[k]
+ return after
+
+ def create(
+ self,
+ entity=None,
+ result_state=None,
+ fail_condition=lambda e: False,
+ search_params=None,
+ update_params=None,
+ _wait=None,
+ force_create=False,
+ **kwargs
+ ):
+ """
+ Method which is called when the state of the entity is 'present'. If the
+ user doesn't provide the `entity` parameter, the entity is searched for
+ using the `search_params` parameter. If the entity is found it's updated;
+ whether the entity should be updated is checked by the `update_check`
+ method, and the updated entity is built by the `build_entity` method.
+
+ Function executed after entity is created can optionally be specified
+ in `post_create` parameter. Function executed after entity is updated
+ can optionally be specified in `post_update` parameter.
+
+ :param entity: Entity we want to update, if exists.
+ :param result_state: State the entity should be in for the task to finish.
+ :param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised.
+ :param search_params: Dictionary of parameters to be used for search.
+ :param update_params: The params which should be passed to update method.
+ :param kwargs: Additional parameters passed when creating entity.
+ :return: Dictionary with values returned by Ansible module.
+ """
+ if entity is None and not force_create:
+ entity = self.search_entity(search_params)
+
+ self.pre_create(entity)
+
+ if entity:
+ # Entity exists, so update it:
+ entity_service = self._service.service(entity.id)
+ if not self.update_check(entity):
+ new_entity = self.build_entity()
+ if not self._module.check_mode:
+ update_params = update_params or {}
+ updated_entity = entity_service.update(
+ new_entity,
+ **update_params
+ )
+ self.post_update(entity)
+
+ # Update diffs only if the user specified the --diff parameter,
+ # so we don't needlessly overload the API:
+ if self._module._diff:
+ before = get_dict_of_struct(
+ entity,
+ self._connection,
+ fetch_nested=True,
+ attributes=['name'],
+ )
+ after = before.copy()
+ self.diff_update(after, get_dict_of_struct(new_entity))
+ self._diff['before'] = before
+ self._diff['after'] = after
+
+ self.changed = True
+ else:
+ # Entity doesn't exist, so create it:
+ if not self._module.check_mode:
+ entity = self._service.add(
+ self.build_entity(),
+ **kwargs
+ )
+ self.post_create(entity)
+ self.changed = True
+
+ if not self._module.check_mode:
+ # Wait for the entity to be created and to be in the defined state:
+ entity_service = self._service.service(entity.id)
+
+ def state_condition(entity):
+ return entity
+
+ if result_state:
+
+ def state_condition(entity):
+ return entity and entity.status == result_state
+
+ wait(
+ service=entity_service,
+ condition=state_condition,
+ fail_condition=fail_condition,
+ wait=_wait if _wait is not None else self._module.params['wait'],
+ timeout=self._module.params['timeout'],
+ poll_interval=self._module.params['poll_interval'],
+ )
+
+ return {
+ 'changed': self.changed,
+ 'id': getattr(entity, 'id', None),
+ type(entity).__name__.lower(): get_dict_of_struct(
+ struct=entity,
+ connection=self._connection,
+ fetch_nested=self._module.params.get('fetch_nested'),
+ attributes=self._module.params.get('nested_attributes'),
+ ),
+ 'diff': self._diff,
+ }
+
+ def pre_remove(self, entity):
+ """
+ This method is called right before entity is removed.
+
+ :param entity: Entity which we want to remove.
+ """
+ pass
+
+ def entity_name(self, entity):
+ return "{e_type} '{e_name}'".format(
+ e_type=type(entity).__name__.lower(),
+ e_name=getattr(entity, 'name', None),
+ )
+
+ def remove(self, entity=None, search_params=None, **kwargs):
+ """
+ Method which is called when the state of the entity is 'absent'. If the
+ user doesn't provide the `entity` parameter, the entity is searched for
+ using the `search_params` parameter. If the entity is found it's removed.
+
+ Function executed before remove is executed can optionally be specified
+ in `pre_remove` parameter.
+
+ :param entity: Entity we want to remove.
+ :param search_params: Dictionary of parameters to be used for search.
+ :param kwargs: Additional parameters passed when removing entity.
+ :return: Dictionary with values returned by Ansible module.
+ """
+ if entity is None:
+ entity = self.search_entity(search_params)
+
+ if entity is None:
+ return {
+ 'changed': self.changed,
+ 'msg': "Entity wasn't found."
+ }
+
+ self.pre_remove(entity)
+
+ entity_service = self._service.service(entity.id)
+ if not self._module.check_mode:
+ entity_service.remove(**kwargs)
+ wait(
+ service=entity_service,
+ condition=lambda entity: not entity,
+ wait=self._module.params['wait'],
+ timeout=self._module.params['timeout'],
+ poll_interval=self._module.params['poll_interval'],
+ )
+ self.changed = True
+
+ return {
+ 'changed': self.changed,
+ 'id': entity.id,
+ type(entity).__name__.lower(): get_dict_of_struct(
+ struct=entity,
+ connection=self._connection,
+ fetch_nested=self._module.params.get('fetch_nested'),
+ attributes=self._module.params.get('nested_attributes'),
+ ),
+ }
+
+ def action(
+ self,
+ action,
+ entity=None,
+ action_condition=lambda e: e,
+ wait_condition=lambda e: e,
+ fail_condition=lambda e: False,
+ pre_action=lambda e: e,
+ post_action=lambda e: None,
+ search_params=None,
+ **kwargs
+ ):
+ """
+ This method is executed when we want to change the state of some oVirt
+ entity. The action to be executed on oVirt service is specified by
+ `action` parameter. Whether the action should be executed can be
+ specified by passing `action_condition` parameter. State which the
+ entity should be in after execution of the action can be specified
+ by `wait_condition` parameter.
+
+ Function executed before an action on entity can optionally be specified
+ in `pre_action` parameter. Function executed after an action on entity can
+ optionally be specified in `post_action` parameter.
+
+ :param action: Action which should be executed by service on entity.
+ :param entity: Entity we want to run action on.
+ :param action_condition: Function which is executed when checking if action should be executed.
+ :param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised.
+ :param wait_condition: Function which is executed when waiting on result state.
+ :param pre_action: Function which is executed before running the action.
+ :param post_action: Function which is executed after running the action.
+ :param search_params: Dictionary of parameters to be used for search.
+ :param kwargs: Additional parameters passed to action.
+ :return: Dictionary with values returned by Ansible module.
+ """
+ if entity is None:
+ entity = self.search_entity(search_params)
+
+ entity = pre_action(entity)
+
+ if entity is None:
+ self._module.fail_json(
+ msg="Entity not found, can't run action '{0}'.".format(
+ action
+ )
+ )
+
+ entity_service = self._service.service(entity.id)
+ entity = entity_service.get()
+ if action_condition(entity):
+ if not self._module.check_mode:
+ getattr(entity_service, action)(**kwargs)
+ self.changed = True
+
+ post_action(entity)
+
+ wait(
+ service=self._service.service(entity.id),
+ condition=wait_condition,
+ fail_condition=fail_condition,
+ wait=self._module.params['wait'],
+ timeout=self._module.params['timeout'],
+ poll_interval=self._module.params['poll_interval'],
+ )
+ return {
+ 'changed': self.changed,
+ 'id': entity.id,
+ type(entity).__name__.lower(): get_dict_of_struct(
+ struct=entity,
+ connection=self._connection,
+ fetch_nested=self._module.params.get('fetch_nested'),
+ attributes=self._module.params.get('nested_attributes'),
+ ),
+ 'diff': self._diff,
+ }
+
+ def wait_for_import(self, condition=lambda e: True):
+ if self._module.params['wait']:
+ start = time.time()
+ timeout = self._module.params['timeout']
+ poll_interval = self._module.params['poll_interval']
+ while time.time() < start + timeout:
+ entity = self.search_entity()
+ if entity and condition(entity):
+ return entity
+ time.sleep(poll_interval)
+
+ def search_entity(self, search_params=None, list_params=None):
+ """
+ Always try to search by `ID` first; if no ID is specified, check
+ whether the user constructed a special search in `search_params`,
+ and otherwise search by `name`.
+ """
+ entity = None
+
+ if 'id' in self._module.params and self._module.params['id'] is not None:
+ entity = get_entity(self._service.service(self._module.params['id']), get_params=list_params)
+ elif search_params is not None:
+ entity = search_by_attributes(self._service, list_params=list_params, **search_params)
+ elif self._module.params.get('name') is not None:
+ entity = search_by_attributes(self._service, list_params=list_params, name=self._module.params['name'])
+
+ return entity
+
+ def _get_major(self, full_version):
+ if full_version is None or full_version == "":
+ return None
+ if isinstance(full_version, otypes.Version):
+ return int(full_version.major)
+ return int(full_version.split('.')[0])
+
+ def _get_minor(self, full_version):
+ if full_version is None or full_version == "":
+ return None
+ if isinstance(full_version, otypes.Version):
+ return int(full_version.minor)
+ return int(full_version.split('.')[1])
+
+
+def _sdk4_error_maybe():
+ """
+ Allow for ovirtsdk4 not being installed.
+ """
+ if HAS_SDK:
+ return sdk.Error
+ return type(None)
+
+
+class OvirtRetry(CloudRetry):
+ base_class = _sdk4_error_maybe()
+
+ @staticmethod
+ def status_code_from_exception(error):
+ return error.code
+
+ @staticmethod
+ def found(response_code, catch_extra_error_codes=None):
+ # This is a list of error codes to retry.
+ retry_on = [
+ # HTTP status: Conflict
+ 409,
+ ]
+ if catch_extra_error_codes:
+ retry_on.extend(catch_extra_error_codes)
+
+ return response_code in retry_on
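+
+
+# Usage sketch (illustrative): retry an SDK call when the engine answers
+# with HTTP 409 Conflict.
+#
+# @OvirtRetry.backoff(tries=5, delay=2)
+# def remove_disk(disk_service):
+# disk_service.remove()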
diff --git a/ansible_collections/ovirt/ovirt/plugins/module_utils/version.py b/ansible_collections/ovirt/ovirt/plugins/module_utils/version.py
new file mode 100644
index 000000000..05e905fa6
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/module_utils/version.py
@@ -0,0 +1,52 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+
+class ComparableVersion:
+ component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)
+
+ def __init__(self, vstring=None):
+ if vstring:
+ self.parse(vstring)
+
+ def __eq__(self, other):
+ return self._cmp(other) == 0
+
+ def __lt__(self, other):
+ return self._cmp(other) < 0
+
+ def __le__(self, other):
+ return self._cmp(other) <= 0
+
+ def __gt__(self, other):
+ return self._cmp(other) > 0
+
+ def __ge__(self, other):
+ return self._cmp(other) >= 0
+
+ def parse(self, vstring):
+ self.vstring = vstring
+ components = [x for x in self.component_re.split(vstring)
+ if x and x != '.']
+ for i, obj in enumerate(components):
+ try:
+ components[i] = int(obj)
+ except ValueError:
+ pass
+
+ self.version = components
+
+ def _cmp(self, other):
+ if isinstance(other, str):
+ other = ComparableVersion(other)
+ elif not isinstance(other, ComparableVersion):
+ return NotImplemented
+
+ if self.version == other.version:
+ return 0
+ if self.version < other.version:
+ return -1
+ if self.version > other.version:
+ return 1
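+
+
+# For example: ComparableVersion('4.4.0') >= ComparableVersion('4.2.4') is
+# True, and ComparableVersion('4.10') > '4.9' holds because numeric
+# components are compared as integers rather than strings.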
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/__init__.py b/ansible_collections/ovirt/ovirt/plugins/modules/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/__init__.py
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_affinity_group.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_affinity_group.py
new file mode 100644
index 000000000..98a1fa918
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_affinity_group.py
@@ -0,0 +1,372 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_affinity_group
+short_description: Module to manage affinity groups in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "This module manage affinity groups in oVirt/RHV. It can also manage assignments
+ of those groups to VMs."
+options:
+ name:
+ description:
+ - Name of the affinity group to manage.
+ required: true
+ type: str
+ state:
+ description:
+ - Should the affinity group be present or absent.
+ choices: [ absent, present ]
+ type: str
+ default: present
+ cluster:
+ description:
+ - Name of the cluster of the affinity group.
+ type: str
+ required: true
+ description:
+ description:
+ - Description of the affinity group.
+ type: str
+ host_enforcing:
+ description:
+ - If I(yes), the VM cannot start on a host that does not satisfy the C(host_rule).
+ - This parameter is supported since oVirt/RHV version 4.1.
+ type: bool
+ host_rule:
+ description:
+ - If I(positive) I(all) VMs in this group should run on this host.
+ - If I(negative) I(no) VMs in this group should run on this host.
+ - If I(disabled) this affinity group doesn't take effect.
+ - This parameter is supported since oVirt/RHV version 4.1.
+ choices: [ disabled, negative, positive ]
+ type: str
+ vm_enforcing:
+ description:
+ - If I(yes), the VM cannot start if it does not satisfy the C(vm_rule).
+ type: bool
+ vm_rule:
+ description:
+ - If I(positive) I(all) VMs in this group should run on the host defined by C(host_rule).
+ - If I(negative) I(no) VMs in this group should run on the host defined by C(host_rule).
+ - If I(disabled) this affinity group doesn't take effect.
+ choices: [ disabled, negative, positive ]
+ type: str
+ vms:
+ description:
+ - List of VM names to which this affinity group should be assigned.
+ type: list
+ elements: str
+ hosts:
+ description:
+ - List of host names to which this affinity group should be assigned.
+ - This parameter is supported since oVirt/RHV version 4.1.
+ type: list
+ elements: str
+ vms_labels:
+ description:
+ - List of VM label names to which this affinity group should be assigned.
+ type: list
+ elements: str
+ hosts_labels:
+ description:
+ - List of host label names to which this affinity group should be assigned.
+ type: list
+ elements: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Create(if not exists) and assign affinity group to VMs vm1 and vm2 and host host1
+ ovirt.ovirt.ovirt_affinity_group:
+ name: mygroup
+ cluster: mycluster
+ vm_enforcing: true
+ vm_rule: positive
+ host_enforcing: true
+ host_rule: positive
+ vms:
+ - vm1
+ - vm2
+ hosts:
+ - host1
+
+- name: Detach VMs from affinity group and disable VM rule
+ ovirt.ovirt.ovirt_affinity_group:
+ name: mygroup
+ cluster: mycluster
+ vm_enforcing: false
+ vm_rule: disabled
+ host_enforcing: true
+ host_rule: positive
+ vms: []
+ hosts:
+ - host1
+ - host2
+
+- name: Remove affinity group
+ ovirt.ovirt.ovirt_affinity_group:
+ state: absent
+ cluster: mycluster
+ name: mygroup
+'''
+
+RETURN = '''
+id:
+ description: ID of the affinity group which is managed
+ returned: On success if affinity group is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+affinity_group:
+ description: "Dictionary of all the affinity group attributes. Affinity group attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/affinity_group."
+ returned: On success if affinity group is found.
+ type: dict
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ check_support,
+ create_connection,
+ get_id_by_name,
+ equal,
+ engine_supported,
+ ovirt_full_argument_spec,
+ search_by_name,
+)
+
+
+class AffinityGroupsModule(BaseModule):
+
+ def __init__(self, vm_ids, host_ids, host_label_ids, vm_label_ids, *args, **kwargs):
+ super(AffinityGroupsModule, self).__init__(*args, **kwargs)
+ self._vm_ids = vm_ids
+ self._host_ids = host_ids
+ self._host_label_ids = host_label_ids
+ self._vm_label_ids = vm_label_ids
+
+ def update_vms(self, affinity_group):
+ """
+ Iterate over the affinity group's VM assignments: detach the VMs that
+ should no longer be attached to the affinity group and attach the VMs
+ that should be attached.
+ """
+ assigned_vms = self.assigned_vms(affinity_group)
+ to_remove = list(vm for vm in assigned_vms if vm not in self._vm_ids)
+ to_add = []
+ if self._vm_ids:
+ to_add = list(vm for vm in self._vm_ids if vm not in assigned_vms)
+ ag_service = self._service.group_service(affinity_group.id)
+ for vm in to_remove:
+ ag_service.vms_service().vm_service(vm).remove()
+ for vm in to_add:
+ # The API returns an <action> element instead of a VM element, so we
+ # need to work around this issue for oVirt/RHV versions having this bug:
+ try:
+ ag_service.vms_service().add(otypes.Vm(id=vm))
+ except ValueError as ex:
+ if 'complete' not in str(ex):
+ raise ex
+
+ def post_create(self, entity):
+ self.update_vms(entity)
+
+ def post_update(self, entity):
+ self.update_vms(entity)
+
+ def build_entity(self):
+ affinity_group = otypes.AffinityGroup(
+ name=self._module.params['name'],
+ description=self._module.params['description'],
+ positive=(
+ self._module.params['vm_rule'] == 'positive'
+ ) if self._module.params['vm_rule'] is not None else None,
+ enforcing=(
+ self._module.params['vm_enforcing']
+ ) if self._module.params['vm_enforcing'] is not None else None,
+ )
+
+ # These attributes are supported since 4.1:
+ if not engine_supported(self._connection, '4.1'):
+ return affinity_group
+
+ affinity_group.hosts_rule = otypes.AffinityRule(
+ positive=(
+ self.param('host_rule') == 'positive'
+ ) if self.param('host_rule') is not None else None,
+ enforcing=self.param('host_enforcing'),
+ ) if (
+ self.param('host_enforcing') is not None or
+ self.param('host_rule') is not None
+ ) else None
+
+ affinity_group.vms_rule = otypes.AffinityRule(
+ positive=(
+ self.param('vm_rule') == 'positive'
+ ) if self.param('vm_rule') is not None else None,
+ enforcing=self.param('vm_enforcing'),
+ enabled=(
+ self.param('vm_rule') in ['negative', 'positive']
+ ) if self.param('vm_rule') is not None else None,
+ ) if (
+ self.param('vm_enforcing') is not None or
+ self.param('vm_rule') is not None
+ ) else None
+
+ affinity_group.hosts = [
+ otypes.Host(id=host_id) for host_id in self._host_ids
+ ] if self._host_ids is not None else None
+ affinity_group.vm_labels = [
+ otypes.AffinityLabel(id=host_id) for host_id in self._vm_label_ids
+ ] if self._vm_label_ids is not None else None
+ affinity_group.host_labels = [
+ otypes.AffinityLabel(id=host_id) for host_id in self._host_label_ids
+ ] if self._host_label_ids is not None else None
+
+ return affinity_group
+
+ def assigned_vms(self, affinity_group):
+ if getattr(affinity_group.vms, 'href', None):
+ return sorted([
+ vm.id for vm in self._connection.follow_link(affinity_group.vms)
+ ])
+ else:
+ return sorted([vm.id for vm in affinity_group.vms])
+
+ def update_check(self, entity):
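+ # Returns True when the existing group already matches the requested
+ # parameters; equal() treats a None (unset) parameter as matching, so
+ # omitted options never trigger an update.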
+ assigned_vms = self.assigned_vms(entity)
+ do_update = (
+ equal(self.param('description'), entity.description) and equal(self.param('vm_enforcing'), entity.enforcing) and equal(
+ self.param('vm_rule') == 'positive' if self.param('vm_rule') else None,
+ entity.positive
+ ) and equal(self._vm_ids, assigned_vms)
+ )
+ # The following attributes are supported since 4.1,
+ # so return early if the engine doesn't support them:
+ if not engine_supported(self._connection, '4.1'):
+ return do_update
+
+ # The following checks are supported since 4.1:
+ return do_update and (
+ equal(
+ self.param('host_rule') == 'positive' if self.param('host_rule') else None,
+ entity.hosts_rule.positive) and equal(self.param('host_enforcing'), entity.hosts_rule.enforcing) and equal(
+ self.param('vm_rule') in ['negative', 'positive'] if self.param('vm_rule') else None,
+ entity.vms_rule.enabled) and equal(self._host_ids, sorted([host.id for host in entity.hosts]))
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ cluster=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ description=dict(type='str'),
+ vm_enforcing=dict(type='bool'),
+ vm_rule=dict(type='str', choices=['disabled', 'negative', 'positive']),
+ host_enforcing=dict(type='bool'),
+ host_rule=dict(type='str', choices=['disabled', 'negative', 'positive']),
+ vms=dict(type='list', elements='str'),
+ hosts=dict(type='list', elements='str'),
+ vms_labels=dict(type='list', elements='str'),
+ hosts_labels=dict(type='list', elements='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ # Check if unsupported parameters were passed:
+ supported_41 = ('host_enforcing', 'host_rule', 'hosts')
+ if not check_support(
+ version='4.1',
+ connection=connection,
+ module=module,
+ params=supported_41,
+ ):
+ module.fail_json(
+ msg='The following parameters are supported only since version 4.1: {params}'.format(
+ params=supported_41,
+ )
+ )
+ clusters_service = connection.system_service().clusters_service()
+ vms_service = connection.system_service().vms_service()
+ hosts_service = connection.system_service().hosts_service()
+ affinity_labels_service = connection.system_service().affinity_labels_service()
+ cluster_name = module.params['cluster']
+ cluster = search_by_name(clusters_service, cluster_name)
+ if cluster is None:
+ raise Exception("Cluster '%s' was not found." % cluster_name)
+ cluster_service = clusters_service.cluster_service(cluster.id)
+ affinity_groups_service = cluster_service.affinity_groups_service()
+
+ # Fetch VM ids which should be assigned to affinity group:
+ vm_ids = sorted([
+ get_id_by_name(vms_service, vm_name)
+ for vm_name in module.params['vms']
+ ]) if module.params['vms'] is not None else None
+ # Fetch host ids which should be assigned to affinity group:
+ host_ids = sorted([
+ get_id_by_name(hosts_service, host_name)
+ for host_name in module.params['hosts']
+ ]) if module.params['hosts'] is not None else None
+ vm_label_ids = sorted([
+ get_id_by_name(affinity_labels_service, label_name)
+ for label_name in module.params['vms_labels']
+ ]) if module.params['vms_labels'] is not None else None
+ host_label_ids = sorted([
+ get_id_by_name(affinity_labels_service, label_name)
+ for label_name in module.params['hosts_labels']
+ ]) if module.params['hosts_labels'] is not None else None
+ affinity_groups_module = AffinityGroupsModule(
+ connection=connection,
+ module=module,
+ service=affinity_groups_service,
+ vm_ids=vm_ids,
+ host_ids=host_ids,
+ host_label_ids=host_label_ids,
+ vm_label_ids=vm_label_ids,
+ )
+
+ state = module.params['state']
+ if state == 'present':
+ ret = affinity_groups_module.create()
+ elif state == 'absent':
+ ret = affinity_groups_module.remove()
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_affinity_label.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_affinity_label.py
new file mode 100644
index 000000000..7c44495f1
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_affinity_label.py
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_affinity_label
+short_description: Module to manage affinity labels in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "This module manage affinity labels in oVirt/RHV. It can also manage assignments
+ of those labels to hosts and VMs."
+options:
+ name:
+ description:
+ - "Name of the affinity label to manage."
+ required: true
+ type: str
+ state:
+ description:
+ - "Should the affinity label be present or absent."
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ cluster:
+ description:
+ - "Name of the cluster where vms and hosts resides."
+ type: str
+ vms:
+ description:
+ - "List of the VMs names, which should have assigned this affinity label."
+ type: list
+ elements: str
+ hosts:
+ description:
+ - "List of the hosts names, which should have assigned this affinity label."
+ type: list
+ elements: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create(if not exists) and assign affinity label to vms vm1 and vm2 and host host1
+- ovirt.ovirt.ovirt_affinity_label:
+ name: mylabel
+ cluster: mycluster
+ vms:
+ - vm1
+ - vm2
+ hosts:
+ - host1
+
+# To detach all VMs from label
+- ovirt.ovirt.ovirt_affinity_label:
+ name: mylabel
+ cluster: mycluster
+ vms: []
+
+# Remove affinity label
+- ovirt.ovirt.ovirt_affinity_label:
+ state: absent
+ name: mylabel
+'''
+
+RETURN = '''
+id:
+ description: ID of the affinity label which is managed
+ returned: On success if affinity label is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+affinity_label:
+ description: "Dictionary of all the affinity label attributes. Affinity label attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/affinity_label."
+ type: dict
+ returned: On success if affinity label is found.
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from collections import defaultdict
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ ovirt_full_argument_spec,
+)
+
+
+class AffinityLabelsModule(BaseModule):
+
+ def build_entity(self):
+ return otypes.AffinityLabel(name=self._module.params['name'])
+
+ def post_create(self, entity):
+ self.update_check(entity)
+
+ def pre_remove(self, entity):
+ self._module.params['vms'] = []
+ self._module.params['hosts'] = []
+ self.update_check(entity)
+
+ def _update_label_assignments(self, entity, name, label_obj_type):
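+ # Reconciles label assignments for one entity type ('vms' or 'hosts'):
+ # build a name -> [ids] map of the currently labeled objects (optionally
+ # restricted to the requested cluster), then attach the missing names
+ # and detach the surplus ones, honoring check mode.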
+ objs_service = getattr(self._connection.system_service(), '%s_service' % name)()
+ if self._module.params[name] is not None:
+ objs = self._connection.follow_link(getattr(entity, name))
+ objs_names = defaultdict(list)
+ for obj in objs:
+ labeled_entity = objs_service.service(obj.id).get()
+ if self._module.params['cluster'] is None:
+ objs_names[labeled_entity.name].append(obj.id)
+ elif self._connection.follow_link(labeled_entity.cluster).name == self._module.params['cluster']:
+ objs_names[labeled_entity.name].append(obj.id)
+
+ for obj in self._module.params[name]:
+ if obj not in objs_names:
+ for obj_id in objs_service.list(
+ search='name=%s and cluster=%s' % (obj, self._module.params['cluster'])
+ ):
+ label_service = getattr(self._service.service(entity.id), '%s_service' % name)()
+ if not self._module.check_mode:
+ label_service.add(**{
+ name[:-1]: label_obj_type(id=obj_id.id)
+ })
+ self.changed = True
+
+ for obj in objs_names:
+ if obj not in self._module.params[name]:
+ label_service = getattr(self._service.service(entity.id), '%s_service' % name)()
+ if not self._module.check_mode:
+ for obj_id in objs_names[obj]:
+ label_service.service(obj_id).remove()
+ self.changed = True
+
+ def update_check(self, entity):
+ self._update_label_assignments(entity, 'vms', otypes.Vm)
+ self._update_label_assignments(entity, 'hosts', otypes.Host)
+ return True
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ cluster=dict(default=None),
+ name=dict(required=True),
+ vms=dict(default=None, type='list', elements='str'),
+ hosts=dict(default=None, type='list', elements='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[
+ ('state', 'present', ['cluster']),
+ ],
+ )
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ affinity_labels_service = connection.system_service().affinity_labels_service()
+ affinity_labels_module = AffinityLabelsModule(
+ connection=connection,
+ module=module,
+ service=affinity_labels_service,
+ )
+
+ state = module.params['state']
+ if state == 'present':
+ ret = affinity_labels_module.create()
+ elif state == 'absent':
+ ret = affinity_labels_module.remove()
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_affinity_label_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_affinity_label_info.py
new file mode 100644
index 000000000..45b242143
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_affinity_label_info.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_affinity_label_info
+short_description: Retrieve information about one or more oVirt/RHV affinity labels
+version_added: "1.0.0"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Retrieve information about one or more oVirt/RHV affinity labels."
+ - This module was called C(ovirt_affinity_label_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_affinity_label_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_affinity_labels), which
+ contains a list of affinity labels. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ name:
+ description:
+ - "Name of the affinity labels which should be listed."
+ type: str
+ vm:
+ description:
+ - "Name of the VM, which affinity labels should be listed."
+ type: str
+ host:
+ description:
+ - "Name of the host, which affinity labels should be listed."
+ type: str
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url: https://ovirt.github.io/ovirt-engine-api-model/master/#types/affinity_label/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all affinity labels whose names start with C(label):
+- ovirt.ovirt.ovirt_affinity_label_info:
+ name: label*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_affinity_labels }}"
+
+# Gather information about all affinity labels, which are assigned to VMs
+# whose names start with C(postgres):
+- ovirt.ovirt.ovirt_affinity_label_info:
+ vm: postgres*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_affinity_labels }}"
+
+# Gather information about all affinity labels, which are assigned to hosts
+# whose names start with C(west):
+- ovirt.ovirt.ovirt_affinity_label_info:
+ host: west*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_affinity_labels }}"
+
+# Gather information about all affinity labels, which are assigned to hosts
+# whose names start with C(west) or to VMs whose names start with C(postgres):
+- ovirt.ovirt.ovirt_affinity_label_info:
+ host: west*
+ vm: postgres*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_affinity_labels }}"
+'''
+
+RETURN = '''
+ovirt_affinity_labels:
+ description: "List of dictionaries describing the affinity labels. Affinity labels attributes are mapped to dictionary keys,
+ all affinity labels attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/affinity_label."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ name=dict(default=None),
+ host=dict(default=None),
+ vm=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+ "The 'fetch_nested' and 'nested_attributes' are deprecated please use 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ affinity_labels_service = connection.system_service().affinity_labels_service()
+ labels = []
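+ # The name/host/vm filters below are additive: each one extends the
+ # result list, so passing several filters yields the union of their
+ # matches; with no filter at all, every label is returned.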
+ all_labels = affinity_labels_service.list(follow=",".join(module.params['follow']))
+ if module.params['name']:
+ labels.extend([
+ label for label in all_labels
+ if fnmatch.fnmatch(label.name, module.params['name'])
+ ])
+ if module.params['host']:
+ hosts_service = connection.system_service().hosts_service()
+ if search_by_name(hosts_service, module.params['host']) is None:
+ raise Exception("Host '%s' was not found." % module.params['host'])
+ labels.extend([
+ label
+ for label in all_labels
+ for host in connection.follow_link(label.hosts)
+ if fnmatch.fnmatch(hosts_service.service(host.id).get().name, module.params['host'])
+ ])
+ if module.params['vm']:
+ vms_service = connection.system_service().vms_service()
+ if search_by_name(vms_service, module.params['vm']) is None:
+ raise Exception("Vm '%s' was not found." % module.params['vm'])
+ labels.extend([
+ label
+ for label in all_labels
+ for vm in connection.follow_link(label.vms)
+ if fnmatch.fnmatch(vms_service.service(vm.id).get().name, module.params['vm'])
+ ])
+
+ if not (module.params['vm'] or module.params['host'] or module.params['name']):
+ labels = all_labels
+
+ result = dict(
+ ovirt_affinity_labels=[
+ get_dict_of_struct(
+ struct=label,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for label in labels
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_api_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_api_info.py
new file mode 100644
index 000000000..80c783c78
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_api_info.py
@@ -0,0 +1,97 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_api_info
+short_description: Retrieve information about the oVirt/RHV API
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+description:
+ - "Retrieve information about the oVirt/RHV API."
+ - This module was called C(ovirt_api_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_api_info) module no longer returns C(ansible_facts)!
+options:
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url: https://ovirt.github.io/ovirt-engine-api-model/master/#types/api/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+notes:
+ - "This module returns a variable C(ovirt_api),
+ which contains information about the oVirt/RHV API. You need to register the result with
+ the I(register) keyword to use it."
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about the oVirt API:
+- ovirt.ovirt.ovirt_api_info:
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_api }}"
+'''
+
+RETURN = '''
+ovirt_api:
+ description: "Dictionary describing the oVirt API information.
+ Api attributes are mapped to dictionary keys,
+ all API attributes can be found at following
+ url: https://ovirt.example.com/ovirt-engine/api/model#types/api."
+ returned: On success.
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec()
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ api = connection.system_service().get()
+ result = dict(
+ ovirt_api=get_dict_of_struct(
+ struct=api,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ )
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_auth.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_auth.py
new file mode 100644
index 000000000..11162aebb
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_auth.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_auth
+short_description: "Module to manage authentication to oVirt/RHV"
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+description:
+ - "This module authenticates to oVirt/RHV engine and creates SSO token, which should be later used in
+ all other oVirt/RHV modules, so all modules don't need to perform login and logout.
+ This module returns an Ansible fact called I(ovirt_auth). Every module can use this
+ fact as C(auth) parameter, to perform authentication."
+options:
+ state:
+ default: present
+ choices: ['present', 'absent']
+ description:
+ - "Specifies if a token should be created or revoked."
+ type: str
+ username:
+ required: False
+ description:
+ - "The name of the user. For example: I(admin@internal)
+ Default value is set by I(OVIRT_USERNAME) environment variable."
+ type: str
+ password:
+ required: False
+ description:
+ - "The password of the user. Default value is set by I(OVIRT_PASSWORD) environment variable."
+ type: str
+ token:
+ required: False
+ description:
+ - "SSO token to be used instead of login with username/password.
+ Default value is set by I(OVIRT_TOKEN) environment variable."
+ type: str
+ url:
+ required: False
+ description:
+ - "A string containing the API URL of the server.
+ For example: I(https://server.example.com/ovirt-engine/api).
+ Default value is set by I(OVIRT_URL) environment variable."
+ - "Either C(url) or C(hostname) is required."
+ type: str
+ hostname:
+ required: False
+ description:
+ - "A string containing the hostname of the server.
+ For example: I(server.example.com).
+ Default value is set by I(OVIRT_HOSTNAME) environment variable."
+ - "Either C(url) or C(hostname) is required."
+ type: str
+ insecure:
+ required: False
+ description:
+ - "A boolean flag that indicates if the server TLS certificate and host name should be checked."
+ type: bool
+ ca_file:
+ required: False
+ description:
+ - "A PEM file containing the trusted CA certificates. The
+ certificate presented by the server will be verified using these CA
+ certificates. If the C(ca_file) parameter is not set, the system-wide
+ CA certificate store is used.
+ Default value is set by I(OVIRT_CAFILE) environment variable."
+ type: path
+ timeout:
+ required: False
+ description:
+ - "The maximum total time to wait for the response, in
+ seconds. A value of zero (the default) means wait forever. If
+ the timeout expires before the response is received an exception
+ will be raised."
+ type: int
+ compress:
+ required: False
+ description:
+ - "A boolean flag indicating if the SDK should ask
+ the server to send compressed responses. The default is I(True).
+ Note that this is a hint for the server, and that it may return
+ uncompressed data even when this parameter is set to I(True)."
+ type: bool
+ default: true
+ kerberos:
+ required: False
+ default: False
+ description:
+ - "A boolean flag indicating if Kerberos authentication
+ should be used instead of the default basic authentication."
+ type: bool
+ headers:
+ required: False
+ description:
+ - "A dictionary of HTTP headers to be added to each API call."
+ type: dict
+ ovirt_auth:
+ description:
+ - "Previous run of the ovirt_auth used with C(state) absent"
+ - "Closes connection with the engine."
+ type: dict
+requirements:
+ - python >= 2.7
+ - ovirt-engine-sdk-python >= 4.4.0
+notes:
+ - "Everytime you use ovirt_auth module to obtain ticket, you need to also revoke the ticket,
+ when you no longer need it, otherwise the ticket would be revoked by engine when it expires.
+ For an example of how to achieve that, please take a look at I(examples) section."
+ - "In order to use this module you have to install oVirt/RHV Python SDK.
+ To ensure it's installed with correct version you can create the following task:
+ I(pip: name=ovirt-engine-sdk-python version=4.4.0)"
+ - "Note that in oVirt/RHV 4.1 if you want to use a user which is not administrator
+ you must enable the I(ENGINE_API_FILTER_BY_DEFAULT) variable in engine. In
+ oVirt/RHV 4.2 and later it's enabled by default."
+'''
+
+EXAMPLES = '''
+ - block:
+ # Create a vault with `ovirt_password` variable which store your
+ # oVirt/RHV user's password, and include that yaml file with variable:
+ - ansible.builtin.include_vars: ovirt_password.yml
+
+ - name: Obtain SSO token with using username/password credentials
+ ovirt.ovirt.ovirt_auth:
+ url: https://ovirt.example.com/ovirt-engine/api
+ username: admin@internal
+ ca_file: ca.pem
+ password: "{{ ovirt_password }}"
+
+ # Previous task generated I(ovirt_auth) fact, which you can later use
+ # in different modules as follows:
+ - ovirt.ovirt.ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ state: absent
+ name: myvm
+
+ always:
+ - name: Always revoke the SSO token
+ ovirt.ovirt.ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
+
+# When the user sets the following environment variables:
+# OVIRT_URL = https://fqdn/ovirt-engine/api
+# OVIRT_USERNAME = admin@internal
+# OVIRT_PASSWORD = the_password
+# the user can log in to oVirt using environment variables instead of
+# variables in a YAML file.
+# This is mainly useful when using Ansible Tower or AWX, as it works with
+# the Red Hat Virtualization credential type.
+ - name: Obtain SSO token
+ ovirt_auth:
+ state: present
+'''
+
+RETURN = '''
+ovirt_auth:
+ description: Authentication facts, needed to perform authentication to oVirt/RHV.
+ returned: success
+ type: complex
+ contains:
+ token:
+ description: SSO token which is used for connection to oVirt/RHV engine.
+ returned: success
+ type: str
+ sample: "kdfVWp9ZgeewBXV-iq3Js1-xQJZPSEQ334FLb3eksoEPRaab07DhZ8ED8ghz9lJd-MQ2GqtRIeqhvhCkrUWQPw"
+ url:
+ description: URL of the oVirt/RHV engine API endpoint.
+ returned: success
+ type: str
+ sample: "https://ovirt.example.com/ovirt-engine/api"
+ ca_file:
+ description: CA file, which is used to verify SSL/TLS connection.
+ returned: success
+ type: str
+ sample: "ca.pem"
+ insecure:
+ description: Flag indicating if insecure connection is used.
+ returned: success
+ type: bool
+ sample: False
+ timeout:
+ description: Number of seconds to wait for response.
+ returned: success
+ type: int
+ sample: 0
+ compress:
+ description: Flag indicating if compression is used for connection.
+ returned: success
+ type: bool
+ sample: True
+ kerberos:
+ description: Flag indicating if kerberos is used for authentication.
+ returned: success
+ type: bool
+ sample: False
+ headers:
+ description: Dictionary of HTTP headers to be added to each API call.
+ returned: success
+ type: dict
+'''
+
+import os
+import traceback
+
+try:
+ import ovirtsdk4 as sdk
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import check_sdk
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ url=dict(default=None),
+ hostname=dict(default=None),
+ username=dict(default=None),
+ password=dict(default=None, no_log=True),
+ ca_file=dict(default=None, type='path'),
+ insecure=dict(required=False, type='bool', default=None),
+ timeout=dict(required=False, type='int', default=0),
+ compress=dict(required=False, type='bool', default=True),
+ kerberos=dict(required=False, type='bool', default=False),
+ headers=dict(required=False, type='dict'),
+ state=dict(default='present', choices=['present', 'absent']),
+ token=dict(default=None, no_log=False),
+ ovirt_auth=dict(required=False, type='dict'),
+ ),
+ required_if=[
+ ('state', 'absent', ['ovirt_auth']),
+ ],
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+
+ state = module.params.get('state')
+ if state == 'present':
+ params = module.params
+ elif state == 'absent':
+ params = module.params['ovirt_auth']
+
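+ # Parameter lookup order: an explicitly passed module parameter wins,
+ # then the corresponding OVIRT_* environment variable; parameters marked
+ # required abort the module only when a new token is being created.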
+ def get_required_parameter(param, env_var, required=False):
+ var = params.get(param) or os.environ.get(env_var)
+ if not var and required and state == 'present':
+ module.fail_json(msg="'%s' is a required parameter." % param)
+
+ return var
+
+ url = get_required_parameter('url', 'OVIRT_URL', required=False)
+ hostname = get_required_parameter('hostname', 'OVIRT_HOSTNAME', required=False)
+ if url is None and hostname is None:
+ module.fail_json(msg="You must specify either 'url' or 'hostname'.")
+
+ if url is None and hostname is not None:
+ url = 'https://{0}/ovirt-engine/api'.format(hostname)
+
+ username = get_required_parameter('username', 'OVIRT_USERNAME')
+ password = get_required_parameter('password', 'OVIRT_PASSWORD')
+ token = get_required_parameter('token', 'OVIRT_TOKEN')
+ ca_file = get_required_parameter('ca_file', 'OVIRT_CAFILE')
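+ # When 'insecure' is not set explicitly, it defaults to True only if no
+ # CA file was provided, i.e. TLS verification is enabled by supplying
+ # ca_file (or by insecure=false).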
+ insecure = params.get('insecure') if params.get('insecure') is not None else not bool(ca_file)
+
+ connection = sdk.Connection(
+ url=url,
+ username=username,
+ password=password,
+ ca_file=ca_file,
+ insecure=insecure,
+ timeout=params.get('timeout'),
+ compress=params.get('compress'),
+ kerberos=params.get('kerberos'),
+ headers=params.get('headers'),
+ token=token,
+ )
+ try:
+ token = connection.authenticate()
+ module.exit_json(
+ changed=False,
+ ansible_facts=dict(
+ ovirt_auth=dict(
+ token=token,
+ url=url,
+ ca_file=ca_file,
+ insecure=insecure,
+ timeout=params.get('timeout'),
+ compress=params.get('compress'),
+ kerberos=params.get('kerberos'),
+ headers=params.get('headers'),
+ ) if state == 'present' else dict()
+ )
+ )
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ # Close the connection; revoke the token only when state is absent:
+ connection.close(logout=state == 'absent')
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_cluster.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_cluster.py
new file mode 100644
index 000000000..df61cafcc
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_cluster.py
@@ -0,0 +1,792 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_cluster
+short_description: Module to manage clusters in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Module to manage clusters in oVirt/RHV"
+options:
+ id:
+ description:
+ - "ID of the cluster to manage."
+ type: str
+ name:
+ description:
+ - "Name of the cluster to manage."
+ required: true
+ type: str
+ state:
+ description:
+ - "Should the cluster be present or absent."
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ data_center:
+ description:
+ - "Datacenter name where cluster reside."
+ type: str
+ description:
+ description:
+ - "Description of the cluster."
+ type: str
+ comment:
+ description:
+ - "Comment of the cluster."
+ type: str
+ network:
+ description:
+ - "Management network of cluster to access cluster hosts."
+ type: str
+ ballooning:
+ description:
+ - "If I(True) enable memory balloon optimization. Memory balloon is used to
+ re-distribute / reclaim the host memory based on VM needs
+ in a dynamic way."
+ type: bool
+ aliases: ['balloon']
+ virt:
+ description:
+ - "If I(True), hosts in this cluster will be used to run virtual machines."
+ type: bool
+ gluster:
+ description:
+ - "If I(True), hosts in this cluster will be used as Gluster Storage
+ server nodes, and not for running virtual machines."
+ - "By default the cluster is created for virtual machine hosts."
+ type: bool
+ threads_as_cores:
+ description:
+ - "If I(True) the exposed host threads would be treated as cores
+ which can be utilized by virtual machines."
+ type: bool
+ ksm:
+ description:
+ - "I I(True) MoM enables to run Kernel Same-page Merging I(KSM) when
+ necessary and when it can yield a memory saving benefit that
+ outweighs its CPU cost."
+ type: bool
+ ksm_numa:
+ description:
+ - "If I(True) enables KSM C(ksm) for best performance inside NUMA nodes."
+ type: bool
+ ha_reservation:
+ description:
+ - "If I(True) enables the oVirt/RHV to monitor cluster capacity for highly
+ available virtual machines."
+ type: bool
+ trusted_service:
+ description:
+ - "If I(True) enables integration with an OpenAttestation server."
+ type: bool
+ vm_reason:
+ description:
+ - "If I(True) enables an optional reason field when a virtual machine
+ is shut down from the Manager, allowing the administrator to
+ provide an explanation for the maintenance."
+ type: bool
+ host_reason:
+ description:
+ - "If I(True) enables an optional reason field when a host is placed
+ into maintenance mode from the Manager, allowing the administrator
+ to provide an explanation for the maintenance."
+ type: bool
+ memory_policy:
+ description:
+ - "I(disabled) - Disables memory page sharing."
+ - "I(server) - Sets the memory page sharing threshold to 150% of the system memory on each host."
+ - "I(desktop) - Sets the memory page sharing threshold to 200% of the system memory on each host."
+ choices: ['disabled', 'server', 'desktop']
+ type: str
+ aliases: ['performance_preset']
+ rng_sources:
+ description:
+ - "List that specify the random number generator devices that all hosts in the cluster will use."
+ - "Supported generators are: I(hwrng) and I(random)."
+ type: list
+ elements: str
+ spice_proxy:
+ description:
+ - "The proxy by which the SPICE client will connect to virtual machines."
+ - "The address must be in the following format: I(protocol://[host]:[port])"
+ type: str
+ fence_enabled:
+ description:
+ - "If I(True) enables fencing on the cluster."
+ - "Fencing is enabled by default."
+ type: bool
+ fence_skip_if_gluster_bricks_up:
+ description:
+ - "A flag indicating if fencing should be skipped if Gluster bricks are up and running in the host being fenced."
+ - "This flag is optional, and the default value is `false`."
+ type: bool
+ fence_skip_if_gluster_quorum_not_met:
+ description:
+ - "A flag indicating if fencing should be skipped if Gluster bricks are up and running and Gluster quorum will not
+ be met without those bricks."
+ - "This flag is optional, and the default value is `false`."
+ type: bool
+ fence_skip_if_sd_active:
+ description:
+ - "If I(True) any hosts in the cluster that are Non Responsive
+ and still connected to storage will not be fenced."
+ type: bool
+ fence_skip_if_connectivity_broken:
+ description:
+ - "If I(True) fencing will be temporarily disabled if the percentage
+ of hosts in the cluster that are experiencing connectivity issues
+ is greater than or equal to the defined threshold."
+ - "The threshold can be specified by C(fence_connectivity_threshold)."
+ type: bool
+ fence_connectivity_threshold:
+ description:
+ - "The threshold used by C(fence_skip_if_connectivity_broken)."
+ type: int
+ resilience_policy:
+ description:
+ - "The resilience policy defines how the virtual machines are prioritized in the migration."
+ - "Following values are supported:"
+ - "C(do_not_migrate) - Prevents virtual machines from being migrated. "
+ - "C(migrate) - Migrates all virtual machines in order of their defined priority."
+ - "C(migrate_highly_available) - Migrates only highly available virtual machines to prevent overloading other hosts."
+ choices: ['do_not_migrate', 'migrate', 'migrate_highly_available']
+ type: str
+ migration_bandwidth:
+ description:
+ - "The bandwidth settings define the maximum bandwidth of both outgoing and incoming migrations per host."
+ - "Following bandwidth options are supported:"
+ - "C(auto) - Bandwidth is copied from the I(rate limit) [Mbps] setting in the data center host network QoS."
+ - "C(hypervisor_default) - Bandwidth is controlled by local VDSM setting on sending host."
+ - "C(custom) - Defined by user (in Mbps)."
+ choices: ['auto', 'hypervisor_default', 'custom']
+ type: str
+ migration_bandwidth_limit:
+ description:
+ - "Set the I(custom) migration bandwidth limit."
+ - "This parameter is used only when C(migration_bandwidth) is I(custom)."
+ type: int
+ migration_auto_converge:
+ description:
+ - "If I(True) auto-convergence is used during live migration of virtual machines."
+ - "Used only when C(migration_policy) is set to I(legacy)."
+ - "Following options are supported:"
+ - "C(true) - Override the global setting to I(true)."
+ - "C(false) - Override the global setting to I(false)."
+ - "C(inherit) - Use value which is set globally."
+ choices: ['true', 'false', 'inherit']
+ type: str
+ migration_compressed:
+ description:
+ - "If I(True) compression is used during live migration of the virtual machine."
+ - "Used only when C(migration_policy) is set to I(legacy)."
+ - "Following options are supported:"
+ - "C(true) - Override the global setting to I(true)."
+ - "C(false) - Override the global setting to I(false)."
+ - "C(inherit) - Use value which is set globally."
+ choices: ['true', 'false', 'inherit']
+ type: str
+ migration_encrypted:
+ description:
+ - "If I(True) encryption is used during live migration of the virtual machine."
+ - "Following options are supported:"
+ - "C(true) - Override the global setting to I(true)."
+ - "C(false) - Override the global setting to I(false)."
+ - "C(inherit) - Use value which is set globally."
+ choices: ['true', 'false', 'inherit']
+ type: str
+ migration_policy:
+ description:
+ - "A migration policy defines the conditions for live migrating
+ virtual machines in the event of host failure."
+ - "Following policies are supported:"
+ - "C(legacy) - Legacy behavior of 3.6 version."
+ - "C(minimal_downtime) - Virtual machines should not experience any significant downtime."
+ - "C(suspend_workload) - Virtual machines may experience a more significant downtime."
+ - "C(post_copy) - Virtual machines should not experience any significant downtime.
+ If the VM migration is not converging for a long time, the migration will be switched to post-copy.
+ Added in version I(2.4)."
+ choices: ['legacy', 'minimal_downtime', 'suspend_workload', 'post_copy']
+ type: str
+ serial_policy:
+ description:
+ - "Specify a serial number policy for the virtual machines in the cluster."
+ - "Following options are supported:"
+ - "C(vm) - Sets the virtual machine's UUID as its serial number."
+ - "C(host) - Sets the host's UUID as the virtual machine's serial number."
+ - "C(custom) - Allows you to specify a custom serial number in C(serial_policy_value)."
+ choices: ['vm', 'host', 'custom']
+ type: str
+ serial_policy_value:
+ description:
+ - "Allows you to specify a custom serial number."
+ - "This parameter is used only when C(serial_policy) is I(custom)."
+ type: str
+ scheduling_policy:
+ description:
+ - "Name of the scheduling policy to be used for cluster."
+ type: str
+ scheduling_policy_properties:
+ description:
+ - "Custom scheduling policy properties of the cluster."
+ - "These optional properties override the properties of the
+ scheduling policy specified by the C(scheduling_policy) parameter."
+ suboptions:
+ name:
+ description:
+ - Name of the scheduling policy property.
+ value:
+ description:
+ - Value of scheduling policy property.
+ type: list
+ elements: dict
+ cpu_arch:
+ description:
+ - "CPU architecture of cluster."
+ choices: ['x86_64', 'ppc64', 'undefined']
+ type: str
+ cpu_type:
+ description:
+ - "CPU codename. For example I(Intel SandyBridge Family)."
+ type: str
+ switch_type:
+ description:
+ - "Type of switch to be used by all networks in given cluster.
+ Either I(legacy) which is using linux bridge or I(ovs) using
+ Open vSwitch."
+ choices: ['legacy', 'ovs']
+ type: str
+ compatibility_version:
+ description:
+ - "The compatibility version of the cluster. All hosts in this
+ cluster must support at least this compatibility version."
+ type: str
+ mac_pool:
+ description:
+ - "MAC pool to be used by this cluster."
+ - "C(Note:)"
+ - "This is supported since oVirt version 4.1."
+ type: str
+ external_network_providers:
+ description:
+ - "List of references to the external network providers available
+ in the cluster. If the automatic deployment of the external
+ network provider is supported, the networks of the referenced
+ network provider are available on every host in the cluster."
+ - "This is supported since oVirt version 4.2."
+ suboptions:
+ name:
+ description:
+ - Name of the external network provider. Either C(name) or C(id) is required.
+ id:
+ description:
+ - ID of the external network provider. Either C(name) or C(id) is required.
+ type: list
+ elements: dict
+ firewall_type:
+ description:
+ - "The type of firewall to be used on hosts in this cluster."
+ - "Up to version 4.1, it was always I(iptables). Since version 4.2, you can choose between I(iptables) and I(firewalld).
+ For clusters with a compatibility version of 4.2 and higher, the default firewall type is I(firewalld)."
+ type: str
+ choices: ['firewalld', 'iptables']
+ gluster_tuned_profile:
+ description:
+ - "The name of the U(https://fedorahosted.org/tuned) to set on all the hosts in the cluster. This is not mandatory
+ and relevant only for clusters with Gluster service."
+ - "Could be for example I(virtual-host), I(rhgs-sequential-io), I(rhgs-random-io)"
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create cluster
+- ovirt.ovirt.ovirt_cluster:
+ data_center: mydatacenter
+ name: mycluster
+ cpu_type: Intel SandyBridge Family
+ description: mycluster
+ compatibility_version: 4.0
+
+# Create virt service cluster:
+- ovirt.ovirt.ovirt_cluster:
+ data_center: mydatacenter
+ name: mycluster
+ cpu_type: Intel Nehalem Family
+ description: mycluster
+ switch_type: legacy
+ compatibility_version: 4.0
+ ballooning: true
+ gluster: false
+ threads_as_cores: true
+ ha_reservation: true
+ trusted_service: false
+ host_reason: false
+ vm_reason: true
+ ksm_numa: true
+ memory_policy: server
+ rng_sources:
+ - hwrng
+ - random
+
+# Create cluster with default network provider
+- ovirt.ovirt.ovirt_cluster:
+ name: mycluster
+ data_center: Default
+ cpu_type: Intel SandyBridge Family
+ external_network_providers:
+ - name: ovirt-provider-ovn
+
+# Remove cluster
+- ovirt.ovirt.ovirt_cluster:
+ state: absent
+ name: mycluster
+
+# Change cluster Name
+- ovirt.ovirt.ovirt_cluster:
+ id: 00000000-0000-0000-0000-000000000000
+ name: "new_cluster_name"
+'''
+
+RETURN = '''
+id:
+ description: ID of the cluster which is managed
+ returned: On success if cluster is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+cluster:
+ description: "Dictionary of all the cluster attributes. Cluster attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/cluster."
+ type: dict
+ returned: On success if cluster is found.
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ ovirt_full_argument_spec,
+ search_by_name,
+ get_id_by_name,
+)
+
+
+class ClustersModule(BaseModule):
+
+ def __get_major(self, full_version):
+ if full_version is None:
+ return None
+ if isinstance(full_version, otypes.Version):
+ return full_version.major
+ return int(full_version.split('.')[0])
+
+ def __get_minor(self, full_version):
+ if full_version is None:
+ return None
+ if isinstance(full_version, otypes.Version):
+ return full_version.minor
+ return int(full_version.split('.')[1])
+
+ def param(self, name, default=None):
+ return self._module.params.get(name, default)
+
+ def _get_memory_policy(self):
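+ # Map the symbolic memory_policy names from the documentation to the
+ # engine's over-commit percentages (disabled=100%, server=150%,
+ # desktop=200%); returns None when the parameter is unset.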
+ memory_policy = self.param('memory_policy')
+ if memory_policy == 'desktop':
+ return 200
+ elif memory_policy == 'server':
+ return 150
+ elif memory_policy == 'disabled':
+ return 100
+
+ def _get_policy_id(self):
+ # These are hardcoded IDs, once there is API, please fix this.
+ # legacy - 00000000-0000-0000-0000-000000000000
+ # minimal downtime - 80554327-0569-496b-bdeb-fcbbf52b827b
+ # suspend workload if needed - 80554327-0569-496b-bdeb-fcbbf52b827c
+ # post copy - a7aeedb2-8d66-4e51-bb22-32595027ce71
+ migration_policy = self.param('migration_policy')
+ if migration_policy == 'legacy':
+ return '00000000-0000-0000-0000-000000000000'
+ elif migration_policy == 'minimal_downtime':
+ return '80554327-0569-496b-bdeb-fcbbf52b827b'
+ elif migration_policy == 'suspend_workload':
+ return '80554327-0569-496b-bdeb-fcbbf52b827c'
+ elif migration_policy == 'post_copy':
+ return 'a7aeedb2-8d66-4e51-bb22-32595027ce71'
+
+ def _get_sched_policy(self):
+ sched_policy = None
+ if self.param('scheduling_policy'):
+ sched_policies_service = self._connection.system_service().scheduling_policies_service()
+ sched_policy = search_by_name(sched_policies_service, self.param('scheduling_policy'))
+ if not sched_policy:
+ raise Exception("Scheduling policy '%s' was not found" % self.param('scheduling_policy'))
+
+ return sched_policy
+
+ def _get_mac_pool(self):
+ mac_pool = None
+ if self._module.params.get('mac_pool'):
+ mac_pool = search_by_name(
+ self._connection.system_service().mac_pools_service(),
+ self._module.params.get('mac_pool'),
+ )
+
+ return mac_pool
+
+ def _get_external_network_providers(self):
+ return self.param('external_network_providers') or []
+
+ def _get_external_network_provider_id(self, external_provider):
+ return external_provider.get('id') or get_id_by_name(
+ self._connection.system_service().openstack_network_providers_service(),
+ external_provider.get('name')
+ )
+
+ def _get_external_network_providers_entity(self):
+ if self.param('external_network_providers') is not None:
+ return [otypes.ExternalProvider(id=self._get_external_network_provider_id(external_provider))
+ for external_provider in self.param('external_network_providers')]
+
+ def build_entity(self):
+ sched_policy = self._get_sched_policy()
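+ # Each nested otypes.* object below is constructed only when at least
+ # one of its module parameters was supplied; otherwise None is passed,
+ # which leaves the corresponding engine value untouched on update.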
+ return otypes.Cluster(
+ id=self.param('id'),
+ name=self.param('name'),
+ comment=self.param('comment'),
+ description=self.param('description'),
+ ballooning_enabled=self.param('ballooning'),
+ gluster_service=self.param('gluster'),
+ virt_service=self.param('virt'),
+ threads_as_cores=self.param('threads_as_cores'),
+ ha_reservation=self.param('ha_reservation'),
+ trusted_service=self.param('trusted_service'),
+ optional_reason=self.param('vm_reason'),
+ maintenance_reason_required=self.param('host_reason'),
+ scheduling_policy=otypes.SchedulingPolicy(
+ id=sched_policy.id,
+ ) if sched_policy else None,
+ serial_number=otypes.SerialNumber(
+ policy=otypes.SerialNumberPolicy(self.param('serial_policy')),
+ value=self.param('serial_policy_value'),
+ ) if (
+ self.param('serial_policy') is not None or
+ self.param('serial_policy_value') is not None
+ ) else None,
+ migration=otypes.MigrationOptions(
+ auto_converge=otypes.InheritableBoolean(
+ self.param('migration_auto_converge'),
+ ) if self.param('migration_auto_converge') else None,
+ bandwidth=otypes.MigrationBandwidth(
+ assignment_method=otypes.MigrationBandwidthAssignmentMethod(
+ self.param('migration_bandwidth'),
+ ) if self.param('migration_bandwidth') else None,
+ custom_value=self.param('migration_bandwidth_limit'),
+ ) if (
+ self.param('migration_bandwidth') or
+ self.param('migration_bandwidth_limit')
+ ) else None,
+ compressed=otypes.InheritableBoolean(
+ self.param('migration_compressed'),
+ ) if self.param('migration_compressed') else None,
+ encrypted=otypes.InheritableBoolean(
+ self.param('migration_encrypted'),
+ ) if self.param('migration_encrypted') else None,
+ policy=otypes.MigrationPolicy(
+ id=self._get_policy_id()
+ ) if self.param('migration_policy') else None,
+ ) if (
+ self.param('migration_bandwidth') is not None or
+ self.param('migration_bandwidth_limit') is not None or
+ self.param('migration_auto_converge') is not None or
+ self.param('migration_compressed') is not None or
+ self.param('migration_encrypted') is not None or
+ self.param('migration_policy') is not None
+ ) else None,
+ error_handling=otypes.ErrorHandling(
+ on_error=otypes.MigrateOnError(
+ self.param('resilience_policy')
+ ),
+ ) if self.param('resilience_policy') else None,
+ fencing_policy=otypes.FencingPolicy(
+ enabled=self.param('fence_enabled'),
+ skip_if_gluster_bricks_up=self.param('fence_skip_if_gluster_bricks_up'),
+ skip_if_gluster_quorum_not_met=self.param('fence_skip_if_gluster_quorum_not_met'),
+ skip_if_connectivity_broken=otypes.SkipIfConnectivityBroken(
+ enabled=self.param('fence_skip_if_connectivity_broken'),
+ threshold=self.param('fence_connectivity_threshold'),
+ ) if (
+ self.param('fence_skip_if_connectivity_broken') is not None or
+ self.param('fence_connectivity_threshold') is not None
+ ) else None,
+ skip_if_sd_active=otypes.SkipIfSdActive(
+ enabled=self.param('fence_skip_if_sd_active'),
+ ) if self.param('fence_skip_if_sd_active') is not None else None,
+ ) if (
+ self.param('fence_enabled') is not None or
+ self.param('fence_skip_if_sd_active') is not None or
+ self.param('fence_skip_if_connectivity_broken') is not None or
+ self.param('fence_skip_if_gluster_bricks_up') is not None or
+ self.param('fence_skip_if_gluster_quorum_not_met') is not None or
+ self.param('fence_connectivity_threshold') is not None
+ ) else None,
+ display=otypes.Display(
+ proxy=self.param('spice_proxy'),
+ ) if self.param('spice_proxy') else None,
+ required_rng_sources=[
+ otypes.RngSource(rng) for rng in self.param('rng_sources')
+ ] if self.param('rng_sources') else None,
+ memory_policy=otypes.MemoryPolicy(
+ over_commit=otypes.MemoryOverCommit(
+ percent=self._get_memory_policy(),
+ ),
+ ) if self.param('memory_policy') else None,
+ ksm=otypes.Ksm(
+ enabled=self.param('ksm'),
+ merge_across_nodes=not self.param('ksm_numa'),
+ ) if (
+ self.param('ksm_numa') is not None or
+ self.param('ksm') is not None
+ ) else None,
+ data_center=otypes.DataCenter(
+ name=self.param('data_center'),
+ ) if self.param('data_center') else None,
+ management_network=otypes.Network(
+ name=self.param('network'),
+ ) if self.param('network') else None,
+ cpu=otypes.Cpu(
+ architecture=otypes.Architecture(
+ self.param('cpu_arch')
+ ) if self.param('cpu_arch') else None,
+ type=self.param('cpu_type'),
+ ) if (
+ self.param('cpu_arch') or self.param('cpu_type')
+ ) else None,
+ version=otypes.Version(
+ major=self.__get_major(self.param('compatibility_version')),
+ minor=self.__get_minor(self.param('compatibility_version')),
+ ) if self.param('compatibility_version') else None,
+ switch_type=otypes.SwitchType(
+ self.param('switch_type')
+ ) if self.param('switch_type') else None,
+ mac_pool=otypes.MacPool(
+ id=get_id_by_name(self._connection.system_service().mac_pools_service(), self.param('mac_pool'))
+ ) if self.param('mac_pool') else None,
+ external_network_providers=self._get_external_network_providers_entity(),
+ custom_scheduling_policy_properties=[
+ otypes.Property(
+ name=sp.get('name'),
+ value=str(sp.get('value')),
+ ) for sp in self.param('scheduling_policy_properties') if sp
+ ] if self.param('scheduling_policy_properties') is not None else None,
+ firewall_type=otypes.FirewallType(
+ self.param('firewall_type')
+ ) if self.param('firewall_type') else None,
+ gluster_tuned_profile=self.param('gluster_tuned_profile'),
+ )
+
+ def _matches_entity(self, item, entity):
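+        # equal() treats a missing 'id' or 'name' in the dict as a wildcard,
+        # so only the keys the user actually supplied are compared.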
+ return equal(item.get('id'), entity.id) and equal(item.get('name'), entity.name)
+
+ def _update_check_external_network_providers(self, entity):
+ if self.param('external_network_providers') is None:
+ return True
+ if entity.external_network_providers is None:
+ return not self.param('external_network_providers')
+ entity_providers = self._connection.follow_link(entity.external_network_providers)
+ entity_provider_ids = [provider.id for provider in entity_providers]
+ entity_provider_names = [provider.name for provider in entity_providers]
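+        # Every requested provider must be present on the entity,
+        # matched by id when given, otherwise by name.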
+ for provider in self._get_external_network_providers():
+ if provider.get('id'):
+ if provider.get('id') not in entity_provider_ids:
+ return False
+ elif provider.get('name') and provider.get('name') not in entity_provider_names:
+ return False
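+        # Conversely, the entity must not carry any provider
+        # that was not requested in the module parameters.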
+ for entity_provider in entity_providers:
+ if not any(self._matches_entity(provider, entity_provider)
+ for provider in self._get_external_network_providers()):
+ return False
+ return True
+
+ def update_check(self, entity):
+ sched_policy = self._get_sched_policy()
+ migration_policy = getattr(entity.migration, 'policy', None)
+ cluster_cpu = getattr(entity, 'cpu', dict())
+
+ def check_custom_scheduling_policy_properties():
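+            # Every (name, value) pair passed to the module must already be
+            # present on the entity; extra properties on the entity are
+            # ignored, and the check passes when the parameter is unset.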
+ if self.param('scheduling_policy_properties'):
+ current = []
+ if entity.custom_scheduling_policy_properties:
+ current = [(sp.name, str(sp.value)) for sp in entity.custom_scheduling_policy_properties]
+ passed = [(sp.get('name'), str(sp.get('value'))) for sp in self.param('scheduling_policy_properties') if sp]
+                for p in passed:
+                    if p not in current:
+                        return False
+ return True
+
+ return (
+ check_custom_scheduling_policy_properties() and
+ equal(self.param('name'), entity.name) and
+ equal(self.param('comment'), entity.comment) and
+ equal(self.param('description'), entity.description) and
+ equal(self.param('switch_type'), str(entity.switch_type)) and
+ equal(self.param('cpu_arch'), str(getattr(cluster_cpu, 'architecture', None))) and
+ equal(self.param('cpu_type'), getattr(cluster_cpu, 'type', None)) and
+ equal(self.param('ballooning'), entity.ballooning_enabled) and
+ equal(self.param('gluster'), entity.gluster_service) and
+ equal(self.param('virt'), entity.virt_service) and
+ equal(self.param('threads_as_cores'), entity.threads_as_cores) and
+ equal(self.param('ksm_numa'), not entity.ksm.merge_across_nodes) and
+ equal(self.param('ksm'), entity.ksm.enabled) and
+ equal(self.param('ha_reservation'), entity.ha_reservation) and
+ equal(self.param('trusted_service'), entity.trusted_service) and
+ equal(self.param('host_reason'), entity.maintenance_reason_required) and
+ equal(self.param('vm_reason'), entity.optional_reason) and
+ equal(self.param('spice_proxy'), getattr(entity.display, 'proxy', None)) and
+ equal(self.param('fence_enabled'), entity.fencing_policy.enabled) and
+ equal(self.param('fence_skip_if_gluster_bricks_up'), entity.fencing_policy.skip_if_gluster_bricks_up) and
+ equal(self.param('fence_skip_if_gluster_quorum_not_met'), entity.fencing_policy.skip_if_gluster_quorum_not_met) and
+ equal(self.param('fence_skip_if_sd_active'), entity.fencing_policy.skip_if_sd_active.enabled) and
+ equal(self.param('fence_skip_if_connectivity_broken'), entity.fencing_policy.skip_if_connectivity_broken.enabled) and
+ equal(self.param('fence_connectivity_threshold'), entity.fencing_policy.skip_if_connectivity_broken.threshold) and
+ equal(self.param('resilience_policy'), str(entity.error_handling.on_error)) and
+ equal(self.param('migration_bandwidth'), str(entity.migration.bandwidth.assignment_method)) and
+ equal(self.param('migration_auto_converge'), str(entity.migration.auto_converge)) and
+ equal(self.param('migration_compressed'), str(entity.migration.compressed)) and
+ equal(self.param('migration_encrypted'), str(entity.migration.encrypted)) and
+ equal(self.param('serial_policy'), str(getattr(entity.serial_number, 'policy', None))) and
+ equal(self.param('serial_policy_value'), getattr(entity.serial_number, 'value', None)) and
+ equal(self.param('scheduling_policy'), getattr(self._connection.follow_link(entity.scheduling_policy), 'name', None)) and
+ equal(self.param('firewall_type'), str(entity.firewall_type)) and
+ equal(self.param('gluster_tuned_profile'), getattr(entity, 'gluster_tuned_profile', None)) and
+ equal(self._get_policy_id(), getattr(migration_policy, 'id', None)) and
+ equal(self._get_memory_policy(), entity.memory_policy.over_commit.percent) and
+ equal(self.__get_minor(self.param('compatibility_version')), self.__get_minor(entity.version)) and
+ equal(self.__get_major(self.param('compatibility_version')), self.__get_major(entity.version)) and
+ equal(
+ self.param('migration_bandwidth_limit') if self.param('migration_bandwidth') == 'custom' else None,
+ entity.migration.bandwidth.custom_value
+ ) and
+ equal(
+ sorted(self.param('rng_sources')) if self.param('rng_sources') else None,
+ sorted([
+ str(source) for source in entity.required_rng_sources
+ ])
+ ) and
+ equal(
+ get_id_by_name(self._connection.system_service().mac_pools_service(), self.param('mac_pool'), raise_error=False),
+ entity.mac_pool.id
+ ) and
+ self._update_check_external_network_providers(entity)
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ name=dict(required=True),
+ id=dict(default=None),
+ ballooning=dict(default=None, type='bool', aliases=['balloon']),
+ gluster=dict(default=None, type='bool'),
+ virt=dict(default=None, type='bool'),
+ threads_as_cores=dict(default=None, type='bool'),
+ ksm_numa=dict(default=None, type='bool'),
+ ksm=dict(default=None, type='bool'),
+ ha_reservation=dict(default=None, type='bool'),
+ trusted_service=dict(default=None, type='bool'),
+ vm_reason=dict(default=None, type='bool'),
+ host_reason=dict(default=None, type='bool'),
+ memory_policy=dict(default=None, choices=['disabled', 'server', 'desktop'], aliases=['performance_preset']),
+ rng_sources=dict(default=None, type='list', elements='str'),
+ spice_proxy=dict(default=None),
+ fence_enabled=dict(default=None, type='bool'),
+ fence_skip_if_gluster_bricks_up=dict(default=None, type='bool'),
+ fence_skip_if_gluster_quorum_not_met=dict(default=None, type='bool'),
+ fence_skip_if_sd_active=dict(default=None, type='bool'),
+ fence_skip_if_connectivity_broken=dict(default=None, type='bool'),
+ fence_connectivity_threshold=dict(default=None, type='int'),
+ resilience_policy=dict(default=None, choices=['migrate_highly_available', 'migrate', 'do_not_migrate']),
+ migration_bandwidth=dict(default=None, choices=['auto', 'hypervisor_default', 'custom']),
+ migration_bandwidth_limit=dict(default=None, type='int'),
+ migration_auto_converge=dict(default=None, choices=['true', 'false', 'inherit']),
+ migration_compressed=dict(default=None, choices=['true', 'false', 'inherit']),
+ migration_encrypted=dict(default=None, choices=['true', 'false', 'inherit']),
+ migration_policy=dict(
+ default=None,
+ choices=['legacy', 'minimal_downtime', 'suspend_workload', 'post_copy']
+ ),
+ serial_policy=dict(default=None, choices=['vm', 'host', 'custom']),
+ serial_policy_value=dict(default=None),
+ scheduling_policy=dict(default=None),
+ data_center=dict(default=None),
+ description=dict(default=None),
+ comment=dict(default=None),
+ network=dict(default=None),
+ cpu_arch=dict(default=None, choices=['ppc64', 'undefined', 'x86_64']),
+ cpu_type=dict(default=None),
+ switch_type=dict(default=None, choices=['legacy', 'ovs']),
+ compatibility_version=dict(default=None),
+ mac_pool=dict(default=None),
+ external_network_providers=dict(default=None, type='list', elements='dict'),
+ scheduling_policy_properties=dict(type='list', elements='dict'),
+ firewall_type=dict(choices=['iptables', 'firewalld'], default=None),
+ gluster_tuned_profile=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ clusters_service = connection.system_service().clusters_service()
+ clusters_module = ClustersModule(
+ connection=connection,
+ module=module,
+ service=clusters_service,
+ )
+
+ state = module.params['state']
+ if state == 'present':
+ ret = clusters_module.create()
+ elif state == 'absent':
+ ret = clusters_module.remove()
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_cluster_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_cluster_info.py
new file mode 100644
index 000000000..c949aa0cf
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_cluster_info.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_cluster_info
+short_description: Retrieve information about one or more oVirt/RHV clusters
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV clusters."
+ - This module was called C(ovirt_cluster_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_cluster_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_clusters), which
+ contains a list of clusters. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search cluster X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+ type: str
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url: https://ovirt.github.io/ovirt-engine-api-model/master/#types/cluster/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all clusters whose names start with C(production):
+- ovirt.ovirt.ovirt_cluster_info:
+    pattern: name=production*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_clusters }}"
+'''
+
+RETURN = '''
+ovirt_clusters:
+ description: "List of dictionaries describing the clusters. Cluster attributes are mapped to dictionary keys,
+    all cluster attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/cluster."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+ "The 'fetch_nested' and 'nested_attributes' are deprecated please use 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ clusters_service = connection.system_service().clusters_service()
+ clusters = clusters_service.list(
+ search=module.params['pattern'],
+ follow=",".join(module.params['follow'])
+ )
+ result = dict(
+ ovirt_clusters=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in clusters
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_datacenter.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_datacenter.py
new file mode 100644
index 000000000..1be8fa67e
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_datacenter.py
@@ -0,0 +1,330 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_datacenter
+short_description: Module to manage data centers in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Module to manage data centers in oVirt/RHV"
+options:
+ id:
+ description:
+ - "ID of the datacenter to manage."
+ type: str
+ name:
+ description:
+ - "Name of the data center to manage."
+ required: true
+ type: str
+ state:
+ description:
+ - "Should the data center be present or absent."
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ description:
+ description:
+ - "Description of the data center."
+ type: str
+ comment:
+ description:
+ - "Comment of the data center."
+ type: str
+ local:
+ description:
+ - "I(True) if the data center should be local, I(False) if should be shared."
+ - "Default value is set by engine."
+ type: bool
+ compatibility_version:
+ description:
+ - "Compatibility version of the data center."
+ type: str
+ quota_mode:
+ description:
+ - "Quota mode of the data center. One of I(disabled), I(audit) or I(enabled)"
+ choices: ['disabled', 'audit', 'enabled']
+ type: str
+ mac_pool:
+ description:
+ - "MAC pool to be used by this datacenter."
+ - "IMPORTANT: This option is deprecated in oVirt/RHV 4.1. You should
+ use C(mac_pool) in C(ovirt_clusters) module, as MAC pools are
+ set per cluster since 4.1."
+ type: str
+ force:
+ description:
+ - "This parameter can be used only when removing a data center.
+ If I(True) data center will be forcibly removed, even though it
+ contains some clusters. Default value is I(False), which means
+ that only empty data center can be removed."
+ type: bool
+ iscsi_bonds:
+ description:
+ - "List of iscsi bonds, which should be created in datacenter."
+ suboptions:
+ name:
+ description:
+ - "Name of the iscsi bond."
+ type: str
+ networks:
+ description:
+ - "List of network names in bond."
+ type: list
+ elements: str
+ storage_domains:
+ description:
+ - "List of storage domain names and it will automatically get all storage_connections in the domain."
+ type: list
+ default: []
+ elements: str
+ storage_connections:
+ description:
+ - "List of storage_connection IDs. Used when you want to use specific storage connection instead of all in storage domain."
+ type: list
+ default: []
+ elements: str
+ type: list
+ elements: dict
+
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create datacenter
+- ovirt.ovirt.ovirt_datacenter:
+ name: mydatacenter
+ local: True
+ compatibility_version: 4.0
+ quota_mode: enabled
+
+# Remove datacenter
+- ovirt.ovirt.ovirt_datacenter:
+ state: absent
+ name: mydatacenter
+
+# Change Datacenter Name
+- ovirt.ovirt.ovirt_datacenter:
+ id: 00000000-0000-0000-0000-000000000000
+ name: "new_datacenter_name"
+
+# Create datacenter with iscsi bond
+- ovirt.ovirt.ovirt_datacenter:
+ name: mydatacenter
+ iscsi_bonds:
+ - name: bond1
+ networks:
+ - network1
+ - network2
+ storage_domains:
+ - storage1
+ - name: bond2
+ networks:
+ - network3
+ storage_connections:
+ - cf780201-6a4f-43c1-a019-e65c4220ab73
+
+# Remove all iscsi bonds
+- ovirt.ovirt.ovirt_datacenter:
+ name: mydatacenter
+ iscsi_bonds: []
+'''
+
+RETURN = '''
+id:
+ description: "ID of the managed datacenter"
+ returned: "On success if datacenter is found."
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+data_center:
+ description: "Dictionary of all the datacenter attributes. Datacenter attributes can be found on your oVirt/RHV instance
+    at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/datacenter."
+ returned: "On success if datacenter is found."
+ type: dict
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ check_params,
+ create_connection,
+ equal,
+ ovirt_full_argument_spec,
+ search_by_name,
+ follow_link,
+ get_id_by_name
+)
+
+
+class DatacentersModule(BaseModule):
+
+ def __get_major(self, full_version):
+ if full_version is None:
+ return None
+ if isinstance(full_version, otypes.Version):
+ return full_version.major
+ return int(full_version.split('.')[0])
+
+ def __get_minor(self, full_version):
+ if full_version is None:
+ return None
+ if isinstance(full_version, otypes.Version):
+ return full_version.minor
+ return int(full_version.split('.')[1])
+
+ def _get_mac_pool(self):
+ mac_pool = None
+ if self._module.params.get('mac_pool'):
+ mac_pool = search_by_name(
+ self._connection.system_service().mac_pools_service(),
+ self._module.params.get('mac_pool'),
+ )
+ return mac_pool
+
+ def build_entity(self):
+ return otypes.DataCenter(
+ name=self._module.params['name'],
+ id=self._module.params['id'],
+ comment=self._module.params['comment'],
+ description=self._module.params['description'],
+ mac_pool=otypes.MacPool(
+ id=getattr(self._get_mac_pool(), 'id', None),
+ ) if self._module.params.get('mac_pool') else None,
+ quota_mode=otypes.QuotaModeType(
+ self._module.params['quota_mode']
+ ) if self._module.params['quota_mode'] else None,
+ local=self._module.params['local'],
+ version=otypes.Version(
+ major=self.__get_major(self._module.params['compatibility_version']),
+ minor=self.__get_minor(self._module.params['compatibility_version']),
+ ) if self._module.params['compatibility_version'] else None,
+ )
+
+ def update_check(self, entity):
+ minor = self.__get_minor(self._module.params.get('compatibility_version'))
+ major = self.__get_major(self._module.params.get('compatibility_version'))
+ return (
+ equal(getattr(self._get_mac_pool(), 'id', None), getattr(entity.mac_pool, 'id', None)) and
+ equal(self._module.params.get('comment'), entity.comment) and
+ equal(self._module.params.get('description'), entity.description) and
+ equal(self._module.params.get('name'), entity.name) and
+ equal(self._module.params.get('quota_mode'), str(entity.quota_mode)) and
+ equal(self._module.params.get('local'), entity.local) and
+ equal(minor, self.__get_minor(entity.version)) and
+ equal(major, self.__get_major(entity.version))
+ )
+
+
+def get_storage_connections(iscsi_bond, connection):
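+    """
+    Resolve an iscsi_bond dict into a flat list of storage connection objects:
+    expand every named storage domain into its connections, then append any
+    explicitly listed storage connection IDs.
+    """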
+ resp = []
+ for storage_domain_name in iscsi_bond.get('storage_domains', []):
+ storage_domains_service = connection.system_service().storage_domains_service()
+ storage_domain = storage_domains_service.storage_domain_service(
+ get_id_by_name(storage_domains_service, storage_domain_name)).get()
+ resp.extend(connection.follow_link(storage_domain.storage_connections))
+
+ for storage_connection_id in iscsi_bond.get('storage_connections', []):
+ resp.append(connection.system_service().storage_connections_service(
+ ).storage_connection_service(storage_connection_id).get())
+ return resp
+
+
+def serialize_iscsi_bond(iscsi_bonds):
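+    # Project the bonds into plain dicts so the before/after lists can be
+    # compared when computing the 'changed' flag.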
+ return [{"name": bond.name,
+ "networks": [net.name for net in bond.networks],
+ "storage_connections": [connection.address for connection in bond.storage_connections]} for bond in iscsi_bonds]
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ name=dict(required=True),
+ description=dict(default=None),
+ local=dict(type='bool'),
+ id=dict(default=None),
+ compatibility_version=dict(default=None),
+ quota_mode=dict(choices=['disabled', 'audit', 'enabled']),
+ comment=dict(default=None),
+ mac_pool=dict(default=None),
+ force=dict(default=None, type='bool'),
+ iscsi_bonds=dict(type='list', default=None, elements='dict'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+ check_params(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ data_centers_service = connection.system_service().data_centers_service()
+ data_centers_module = DatacentersModule(
+ connection=connection,
+ module=module,
+ service=data_centers_service,
+ )
+
+ state = module.params['state']
+ if state == 'present':
+ ret = data_centers_module.create()
+ if module.params.get('iscsi_bonds') is not None:
+ iscsi_bonds_service = data_centers_service.data_center_service(
+ ret.get('id')).iscsi_bonds_service()
+ before_iscsi_bonds = iscsi_bonds_service.list()
+ networks_service = connection.system_service().networks_service()
+ # Remove existing bonds
+ for bond in iscsi_bonds_service.list():
+ iscsi_bonds_service.iscsi_bond_service(bond.id).remove()
+            # Create new bonds
+ for new_bond in module.params.get('iscsi_bonds'):
+ iscsi_bond = otypes.IscsiBond(
+ name=new_bond.get('name'),
+ data_center=data_centers_service.data_center_service(
+ ret.get('id')).get(),
+ storage_connections=get_storage_connections(
+ new_bond, connection),
+ networks=[search_by_name(networks_service, network_name)
+ for network_name in new_bond.get('networks')],
+ )
+ iscsi_bonds_service.add(iscsi_bond)
+ ret['changed'] = ret['changed'] or serialize_iscsi_bond(
+ before_iscsi_bonds) != serialize_iscsi_bond(iscsi_bonds_service.list())
+ elif state == 'absent':
+ ret = data_centers_module.remove(force=module.params['force'])
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_datacenter_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_datacenter_info.py
new file mode 100644
index 000000000..bfc17e305
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_datacenter_info.py
@@ -0,0 +1,118 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_datacenter_info
+short_description: Retrieve information about one or more oVirt/RHV datacenters
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV datacenters."
+ - This module was called C(ovirt_datacenter_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_datacenter_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_datacenters), which
+ contains a list of datacenters. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search datacenter I(X) use following pattern: I(name=X)"
+ type: str
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url: https://ovirt.github.io/ovirt-engine-api-model/master/#types/data_center/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all data centers whose names start with C(production):
+- ovirt.ovirt.ovirt_datacenter_info:
+ pattern: name=production*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_datacenters }}"
+'''
+
+RETURN = '''
+ovirt_datacenters:
+ description: "List of dictionaries describing the datacenters. Datacenter attributes are mapped to dictionary keys,
+    all datacenter attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/data_center."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+ "The 'fetch_nested' and 'nested_attributes' are deprecated please use 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ datacenters_service = connection.system_service().data_centers_service()
+ datacenters = datacenters_service.list(
+ search=module.params['pattern'],
+ follow=",".join(module.params['follow'])
+ )
+ result = dict(
+ ovirt_datacenters=[
+ get_dict_of_struct(
+ struct=d,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for d in datacenters
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_disk.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_disk.py
new file mode 100644
index 000000000..5d83d21d0
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_disk.py
@@ -0,0 +1,1006 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_disk
+short_description: "Module to manage Virtual Machine and floating disks in oVirt/RHV"
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Module to manage Virtual Machine and floating disks in oVirt/RHV."
+ - "WARNING: If you are installing the collection from ansible galaxy you need to install 'qemu-img' package."
+options:
+ id:
+ description:
+ - "ID of the disk to manage. Either C(id) or C(name) is required."
+ type: str
+ name:
+ description:
+ - "Name of the disk to manage. Either C(id) or C(name)/C(alias) is required."
+ aliases: ['alias']
+ type: str
+ description:
+ description:
+ - "Description of the disk image to manage."
+ type: str
+ vm_name:
+ description:
+ - "Name of the Virtual Machine to manage. Either C(vm_id) or C(vm_name) is required if C(state) is I(attached) or I(detached)."
+ type: str
+ vm_id:
+ description:
+ - "ID of the Virtual Machine to manage. Either C(vm_id) or C(vm_name) is required if C(state) is I(attached) or I(detached)."
+ type: str
+ state:
+ description:
+ - "Should the Virtual Machine disk be present/absent/attached/detached/exported/imported."
+ choices: ['present', 'absent', 'attached', 'detached', 'exported', 'imported']
+ default: 'present'
+ type: str
+ download_image_path:
+ description:
+ - "Path on a file system where disk should be downloaded."
+ - "Note that you must have an valid oVirt/RHV engine CA in your system trust store
+ or you must provide it in C(ca_file) parameter."
+ - "Note that the disk is not downloaded when the file already exists,
+ but you can forcibly download the disk when using C(force) I (true)."
+ type: str
+ upload_image_path:
+ description:
+ - "Path to disk image, which should be uploaded."
+ - "Note if C(size) is not specified the size of the disk will be determined by the size of the specified image."
+ - "Note that currently we support only compatibility version 0.10 of the qcow disk."
+ - "Note that you must have an valid oVirt/RHV engine CA in your system trust store
+ or you must provide it in C(ca_file) parameter."
+ - "Note that there is no reliable way to achieve idempotency, so
+ if you want to upload the disk even if the disk with C(id) or C(name) exists,
+ then please use C(force) I(true). If you will use C(force) I(false), which
+ is default, then the disk image won't be uploaded."
+ - "Note that to upload iso the C(format) should be 'raw'"
+ type: str
+ aliases: ['image_path']
+ size:
+ description:
+ - "Size of the disk. Size should be specified using IEC standard units.
+ For example 10GiB, 1024MiB, etc."
+ - "Size can be only increased, not decreased."
+ - "If the disk is referenced by C(name) and is attached to a VM, make sure to specify C(vm_name)/C(vm_id)
+ to prevent extension of another disk that is not attached to the VM."
+ type: str
+ interface:
+ description:
+ - "Driver of the storage interface."
+ - "It's required parameter when creating the new disk."
+ choices: ['virtio', 'ide', 'sata', 'virtio_scsi']
+ type: str
+ format:
+ description:
+ - Specify format of the disk.
+      - Note that this option isn't idempotent, as it's not currently possible to change the format of the disk via the API.
+ choices: ['raw', 'cow']
+ default: 'cow'
+ type: str
+ content_type:
+ description:
+      - Specify if the disk is a data disk, an ISO image, or one of the Hosted Engine disk types.
+      - The Hosted Engine disk content types are available with Engine 4.3+ and Ansible 2.8.
+ choices: ['data', 'iso', 'hosted_engine', 'hosted_engine_sanlock', 'hosted_engine_metadata', 'hosted_engine_configuration']
+ default: 'data'
+ type: str
+ sparse:
+ required: False
+ type: bool
+ description:
+ - "I(True) if the disk should be sparse (also known as I(thin provision)).
+ If the parameter is omitted, cow disks will be created as sparse and raw disks as I(preallocated)"
+ - Note that this option isn't idempotent as it's not currently possible to change sparseness of the disk via API.
+ storage_domain:
+ description:
+ - "Storage domain name where disk should be created."
+ type: str
+ storage_domains:
+ description:
+ - "Storage domain names where disk should be copied."
+ - "C(**IMPORTANT**)"
+ - "There is no reliable way to achieve idempotency, so every time
+ you specify this parameter the disks are copied, so please handle
+ your playbook accordingly to not copy the disks all the time. This
+ is valid only for VM and floating disks, template disks works
+ as expected."
+ type: list
+ elements: str
+ force:
+ description:
+ - "Please take a look at C(image_path) documentation to see the correct
+ usage of this parameter."
+ type: bool
+ default: false
+ profile:
+ description:
+ - "Disk profile name to be attached to disk. By default profile is chosen by oVirt/RHV engine."
+ type: str
+ quota_id:
+ description:
+ - "Disk quota ID to be used for disk. By default quota is chosen by oVirt/RHV engine."
+ type: str
+ bootable:
+ description:
+ - "I(True) if the disk should be bootable. By default when disk is created it isn't bootable."
+ type: bool
+ shareable:
+ description:
+ - "I(True) if the disk should be shareable. By default when disk is created it isn't shareable."
+ type: bool
+ read_only:
+ description:
+ - "I(True) if the disk should be read_only. By default when disk is created it isn't read_only."
+ type: bool
+ logical_unit:
+ description:
+ - "Dictionary which describes LUN to be directly attached to VM:"
+ suboptions:
+ address:
+ description:
+ - Address of the storage server. Used by iSCSI.
+ port:
+ description:
+ - Port of the storage server. Used by iSCSI.
+ target:
+ description:
+ - iSCSI target.
+ lun_id:
+ description:
+ - LUN id.
+ username:
+ description:
+ - CHAP Username to be used to access storage server. Used by iSCSI.
+ password:
+ description:
+ - CHAP Password of the user to be used to access storage server. Used by iSCSI.
+ storage_type:
+ description:
+          - Storage type, either I(fcp) or I(iscsi).
+ type: dict
+ sparsify:
+ description:
+ - "I(True) if the disk should be sparsified."
+ - "Sparsification frees space in the disk image that is not used by
+ its filesystem. As a result, the image will occupy less space on
+ the storage."
+ - "Note that this parameter isn't idempotent, as it's not possible
+ to check if the disk should be or should not be sparsified."
+ type: bool
+ openstack_volume_type:
+ description:
+ - "Name of the openstack volume type. This is valid when working
+ with cinder."
+ type: str
+ image_provider:
+ description:
+ - "When C(state) is I(exported) disk is exported to given Glance image provider."
+ - "When C(state) is I(imported) disk is imported from given Glance image provider."
+ - "C(**IMPORTANT**)"
+ - "There is no reliable way to achieve idempotency, so every time
+ you specify this parameter the disk is exported, so please handle
+ your playbook accordingly to not export the disk all the time.
+ This option is valid only for template disks."
+ type: str
+ host:
+ description:
+ - "When the hypervisor name is specified the newly created disk or
+ an existing disk will refresh its information about the
+ underlying storage( Disk size, Serial, Product ID, Vendor ID ...)
+ The specified host will be used for gathering the storage
+ related information. This option is only valid for passthrough
+ disks. This option requires at least the logical_unit.id to be
+ specified"
+ type: str
+ wipe_after_delete:
+ description:
+ - "If the disk's Wipe After Delete is enabled, then the disk is first wiped."
+ type: bool
+ activate:
+ description:
+ - I(True) if the disk should be activated.
+      - When creating a disk of a virtual machine it is set to I(True).
+ type: bool
+ backup:
+ description:
+ - The backup behavior supported by the disk.
+ choices: ['incremental']
+ version_added: 1.1.0
+ type: str
+ scsi_passthrough:
+ description:
+      - Indicates whether SCSI passthrough is enabled and its policy.
+      - Setting a value of I(filtered)/I(unfiltered) will enable SCSI passthrough for a LUN disk with unprivileged/privileged SCSI I/O.
+      - To disable SCSI passthrough the value should be set to I(disabled).
+ choices: ['disabled', 'filtered', 'unfiltered']
+ type: str
+ version_added: 1.2.0
+ propagate_errors:
+ description:
+      - Indicates if disk errors should cause the virtual machine to be paused
+        or be propagated to the guest operating system instead.
+ type: bool
+ version_added: 1.2.0
+ pass_discard:
+ description:
+ - Defines whether the virtual machine passes discard commands to the storage.
+ type: bool
+ version_added: 1.2.0
+ uses_scsi_reservation:
+ description:
+ - Defines whether SCSI reservation is enabled for this disk.
+ type: bool
+ version_added: 1.2.0
+ max_workers:
+ description:
+ - The number of workers which should be used in the upload/download of the image.
+ - The use of multiple workers can speed up the process.
+ type: int
+ version_added: 1.7.0
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create and attach new disk to VM
+- ovirt.ovirt.ovirt_disk:
+ name: myvm_disk
+ vm_name: rhel7
+ size: 10GiB
+ format: cow
+ interface: virtio
+ storage_domain: data
+
+# Attach logical unit to VM rhel7
+- ovirt.ovirt.ovirt_disk:
+ vm_name: rhel7
+ logical_unit:
+ target: iqn.2016-08-09.brq.str-01:omachace
+ id: 1IET_000d0001
+ address: 10.34.63.204
+ interface: virtio
+
+# Detach disk from VM
+- ovirt.ovirt.ovirt_disk:
+ state: detached
+ name: myvm_disk
+ vm_name: rhel7
+ size: 10GiB
+ format: cow
+ interface: virtio
+
+# Change Disk Name
+- ovirt.ovirt.ovirt_disk:
+ id: 00000000-0000-0000-0000-000000000000
+ storage_domain: data
+ name: "new_disk_name"
+ vm_name: rhel7
+
+# Upload local image to disk and attach it to vm:
+# Since Ansible 2.3
+- ovirt.ovirt.ovirt_disk:
+ name: mydisk
+ vm_name: myvm
+ interface: virtio
+ size: 10GiB
+ format: cow
+ image_path: /path/to/mydisk.qcow2
+ storage_domain: data
+
+# Download disk to local file system:
+# Since Ansible 2.3
+- ovirt.ovirt.ovirt_disk:
+ id: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+ download_image_path: /home/user/mydisk.qcow2
+
+# Export disk as image to Glance domain
+# Since Ansible 2.4
+- ovirt.ovirt.ovirt_disk:
+ id: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+ image_provider: myglance
+ state: exported
+
+# Defining a specific quota while creating a disk image:
+# Since Ansible 2.5
+- ovirt.ovirt.ovirt_quota_info:
+ data_center: Default
+ name: myquota
+ register: quota
+- ovirt.ovirt.ovirt_disk:
+ name: mydisk
+ size: 10GiB
+ storage_domain: data
+ description: somedescriptionhere
+ quota_id: "{{ quota.ovirt_quotas[0]['id'] }}"
+
+# Upload an ISO image
+# Since Ansible 2.8
+- ovirt.ovirt.ovirt_disk:
+ name: myiso
+ upload_image_path: /path/to/iso/image
+ storage_domain: data
+ size: 4 GiB
+ wait: true
+ bootable: true
+ format: raw
+ content_type: iso
+
+# Add fibre channel disk
+- name: Create disk
+ ovirt.ovirt.ovirt_disk:
+ name: fcp_disk
+ host: my_host
+ logical_unit:
+ id: 3600a09803830447a4f244c4657597777
+ storage_type: fcp
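+
+# Download a disk using several parallel workers
+# (a sketch; C(max_workers) needs a recent ovirt-imageio client,
+# see the option description above):
+- ovirt.ovirt.ovirt_disk:
+    id: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+    download_image_path: /home/user/mydisk.qcow2
+    max_workers: 4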
+'''
+
+
+RETURN = '''
+id:
+ description: "ID of the managed disk"
+ returned: "On success if disk is found."
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+disk:
+ description: "Dictionary of all the disk attributes. Disk attributes can be found on your oVirt/RHV instance
+                at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/disk."
+ returned: "On success if disk is found and C(vm_id) or C(vm_name) wasn't passed."
+ type: dict
+
+disk_attachment:
+ description: "Dictionary of all the disk attachment attributes. Disk attachment attributes can be found
+                on your oVirt/RHV instance at the following URL:
+ http://ovirt.github.io/ovirt-engine-api-model/master/#types/disk_attachment."
+ returned: "On success if disk is found and C(vm_id) or C(vm_name) was passed and VM was found."
+ type: dict
+'''
+
+import json
+import os
+import ssl
+import subprocess
+import time
+import traceback
+import inspect
+
+from ansible.module_utils.six.moves.http_client import HTTPSConnection, IncompleteRead
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+try:
+ import ovirtsdk4 as sdk
+ import ovirtsdk4.types as otypes
+ from ovirt_imageio import client
+except ImportError:
+ pass
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ check_params,
+ create_connection,
+ convert_to_bytes,
+ equal,
+ follow_link,
+ get_id_by_name,
+ ovirt_full_argument_spec,
+ get_dict_of_struct,
+ search_by_name,
+ wait,
+)
+
+
+def _search_by_lun(disks_service, lun_id):
+ """
+ Find disk by LUN ID.
+ """
+ res = [
+ disk for disk in disks_service.list(search='disk_type=lun') if (
+ disk.lun_storage.id == lun_id
+ )
+ ]
+ return res[0] if res else None
+
+
+def create_transfer_connection(module, transfer, context, connect_timeout=10, read_timeout=60):
+ url = urlparse(transfer.transfer_url)
+ connection = HTTPSConnection(
+ url.netloc, context=context, timeout=connect_timeout)
+ try:
+ connection.connect()
+ except Exception as e:
+ # Typically ConnectionRefusedError or socket.gaierror.
+ module.warn("Cannot connect to %s, trying %s: %s" % (transfer.transfer_url, transfer.proxy_url, e))
+
+ url = urlparse(transfer.proxy_url)
+ connection = HTTPSConnection(
+ url.netloc, context=context, timeout=connect_timeout)
+ connection.connect()
+
+ connection.sock.settimeout(read_timeout)
+ return connection, url
+
+
+def start_transfer(connection, module, direction):
+ transfers_service = connection.system_service().image_transfers_service()
+ hosts_service = connection.system_service().hosts_service()
+ transfer = transfers_service.add(
+ otypes.ImageTransfer(
+ disk=otypes.Disk(id=module.params.get('id')),
+ direction=direction,
+ timeout_policy=otypes.ImageTransferTimeoutPolicy.LEGACY,
+ host=otypes.Host(
+ id=get_id_by_name(hosts_service, module.params.get('host'))
+ ) if module.params.get('host') else None,
+ # format=raw uses the NBD backend, enabling:
+ # - Transfer raw guest data, regardless of the disk format.
+ # - Automatic format conversion to remote disk format. For example,
+ # upload qcow2 image to raw disk, or raw image to qcow2 disk.
+ # - Collapsed qcow2 chains to single raw file.
+ # - Extents reporting for qcow2 images and raw images on file storage,
+ # speeding up downloads.
+ format=otypes.DiskFormat.RAW,
+ )
+ )
+ transfer_service = transfers_service.image_transfer_service(transfer.id)
+
+ start = time.time()
+
+ while True:
+ time.sleep(1)
+ try:
+ transfer = transfer_service.get()
+ except sdk.NotFoundError:
+ # The system has removed the disk and the transfer.
+ raise RuntimeError("Transfer {0} was removed".format(transfer.id))
+
+ if transfer.phase == otypes.ImageTransferPhase.FINISHED_FAILURE:
+ # The system will remove the disk and the transfer soon.
+ raise RuntimeError("Transfer {0} has failed".format(transfer.id))
+
+ if transfer.phase == otypes.ImageTransferPhase.PAUSED_SYSTEM:
+ transfer_service.cancel()
+ raise RuntimeError(
+ "Transfer {0} was paused by system".format(transfer.id))
+
+ if transfer.phase == otypes.ImageTransferPhase.TRANSFERRING:
+ break
+
+ if transfer.phase != otypes.ImageTransferPhase.INITIALIZING:
+ transfer_service.cancel()
+ raise RuntimeError(
+ "Unexpected transfer {0} phase {1}"
+ .format(transfer.id, transfer.phase))
+
+ if time.time() > start + module.params.get('timeout'):
+ transfer_service.cancel()
+ raise RuntimeError(
+ "Timed out waiting for transfer {0}".format(transfer.id))
+
+ hosts_service = connection.system_service().hosts_service()
+ host_service = hosts_service.host_service(transfer.host.id)
+ transfer.host = host_service.get()
+ return transfer
+
+
+def cancel_transfer(connection, transfer_id):
+ transfer_service = (connection.system_service()
+ .image_transfers_service()
+ .image_transfer_service(transfer_id))
+ transfer_service.cancel()
+
+
+def finalize_transfer(connection, module, transfer_id):
+ transfer_service = (connection.system_service()
+ .image_transfers_service()
+ .image_transfer_service(transfer_id))
+ start = time.time()
+
+ transfer_service.finalize()
+ while True:
+ time.sleep(1)
+ try:
+ transfer = transfer_service.get()
+ except sdk.NotFoundError:
+ # Old engine (< 4.4.7): since the transfer was already deleted from
+ # the database, we can assume that the disk status is already
+ # updated, so we can check it only once.
+ disk_service = (connection.system_service()
+ .disks_service()
+ .disk_service(module.params['id']))
+ try:
+ disk = disk_service.get()
+ except sdk.NotFoundError:
+ # Disk verification failed and the system removed the disk.
+ raise RuntimeError(
+ "Transfer {0} failed: disk {1} was removed"
+ .format(transfer.id, module.params['id']))
+
+ if disk.status == otypes.DiskStatus.OK:
+ break
+
+ raise RuntimeError(
+ "Transfer {0} failed: disk {1} is '{2}'"
+ .format(transfer.id, module.params['id'], disk.status))
+
+ if transfer.phase == otypes.ImageTransferPhase.FINISHED_SUCCESS:
+ break
+
+ if transfer.phase == otypes.ImageTransferPhase.FINISHED_FAILURE:
+ raise RuntimeError(
+ "Transfer {0} failed, phase: {1}"
+ .format(transfer.id, transfer.phase))
+
+ if time.time() > start + module.params.get('timeout'):
+ raise RuntimeError(
+ "Timed out waiting for transfer {0} to finalize, phase: {1}"
+ .format(transfer.id, transfer.phase))
+
+
+def download_disk_image(connection, module):
+ transfer = start_transfer(connection, module, otypes.ImageTransferDirection.DOWNLOAD)
+ try:
+ extra_args = {}
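+        # Older ovirt-imageio clients do not accept these keyword arguments,
+        # so pass them only when the installed client supports them.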
+ parameters = inspect.signature(client.download).parameters
+ if "proxy_url" in parameters:
+ extra_args["proxy_url"] = transfer.proxy_url
+ if module.params.get('max_workers') and "max_workers" in parameters:
+ extra_args["max_workers"] = module.params.get('max_workers')
+ client.download(
+ transfer.transfer_url,
+ module.params.get('download_image_path'),
+ module.params.get('auth').get('ca_file'),
+ fmt='qcow2' if module.params.get('format') == 'cow' else 'raw',
+ secure=not module.params.get('auth').get('insecure'),
+ buffer_size=client.BUFFER_SIZE,
+ **extra_args
+ )
+ except Exception as e:
+ cancel_transfer(connection, transfer.id)
+ raise e
+ finalize_transfer(connection, module, transfer.id)
+ return True
+
+
+def upload_disk_image(connection, module):
+ transfer = start_transfer(connection, module, otypes.ImageTransferDirection.UPLOAD)
+ try:
+ extra_args = {}
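+        # Older ovirt-imageio clients do not accept these keyword arguments,
+        # so pass them only when the installed client supports them.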
+ parameters = inspect.signature(client.upload).parameters
+ if "proxy_url" in parameters:
+ extra_args["proxy_url"] = transfer.proxy_url
+ if module.params.get('max_workers') and "max_workers" in parameters:
+ extra_args["max_workers"] = module.params.get('max_workers')
+ client.upload(
+ module.params.get('upload_image_path'),
+ transfer.transfer_url,
+ module.params.get('auth').get('ca_file'),
+ secure=not module.params.get('auth').get('insecure'),
+ buffer_size=client.BUFFER_SIZE,
+ **extra_args
+ )
+ except Exception as e:
+ cancel_transfer(connection, transfer.id)
+ raise e
+ finalize_transfer(connection, module, transfer.id)
+ return True
+
+
+class DisksModule(BaseModule):
+
+ def build_entity(self):
+ hosts_service = self._connection.system_service().hosts_service()
+ logical_unit = self._module.params.get('logical_unit')
+ size = convert_to_bytes(self._module.params.get('size'))
+ if not size and self._module.params.get('upload_image_path'):
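+            # When 'size' is omitted for an upload, derive the provisioned
+            # size from the image itself via 'qemu-img info'.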
+ out = subprocess.check_output(
+ ["qemu-img", "info", "--output", "json", self._module.params.get('upload_image_path')])
+ image_info = json.loads(out)
+ size = image_info["virtual-size"]
+ disk = otypes.Disk(
+ id=self._module.params.get('id'),
+ name=self._module.params.get('name'),
+ description=self._module.params.get('description'),
+ format=otypes.DiskFormat(
+ self._module.params.get('format')
+ ) if self._module.params.get('format') else None,
+ content_type=otypes.DiskContentType(
+ self._module.params.get('content_type')
+ ) if self._module.params.get('content_type') else None,
+ sparse=self._module.params.get(
+ 'sparse'
+ ) if self._module.params.get(
+ 'sparse'
+ ) is not None else self._module.params.get('format') != 'raw',
+ openstack_volume_type=otypes.OpenStackVolumeType(
+ name=self.param('openstack_volume_type')
+ ) if self.param('openstack_volume_type') else None,
+ provisioned_size=size,
+ storage_domains=[
+ otypes.StorageDomain(
+ name=self._module.params.get('storage_domain'),
+ ),
+ ],
+ disk_profile=otypes.DiskProfile(
+ id=get_id_by_name(self._connection.system_service().disk_profiles_service(), self._module.params.get('profile'))
+ ) if self._module.params.get('profile') else None,
+ quota=otypes.Quota(id=self._module.params.get('quota_id')) if self.param('quota_id') else None,
+ shareable=self._module.params.get('shareable'),
+ sgio=otypes.ScsiGenericIO(self.param('scsi_passthrough')) if self.param('scsi_passthrough') else None,
+ propagate_errors=self.param('propagate_errors'),
+ backup=otypes.DiskBackup(self.param('backup')) if self.param('backup') else None,
+ wipe_after_delete=self.param('wipe_after_delete'),
+ lun_storage=otypes.HostStorage(
+ host=otypes.Host(
+ id=get_id_by_name(hosts_service, self._module.params.get('host'))
+ ) if self.param('host') else None,
+ type=otypes.StorageType(
+ logical_unit.get('storage_type', 'iscsi')
+ ),
+ logical_units=[
+ otypes.LogicalUnit(
+ address=logical_unit.get('address'),
+ port=logical_unit.get('port', 3260),
+ target=logical_unit.get('target'),
+ id=logical_unit.get('id'),
+ username=logical_unit.get('username'),
+ password=logical_unit.get('password'),
+ )
+ ],
+ ) if logical_unit else None,
+ )
+ if hasattr(disk, 'initial_size') and self._module.params['upload_image_path']:
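+            # 'qemu-img measure' reports how much space the uploaded image
+            # will require on the target format; use it as the initial size.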
+ out = subprocess.check_output([
+ 'qemu-img',
+ 'measure',
+ '-O', 'qcow2' if self._module.params.get('format') == 'cow' else 'raw',
+ '--output', 'json',
+ self._module.params['upload_image_path']
+ ])
+ measure = json.loads(out)
+ disk.initial_size = measure["required"]
+
+ return disk
+
+ def update_storage_domains(self, disk_id):
+ changed = False
+ disk_service = self._service.service(disk_id)
+ disk = disk_service.get()
+ sds_service = self._connection.system_service().storage_domains_service()
+
+ # We don't support move&copy for non file based storages:
+ if disk.storage_type != otypes.DiskStorageType.IMAGE:
+ return changed
+ if disk.content_type in [
+ otypes.DiskContentType(x) for x in ['hosted_engine', 'hosted_engine_sanlock', 'hosted_engine_metadata', 'hosted_engine_configuration']]:
+ return changed
+ # Initiate move:
+ if self._module.params['storage_domain']:
+ new_disk_storage_id = get_id_by_name(sds_service, self._module.params['storage_domain'])
+ if new_disk_storage_id in [sd.id for sd in disk.storage_domains]:
+ return changed
+ changed = self.action(
+ action='move',
+ entity=disk,
+ action_condition=lambda d: new_disk_storage_id != d.storage_domains[0].id,
+ wait_condition=lambda d: d.status == otypes.DiskStatus.OK,
+ storage_domain=otypes.StorageDomain(
+ id=new_disk_storage_id,
+ ),
+ post_action=lambda _: time.sleep(self._module.params['poll_interval']),
+ )['changed']
+
+ if self._module.params['storage_domains']:
+ for sd in self._module.params['storage_domains']:
+ new_disk_storage = search_by_name(sds_service, sd)
+ changed = changed or self.action(
+ action='copy',
+ entity=disk,
+ action_condition=(
+ lambda disk: new_disk_storage.id not in [sd.id for sd in disk.storage_domains]
+ ),
+ wait_condition=lambda disk: disk.status == otypes.DiskStatus.OK,
+ storage_domain=otypes.StorageDomain(
+ id=new_disk_storage.id,
+ ),
+ )['changed']
+
+ return changed
+
+ def update_check(self, entity):
+ return (
+ equal(self._module.params.get('name'), entity.name) and
+ equal(self._module.params.get('description'), entity.description) and
+ equal(self.param('quota_id'), getattr(entity.quota, 'id', None)) and
+ equal(convert_to_bytes(self._module.params.get('size')), entity.provisioned_size) and
+ equal(self._module.params.get('shareable'), entity.shareable) and
+ equal(self.param('propagate_errors'), entity.propagate_errors) and
+ equal(otypes.ScsiGenericIO(self.param('scsi_passthrough')) if self.param('scsi_passthrough') else None, entity.sgio) and
+ equal(self.param('wipe_after_delete'), entity.wipe_after_delete) and
+ equal(self.param('profile'), follow_link(self._connection, entity.disk_profile).name)
+ )
+
+
+class DiskAttachmentsModule(DisksModule):
+
+ def build_entity(self):
+ return otypes.DiskAttachment(
+ disk=super(DiskAttachmentsModule, self).build_entity(),
+ interface=otypes.DiskInterface(
+ self._module.params.get('interface')
+ ) if self._module.params.get('interface') else None,
+ bootable=self._module.params.get('bootable'),
+ active=self.param('activate'),
+ read_only=self.param('read_only'),
+ uses_scsi_reservation=self.param('uses_scsi_reservation'),
+ pass_discard=self.param('pass_discard'),
+ )
+
+ def update_check(self, entity):
+ return (
+ super(DiskAttachmentsModule, self).update_check(follow_link(self._connection, entity.disk)) and
+ equal(self._module.params.get('interface'), str(entity.interface)) and
+ equal(self._module.params.get('bootable'), entity.bootable) and
+ equal(self._module.params.get('pass_discard'), entity.pass_discard) and
+ equal(self._module.params.get('read_only'), entity.read_only) and
+ equal(self._module.params.get('uses_scsi_reservation'), entity.uses_scsi_reservation) and
+ equal(self.param('activate'), entity.active)
+ )
+
+
+def searchable_attributes(module):
+ """
+ Return all searchable disk attributes passed to module.
+ """
+ attributes = {
+ 'name': module.params.get('name'),
+ 'Storage.name': module.params.get('storage_domain'),
+ 'vm_names': module.params.get('vm_name') if module.params.get('state') != 'attached' else None,
+ }
+ return dict((k, v) for k, v in attributes.items() if v is not None)
+
+
+def get_vm_service(connection, module):
+ # Note: 'and' binds tighter than 'or' here, so the VM service is looked up
+ # when vm_id is set, or when vm_name is set and state is not 'absent'.
+ if module.params.get('vm_id') is not None or module.params.get('vm_name') is not None and module.params['state'] != 'absent':
+ vms_service = connection.system_service().vms_service()
+
+ # If `vm_id` isn't specified, find VM by name:
+ vm_id = module.params['vm_id']
+ if vm_id is None:
+ vm_id = get_id_by_name(vms_service, module.params['vm_name'])
+
+ if vm_id is None:
+ module.fail_json(
+ msg="VM don't exists, please create it first."
+ )
+
+ return vms_service.vm_service(vm_id)
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent', 'attached', 'detached', 'exported', 'imported'],
+ default='present'
+ ),
+ id=dict(default=None),
+ name=dict(default=None, aliases=['alias']),
+ description=dict(default=None),
+ vm_name=dict(default=None),
+ vm_id=dict(default=None),
+ size=dict(default=None),
+ interface=dict(default=None, choices=['virtio', 'ide', 'sata', 'virtio_scsi']),
+ storage_domain=dict(default=None),
+ storage_domains=dict(default=None, type='list', elements='str'),
+ profile=dict(default=None),
+ quota_id=dict(default=None),
+ format=dict(default='cow', choices=['raw', 'cow']),
+ content_type=dict(
+ default='data',
+ choices=['data', 'iso', 'hosted_engine', 'hosted_engine_sanlock', 'hosted_engine_metadata', 'hosted_engine_configuration']
+ ),
+ backup=dict(default=None, type='str', choices=['incremental']),
+ sparse=dict(default=None, type='bool'),
+ bootable=dict(default=None, type='bool'),
+ shareable=dict(default=None, type='bool'),
+ scsi_passthrough=dict(default=None, type='str', choices=['disabled', 'filtered', 'unfiltered']),
+ uses_scsi_reservation=dict(default=None, type='bool'),
+ pass_discard=dict(default=None, type='bool'),
+ propagate_errors=dict(default=None, type='bool'),
+ logical_unit=dict(default=None, type='dict'),
+ read_only=dict(default=None, type='bool'),
+ download_image_path=dict(default=None),
+ upload_image_path=dict(default=None, aliases=['image_path']),
+ force=dict(default=False, type='bool'),
+ sparsify=dict(default=None, type='bool'),
+ openstack_volume_type=dict(default=None),
+ image_provider=dict(default=None),
+ host=dict(default=None),
+ wipe_after_delete=dict(type='bool', default=None),
+ activate=dict(default=None, type='bool'),
+ max_workers=dict(default=None, type='int'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ lun = module.params.get('logical_unit')
+ host = module.params['host']
+ # Fail when host is specified without a LUN id. The LUN id is needed to
+ # identify an existing disk if it is already available in the environment.
+ if host and (lun is None or lun.get("id") is None):
+ module.fail_json(
+ msg="Cannot use the host parameter ({0!s}) without "
+ "specifying the logical_unit id".format(host)
+ )
+
+ check_sdk(module)
+ check_params(module)
+
+ try:
+ disk = None
+ state = module.params['state']
+ auth = module.params.get('auth')
+ connection = create_connection(auth)
+ disks_service = connection.system_service().disks_service()
+ disks_module = DisksModule(
+ connection=connection,
+ module=module,
+ service=disks_service,
+ )
+
+ force_create = False
+ vm_service = get_vm_service(connection, module)
+ if lun:
+ disk = _search_by_lun(disks_service, lun.get('id'))
+ else:
+ disk = disks_module.search_entity(search_params=searchable_attributes(module))
+ if vm_service and disk and state != 'attached':
+ # If the disk isn't attached to the VM but was still found, it belongs to
+ # a template with the same name as the VM, so we should force create the VM disk.
+ force_create = disk.id not in [a.disk.id for a in vm_service.disk_attachments_service().list() if a.disk]
+
+ ret = None
+ # First take care of creating the disk, if needed:
+ if state in ('present', 'detached', 'attached'):
+ # Always activate the disk when it's being created
+ if vm_service is not None and disk is None:
+ module.params['activate'] = module.params['activate'] is None or module.params['activate']
+ ret = disks_module.create(
+ entity=disk if not force_create else None,
+ result_state=otypes.DiskStatus.OK if lun is None else None,
+ search_params=searchable_attributes(module),
+ fail_condition=lambda d: d.status == otypes.DiskStatus.ILLEGAL if lun is None else False,
+ force_create=force_create,
+ _wait=True if module.params['upload_image_path'] else module.params['wait'],
+ )
+ is_new_disk = ret['changed']
+ ret['changed'] = ret['changed'] or disks_module.update_storage_domains(ret['id'])
+ # Pass the ID back to the module, so in case we want to detach/attach the disk
+ # the ID is available to the attach/detach method:
+ module.params['id'] = ret['id']
+
+ # Upload the disk image in case it's a new disk or the force parameter is passed:
+ if module.params['upload_image_path'] and (is_new_disk or module.params['force']):
+ if module.params['format'] == 'cow' and module.params['content_type'] == 'iso':
+ module.warn("To upload an ISO image 'format' parameter needs to be set to 'raw'.")
+ uploaded = upload_disk_image(connection, module)
+ ret['changed'] = ret['changed'] or uploaded
+ # Download the disk image in case the file doesn't exist or the force parameter is passed:
+ if (
+ module.params['download_image_path'] and (not os.path.isfile(module.params['download_image_path']) or module.params['force'])
+ ):
+ downloaded = download_disk_image(connection, module)
+ ret['changed'] = ret['changed'] or downloaded
+
+ # Sparsify the disk, only if the disk is of image type:
+ if not module.check_mode:
+ disk = disks_service.disk_service(module.params['id']).get()
+ if disk.storage_type == otypes.DiskStorageType.IMAGE:
+ ret = disks_module.action(
+ action='sparsify',
+ action_condition=lambda d: module.params['sparsify'],
+ wait_condition=lambda d: d.status == otypes.DiskStatus.OK,
+ )
+
+ # Export disk as image to glance domain
+ elif state == 'exported':
+ disk = disks_module.search_entity()
+ if disk is None:
+ module.fail_json(
+ msg="Can not export given disk '%s', it doesn't exist" %
+ module.params.get('name') or module.params.get('id')
+ )
+ if disk.storage_type == otypes.DiskStorageType.IMAGE:
+ ret = disks_module.action(
+ action='export',
+ action_condition=lambda d: module.params['image_provider'],
+ wait_condition=lambda d: d.status == otypes.DiskStatus.OK,
+ storage_domain=otypes.StorageDomain(name=module.params['image_provider']),
+ )
+ elif state == 'imported':
+ glance_service = connection.system_service().openstack_image_providers_service()
+ image_provider = search_by_name(glance_service, module.params['image_provider'])
+ images_service = glance_service.service(image_provider.id).images_service()
+ entity_id = get_id_by_name(images_service, module.params['name'])
+ images_service.service(entity_id).import_(
+ storage_domain=otypes.StorageDomain(
+ name=module.params['storage_domain']
+ ) if module.params['storage_domain'] else None,
+ disk=otypes.Disk(
+ name=module.params['name']
+ ),
+ import_as_template=False,
+ )
+ # Wait for disk to appear in system:
+ disk = disks_module.wait_for_import(
+ condition=lambda t: t.status == otypes.DiskStatus.OK
+ )
+ ret = disks_module.create(result_state=otypes.DiskStatus.OK)
+ elif state == 'absent':
+ ret = disks_module.remove()
+
+ # If a VM was passed, attach/detach the disk to/from the VM:
+ if vm_service:
+ disk_attachments_service = vm_service.disk_attachments_service()
+ disk_attachments_module = DiskAttachmentsModule(
+ connection=connection,
+ module=module,
+ service=disk_attachments_service,
+ changed=ret['changed'] if ret else False,
+ )
+
+ if state == 'present' or state == 'attached':
+ ret = disk_attachments_module.create()
+ if lun is None:
+ wait(
+ service=disk_attachments_service.service(ret['id']),
+ condition=lambda d: follow_link(connection, d.disk).status == otypes.DiskStatus.OK,
+ wait=module.params['wait'],
+ timeout=module.params['timeout'],
+ )
+ elif state == 'detached':
+ ret = disk_attachments_module.remove()
+ elif any([
+ module.params.get('interface'),
+ module.params.get('activate'),
+ module.params.get('bootable'),
+ module.params.get('uses_scsi_reservation'),
+ module.params.get('pass_discard'), ]):
+ module.warn("Cannot use 'interface', 'activate', 'bootable', 'uses_scsi_reservation' or 'pass_discard' without specifing VM.")
+
+ # When the host parameter is specified and the disk is not being
+ # removed, refresh the information about the LUN.
+ if state != 'absent' and host:
+ hosts_service = connection.system_service().hosts_service()
+ host_id = get_id_by_name(hosts_service, host)
+ disks_service.disk_service(disk.id).refresh_lun(otypes.Host(id=host_id))
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_disk_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_disk_info.py
new file mode 100644
index 000000000..b3237fcd0
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_disk_info.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_disk_info
+short_description: Retrieve information about one or more oVirt/RHV disks
+version_added: "1.0.0"
+author: "Katerina Koukiou (@KKoukiou)"
+description:
+ - "Retrieve information about one or more oVirt/RHV disks."
+ - This module was called C(ovirt_disk_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_disk_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_disks), which
+ contains a list of disks. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search Disk X from storage Y use following pattern:
+ name=X and storage.name=Y"
+ type: str
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url: https://ovirt.github.io/ovirt-engine-api-model/master/#types/disk/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all disks whose names start with C(centos)
+- ovirt.ovirt.ovirt_disk_info:
+ pattern: name=centos*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_disks }}"
+'''
+
+RETURN = '''
+ovirt_disks:
+ description: "List of dictionaries describing the Disks. Disk attributes are mapped to dictionary keys,
+ all Disks attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/disk."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+ "The 'fetch_nested' and 'nested_attributes' are deprecated please use 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ disks_service = connection.system_service().disks_service()
+ disks = disks_service.list(
+ search=module.params['pattern'],
+ follow=",".join(module.params['follow'])
+ )
+ result = dict(
+ ovirt_disks=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in disks
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_disk_profile.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_disk_profile.py
new file mode 100644
index 000000000..b2c782c85
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_disk_profile.py
@@ -0,0 +1,212 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2022 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_disk_profile
+short_description: "Module to manage storage domain disk profiles in ovirt"
+author:
+- "Niall O Donnell (@odonnelln)"
+description:
+ - "Module to manage storage domain disk profiles in ovirt."
+options:
+ id:
+ description:
+ - "ID of the disk profile to manage. Either C(id) or C(name) is required."
+ type: str
+ name:
+ description:
+ - "Name of the disk profile to manage. Either C(id) or C(name)/C(alias) is required."
+ type: str
+ description:
+ description:
+ - "Description of the disk profile."
+ type: str
+ comment:
+ description:
+ - "Comment of the disk profile."
+ type: str
+ storage_domain:
+ description:
+ - "Name of the storage domain where the disk profile should be created."
+ type: str
+ data_center:
+ description:
+ - "Name of the data center where the qos entry has been created."
+ type: str
+ qos:
+ description:
+ - "Name of the QoS entry on the disk profile. If not passed defaults to ovirt HE default"
+ type: str
+ state:
+ description:
+ - "Should the disk profile be present/absent."
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+- name: Create a new disk profile on storage_domain_01 using the test_qos QoS in the Default datacenter
+ ovirt.ovirt.ovirt_disk_profile:
+ auth: "{{ ovirt_auth }}"
+ data_center: "Default"
+ name: "test_disk_profile"
+ state: "present"
+ storage_domain: "storage_domain_01"
+ qos: "test_qos"
+
+- name: Create a new disk profile on storage_domain_01 in the Default datacenter using the engine default QoS
+ ovirt.ovirt.ovirt_disk_profile:
+ auth: "{{ ovirt_auth }}"
+ data_center: "Default"
+ name: "test_disk_profile"
+ state: "present"
+ storage_domain: "storage_domain_01"
+
+- name: Remove the test_qos disk profile
+ ovirt.ovirt.ovirt_disk_profile:
+ auth: "{{ ovirt_auth }}"
+ data_center: "Default"
+ name: "test_disk_profile"
+ state: "absent"
+ storage_domain: "storage_domain_01"
+ qos: "test_qos"
+'''
+
+RETURN = '''
+id:
+ description: "ID of the managed disk profile"
+ returned: "On success if disk profile is found."
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+disk_profile:
+ description: "Dictionary of all the disk profile attributes. Disk profile attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/disk_profile."
+ returned: "On success if disk profile is found."
+ type: dict
+'''
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ ovirt_full_argument_spec,
+ get_id_by_name,
+ get_entity
+)
+
+
+class DiskProfileModule(BaseModule):
+
+ def _get_qos(self):
+ """
+ Gets the QoS entry if it exists
+
+ :return: otypes.QoS or None
+ """
+ dc_name = self._module.params.get('data_center')
+ dcs_service = self._connection.system_service().data_centers_service()
+ qos_service = dcs_service.data_center_service(get_id_by_name(dcs_service, dc_name)).qoss_service()
+ return get_entity(qos_service.qos_service(get_id_by_name(qos_service, self._module.params.get('qos'))))
+
+ def _get_storage_domain(self):
+ """
+ Gets the storage domain
+
+ :return: otypes.StorageDomain or None
+ """
+ storage_domain_name = self._module.params.get('storage_domain')
+ storage_domains_service = self._connection.system_service().storage_domains_service()
+ return get_entity(storage_domains_service.storage_domain_service(get_id_by_name(storage_domains_service, storage_domain_name)))
+
+ def build_entity(self):
+ """
+ Abstract method from BaseModule called from create() and remove()
+
+ Builds the disk profile from the given params
+
+ :return: otypes.DiskProfile
+ """
+ qos = self._get_qos()
+ storage_domain = self._get_storage_domain()
+
+ if qos is None:
+ raise Exception(
+ "The qos: {0} does not exist in data center: {1}".format(self._module.params.get('qos'), self._module.params.get('data_center'))
+ )
+ if storage_domain is None:
+ raise Exception(
+ "The storage domain: {0} does not exist.".format(self._module.params.get('storage_domain'))
+ )
+ return otypes.DiskProfile(
+ name=self._module.params.get('name') if self._module.params.get('name') else None,
+ id=self._module.params.get('id') if self._module.params.get('id') else None,
+ comment=self._module.params.get('comment'),
+ description=self._module.params.get('description'),
+ qos=qos,
+ storage_domain=storage_domain,
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ id=dict(default=None),
+ name=dict(default=None),
+ comment=dict(default=None),
+ storage_domain=dict(default=None),
+ data_center=dict(default=None),
+ qos=dict(default=None),
+ description=dict(default=None)
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=[['id', 'name']],
+ )
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+
+ disk_profiles_service = connection.system_service().disk_profiles_service()
+
+ disk_profile_module = DiskProfileModule(
+ connection=connection,
+ module=module,
+ service=disk_profiles_service,
+ )
+ state = module.params.get('state')
+ if state == 'present':
+ ret = disk_profile_module.create()
+ elif state == 'absent':
+ ret = disk_profile_module.remove()
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_event.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_event.py
new file mode 100644
index 000000000..dc0bb6473
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_event.py
@@ -0,0 +1,248 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_event
+short_description: Create or delete an event in oVirt/RHV
+version_added: "1.0.0"
+author: "Chris Keller (@nasx)"
+description:
+ - "This module can be used to create or delete an event in oVirt/RHV."
+options:
+ state:
+ description:
+ - "Should the event be present/absent."
+ - "The C(wait) option must be set to false when state is absent."
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ description:
+ description:
+ - "Message for the event."
+ - "Required when state is present."
+ type: str
+ severity:
+ description:
+ - "Severity of the event."
+ - "Required when state is present."
+ choices: ['error', 'normal', 'warning']
+ default: normal
+ type: str
+ origin:
+ description:
+ - "Originator of the event."
+ - "Required when state is present."
+ type: str
+ custom_id:
+ description:
+ - "Custom ID for the event. This ID must be unique for each event."
+ - "Required when state is present."
+ type: int
+ id:
+ description:
+ - "The event ID in the oVirt/RHV audit_log table. This ID is not the same as
+ custom_id and is only used when state is absent."
+ - "Required when state is absent."
+ type: str
+ correlation_id:
+ description:
+ - "The event correlation identifier. If not specified, the 'correlation-id' in the
+ connection header will be used. If neither is available, it is not set."
+ type: str
+ cluster:
+ description:
+ - "The id of the cluster associated with this event."
+ type: str
+ data_center:
+ description:
+ - "The id of the data center associated with this event."
+ type: str
+ host:
+ description:
+ - "The id of the host associated with this event."
+ type: str
+ storage_domain:
+ description:
+ - "The id of the storage domain associated with this event."
+ type: str
+ template:
+ description:
+ - "The id of the template associated with this event."
+ type: str
+ user:
+ description:
+ - "The id of the user associated with this event."
+ type: str
+ vm:
+ description:
+ - "The id of the VM associated with this event."
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication.
+
+- name: Create an event
+ ovirt.ovirt.ovirt_event:
+ state: present
+ description: "The file system /home on host xyz is almost full!"
+ origin: "mymonitor"
+ custom_id: 123456789
+ severity: warning
+
+- name: Create an event and link it to a specific object
+ ovirt.ovirt.ovirt_event:
+ state: present
+ description: "The file system /home is almost full!"
+ origin: "mymonitor"
+ custom_id: 123456789
+ severity: warning
+ vm: "c79db183-46ef-44d1-95f9-1a368c516c19"
+
+- name: Remove an event
+ ovirt.ovirt.ovirt_event:
+ state: absent
+ id: 123456789
+ wait: false
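+
+# A sketch of passing an explicit correlation id; the value is arbitrary and
+# only needs to be shared by the requests you want correlated:
+- name: Create an event with an explicit correlation id
+ ovirt.ovirt.ovirt_event:
+ state: present
+ description: "Maintenance window started on host xyz"
+ origin: "mymonitor"
+ custom_id: 123456790
+ severity: normal
+ correlation_id: "maintenance-window-42"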
+'''
+
+RETURN = '''
+id:
+ description: "ID of the event that was created."
+ returned: "On success."
+ type: str
+event:
+ description: "Dictionary of all the Event attributes. All event attributes can be found at the following url:
+ http://ovirt.github.io/ovirt-engine-api-model/master/#types/event"
+ returned: "On success."
+ type: dict
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ check_params,
+ create_connection,
+ equal,
+ get_dict_of_struct,
+ ovirt_full_argument_spec,
+)
+
+
+class EventsModule(BaseModule):
+
+ def build_entity(self):
+ correlation_id = None
+ if self._module.params['correlation_id'] is not None:
+ correlation_id = self._module.params['correlation_id']
+ elif self._connection._headers.get('correlation-id') is not None:
+ correlation_id = self._connection._headers.get('correlation-id')
+
+ return otypes.Event(
+ description=self._module.params['description'],
+ severity=otypes.LogSeverity(self._module.params['severity']),
+ origin=self._module.params['origin'],
+ custom_id=self._module.params['custom_id'],
+ id=self._module.params['id'],
+ correlation_id=correlation_id,
+ cluster=otypes.Cluster(
+ id=self._module.params['cluster']
+ ) if self._module.params['cluster'] is not None else None,
+ data_center=otypes.DataCenter(
+ id=self._module.params['data_center']
+ ) if self._module.params['data_center'] is not None else None,
+ host=otypes.Host(
+ id=self._module.params['host']
+ ) if self._module.params['host'] is not None else None,
+ storage_domain=otypes.StorageDomain(
+ id=self._module.params['storage_domain']
+ ) if self._module.params['storage_domain'] is not None else None,
+ template=otypes.Template(
+ id=self._module.params['template']
+ ) if self._module.params['template'] is not None else None,
+ user=otypes.User(
+ id=self._module.params['user']
+ ) if self._module.params['user'] is not None else None,
+ vm=otypes.Vm(
+ id=self._module.params['vm']
+ ) if self._module.params['vm'] is not None else None,
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ description=dict(default=None),
+ severity=dict(
+ choices=['error', 'normal', 'warning'],
+ default='normal',
+ ),
+ origin=dict(default=None),
+ custom_id=dict(default=None, type='int'),
+ id=dict(default=None),
+ correlation_id=dict(default=None),
+ cluster=dict(default=None),
+ data_center=dict(default=None),
+ host=dict(default=None),
+ storage_domain=dict(default=None),
+ template=dict(default=None),
+ user=dict(default=None),
+ vm=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ check_sdk(module)
+
+ # Wait must be set to false if state == absent
+
+ if module.params['state'] == 'absent' and module.params['wait'] is not False:
+ module.fail_json(msg='When "state" is absent, "wait" must be set to false.')
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ events_service = connection.system_service().events_service()
+ events_module = EventsModule(
+ connection=connection,
+ module=module,
+ service=events_service,
+ )
+
+ state = module.params['state']
+ if state == 'present':
+ ret = events_module.create()
+ elif state == 'absent':
+ ret = events_module.remove()
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_event_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_event_info.py
new file mode 100644
index 000000000..7427b19c1
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_event_info.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_event_info
+short_description: This module can be used to retrieve information about one or more oVirt/RHV events
+version_added: "1.0.0"
+author: "Chris Keller (@nasx)"
+description:
+ - "Retrieve information about one or more oVirt/RHV events."
+ - This module was called C(ovirt_event_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_event_info) module no longer returns C(ansible_facts)!
+options:
+ case_sensitive:
+ description:
+ - "Indicates if the search performed using the search parameter should be performed taking case
+ into account. The default value is true, which means that case is taken into account. If you
+ want to search ignoring case set it to false."
+ required: false
+ default: true
+ type: bool
+ from_:
+ description:
+ - "Indicates the event index after which events should be returned. The indexes of events are
+ strictly increasing, so when this parameter is used only the events with greater indexes
+ will be returned."
+ required: false
+ type: int
+ max:
+ description:
+ - "Sets the maximum number of events to return. If not specified all the events are returned."
+ required: false
+ type: int
+ search:
+ description:
+ - "Search term which is accepted by the oVirt/RHV API."
+ - "For example to search for events of severity alert use the following pattern: severity=alert"
+ required: false
+ type: str
+ headers:
+ description:
+ - "Additional HTTP headers."
+ required: false
+ type: str
+ query:
+ description:
+ - "Additional URL query parameters."
+ required: false
+ type: str
+ wait:
+ description:
+ - "If True wait for the response."
+ required: false
+ default: true
+ type: bool
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url: https://ovirt.github.io/ovirt-engine-api-model/master/#types/event/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication.
+
+- name: Return all events
+ ovirt.ovirt.ovirt_event_info:
+ register: result
+
+- name: Return the last 10 events
+ ovirt.ovirt.ovirt_event_info:
+ max: 10
+ register: result
+
+- name: Return all events of type alert
+ ovirt.ovirt.ovirt_event_info:
+ search: "severity=alert"
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_events }}"
+'''
+
+RETURN = '''
+ovirt_events:
+ description: "List of dictionaries describing the events. Event attributes are mapped to dictionary keys.
+ All event attributes can be found at the following url:
+ http://ovirt.github.io/ovirt-engine-api-model/master/#types/event"
+ returned: On success."
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ case_sensitive=dict(default=True, type='bool', required=False),
+ from_=dict(default=None, type='int', required=False),
+ max=dict(default=None, type='int', required=False),
+ search=dict(default='', required=False),
+ headers=dict(default='', required=False),
+ query=dict(default='', required=False),
+ wait=dict(default=True, type='bool', required=False)
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+ "The 'fetch_nested' and 'nested_attributes' are deprecated please use 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ events_service = connection.system_service().events_service()
+ events = events_service.list(
+ case_sensitive=module.params['case_sensitive'],
+ from_=module.params['from_'],
+ max=module.params['max'],
+ search=module.params['search'],
+ headers=module.params['headers'],
+ query=module.params['query'],
+ wait=module.params['wait'],
+ follow=",".join(module.params['follow'])
+ )
+
+ result = dict(
+ ovirt_events=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in events
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_external_provider.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_external_provider.py
new file mode 100644
index 000000000..842b45a36
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_external_provider.py
@@ -0,0 +1,424 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_external_provider
+short_description: Module to manage external providers in oVirt/RHV
+version_added: "1.0.0"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage external providers in oVirt/RHV"
+options:
+ name:
+ description:
+ - "Name of the external provider to manage."
+ type: str
+ state:
+ description:
+ - "Should the external be present or absent"
+ - "When you are using absent for I(os_volume), you need to make
+ sure that SD is not attached to the data center!"
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ description:
+ description:
+ - "Description of the external provider."
+ type: str
+ type:
+ description:
+ - "Type of the external provider."
+ choices: ['os_image', 'network', 'os_volume', 'foreman']
+ required: true
+ type: str
+ aliases: ['provider']
+ url:
+ description:
+ - "URL where external provider is hosted."
+ - "Applicable for those types: I(os_image), I(os_volume), I(network) and I(foreman)."
+ type: str
+ username:
+ description:
+ - "Username to be used for login to external provider."
+ - "Applicable for all types."
+ type: str
+ password:
+ description:
+ - "Password of the user specified in C(username) parameter."
+ - "Applicable for all types."
+ type: str
+ tenant_name:
+ description:
+ - "Name of the tenant."
+ - "Applicable for those types: I(os_image), I(os_volume) and I(network)."
+ aliases: ['tenant']
+ type: str
+ authentication_url:
+ description:
+ - "Keystone authentication URL of the openstack provider."
+ - "Applicable for those types: I(os_image), I(os_volume) and I(network)."
+ aliases: ['auth_url']
+ type: str
+ data_center:
+ description:
+ - "Name of the data center where provider should be attached."
+ - "Applicable for those type: I(os_volume)."
+ type: str
+ read_only:
+ description:
+ - "Specify if the network should be read only."
+ - "Applicable if C(type) is I(network)."
+ type: bool
+ network_type:
+ description:
+ - "Type of the external network provider either external (for example OVN) or neutron."
+ - "Applicable if C(type) is I(network)."
+ choices: ['external', 'neutron']
+ default: 'external'
+ type: str
+ authentication_keys:
+ description:
+ - "List of authentication keys."
+ - "When you will not pass these keys and there are already some
+ of them defined in the system they will be removed."
+ - "Applicable for I(os_volume)."
+ suboptions:
+ uuid:
+ description:
+ - The uuid which will be used.
+ value:
+ description:
+ - The value which will be used.
+ default: []
+ type: list
+ elements: dict
+ aliases: ['auth_keys']
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add image external provider:
+- ovirt.ovirt.ovirt_external_provider:
+ name: image_provider
+ type: os_image
+ url: http://1.2.3.4:9292
+ username: admin
+ password: 123456
+ tenant: admin
+ auth_url: http://1.2.3.4:35357/v2.0
+
+# Add volume external provider:
+- ovirt.ovirt.ovirt_external_provider:
+ name: image_provider
+ type: os_volume
+ url: http://1.2.3.4:9292
+ username: admin
+ password: 123456
+ tenant: admin
+ auth_url: http://1.2.3.4:5000/v2.0
+ authentication_keys:
+ -
+ uuid: "1234567-a1234-12a3-a234-123abc45678"
+ value: "ABCD00000000111111222333445w=="
+
+# Add foreman provider:
+- ovirt.ovirt.ovirt_external_provider:
+ name: foreman_provider
+ type: foreman
+ url: https://foreman.example.com
+ username: admin
+ password: 123456
+
+# Add external network provider for OVN:
+- ovirt.ovirt.ovirt_external_provider:
+ name: ovn_provider
+ type: network
+ network_type: external
+ url: http://1.2.3.4:9696
+
+# Remove image external provider:
+- ovirt.ovirt.ovirt_external_provider:
+ state: absent
+ name: image_provider
+ type: os_image
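+
+# Add an external network provider with read-only networks (a sketch of the
+# read_only flag; the name and url are illustrative):
+- ovirt.ovirt.ovirt_external_provider:
+ name: ovn_provider_ro
+ type: network
+ network_type: external
+ url: http://1.2.3.4:9696
+ read_only: true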
+'''
+
+RETURN = '''
+id:
+ description: ID of the external provider which is managed
+ returned: On success if external provider is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+external_host_provider:
+ description: "Dictionary of all the external_host_provider attributes. External provider attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/external_host_provider."
+ returned: "On success and if parameter 'type: foreman' is used."
+ type: dict
+openstack_image_provider:
+ description: "Dictionary of all the openstack_image_provider attributes. External provider attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_image_provider."
+ returned: "On success and if parameter 'type: os_image' is used."
+ type: dict
+openstack_volume_provider:
+ description: "Dictionary of all the openstack_volume_provider attributes. External provider attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_volume_provider."
+ returned: "On success and if parameter 'type: os_volume' is used."
+ type: dict
+openstack_network_provider:
+ description: "Dictionary of all the openstack_network_provider attributes. External provider attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_network_provider."
+ returned: "On success and if parameter 'type: network' is used."
+ type: dict
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_params,
+ check_sdk,
+ create_connection,
+ equal,
+ ovirt_full_argument_spec,
+)
+
+
+OS_VOLUME = 'os_volume'
+OS_IMAGE = 'os_image'
+NETWORK = 'network'
+FOREMAN = 'foreman'
+
+
+class ExternalProviderModule(BaseModule):
+
+ non_provider_params = ['type', 'authentication_keys', 'data_center']
+
+ def provider_type(self, provider_type):
+ self._provider_type = provider_type
+
+ def provider_module_params(self):
+ provider_params = [
+ (key, value) for key, value in self._module.params.items() if key
+ not in self.non_provider_params
+ ]
+ provider_params.append(('data_center', self.get_data_center()))
+ return provider_params
+
+ def get_data_center(self):
+ dc_name = self._module.params.get("data_center", None)
+ if dc_name:
+ system_service = self._connection.system_service()
+ data_centers_service = system_service.data_centers_service()
+ return data_centers_service.list(
+ search='name=%s' % dc_name,
+ )[0]
+ return dc_name
+
+ def build_entity(self):
+ provider_type = self._provider_type(
+ requires_authentication=self._module.params.get('username') is not None,
+ )
+ if self._module.params.pop('type') == NETWORK:
+ setattr(
+ provider_type,
+ 'type',
+ otypes.OpenStackNetworkProviderType(self._module.params.pop('network_type'))
+ )
+
+ for key, value in self.provider_module_params():
+ if hasattr(provider_type, key):
+ setattr(provider_type, key, value)
+
+ return provider_type
+
+ def update_check(self, entity):
+ return (
+ equal(self._module.params.get('description'), entity.description) and
+ equal(self._module.params.get('url'), entity.url) and
+ equal(self._module.params.get('authentication_url'), entity.authentication_url) and
+ equal(self._module.params.get('tenant_name'), getattr(entity, 'tenant_name', None)) and
+ equal(self._module.params.get('username'), entity.username)
+ )
+
+ def update_volume_provider_auth_keys(
+ self, provider, providers_service, keys
+ ):
+ """
+ Update auth keys for the volume provider: add keys that do not exist yet,
+ and remove keys that are not specified but are already defined in the
+ external volume provider.
+
+ Args:
+ provider (dict): Volume provider details.
+ providers_service (openstack_volume_providers_service): Provider
+ service.
+ keys (list): Keys to be updated/added to volume provider, each key
+ is represented as dict with keys: uuid, value.
+ """
+
+ provider_service = providers_service.provider_service(provider['id'])
+ auth_keys_service = provider_service.authentication_keys_service()
+ provider_keys = auth_keys_service.list()
+ # Remove keys which are no longer defined:
+ for key in [
+ k.id for k in provider_keys if k.uuid not in [
+ defined_key['uuid'] for defined_key in keys
+ ]
+ ]:
+ self.changed = True
+ if not self._module.check_mode:
+ auth_keys_service.key_service(key).remove()
+ if not (provider_keys or keys):
+ # Nothing to do when both are empty.
+ return
+ for key in keys:
+ key_id_for_update = None
+ for existing_key in provider_keys:
+ if key['uuid'] == existing_key.uuid:
+ key_id_for_update = existing_key.id
+
+ auth_key_usage_type = (
+ otypes.OpenstackVolumeAuthenticationKeyUsageType("ceph")
+ )
+ auth_key = otypes.OpenstackVolumeAuthenticationKey(
+ usage_type=auth_key_usage_type,
+ uuid=key['uuid'],
+ value=key['value'],
+ )
+
+ if not key_id_for_update:
+ self.changed = True
+ if not self._module.check_mode:
+ auth_keys_service.add(auth_key)
+ else:
+ # We cannot really distinguish here whether the key was actually updated,
+ # because we cannot read the key value back to check if it changed. So,
+ # to be safe, we always update here.
+ self.changed = True
+ if not self._module.check_mode:
+ auth_key_service = (
+ auth_keys_service.key_service(key_id_for_update)
+ )
+ auth_key_service.update(auth_key)
+
+
+def _external_provider_service(provider_type, system_service):
+ if provider_type == OS_IMAGE:
+ return otypes.OpenStackImageProvider, system_service.openstack_image_providers_service()
+ elif provider_type == NETWORK:
+ return otypes.OpenStackNetworkProvider, system_service.openstack_network_providers_service()
+ elif provider_type == OS_VOLUME:
+ return otypes.OpenStackVolumeProvider, system_service.openstack_volume_providers_service()
+ elif provider_type == FOREMAN:
+ return otypes.ExternalHostProvider, system_service.external_host_providers_service()
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ name=dict(default=None),
+ description=dict(default=None),
+ type=dict(
+ required=True,
+ choices=[
+ OS_IMAGE, NETWORK, OS_VOLUME, FOREMAN,
+ ],
+ aliases=['provider'],
+ ),
+ url=dict(default=None),
+ username=dict(default=None),
+ password=dict(default=None, no_log=True),
+ tenant_name=dict(default=None, aliases=['tenant']),
+ authentication_url=dict(default=None, aliases=['auth_url']),
+ data_center=dict(default=None),
+ read_only=dict(default=None, type='bool'),
+ network_type=dict(
+ default='external',
+ choices=['external', 'neutron'],
+ ),
+ authentication_keys=dict(
+ default=[], aliases=['auth_keys'], type='list', no_log=True, elements='dict'
+ ),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+ check_params(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ provider_type_param = module.params.get('type')
+ provider_type, external_providers_service = _external_provider_service(
+ provider_type=provider_type_param,
+ system_service=connection.system_service(),
+ )
+ external_providers_module = ExternalProviderModule(
+ connection=connection,
+ module=module,
+ service=external_providers_service,
+ )
+ external_providers_module.provider_type(provider_type)
+
+ state = module.params.pop('state')
+ if state == 'absent':
+ ret = external_providers_module.remove()
+ elif state == 'present':
+ ret = external_providers_module.create()
+ openstack_volume_provider_id = ret.get('id')
+ if (
+ provider_type_param == OS_VOLUME and
+ openstack_volume_provider_id
+ ):
+ external_providers_module.update_volume_provider_auth_keys(
+ ret, external_providers_service,
+ module.params.get('authentication_keys'),
+ )
+
+ module.exit_json(**ret)
+
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_external_provider_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_external_provider_info.py
new file mode 100644
index 000000000..d47639196
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_external_provider_info.py
@@ -0,0 +1,178 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_external_provider_info
+short_description: Retrieve information about one or more oVirt/RHV external providers
+version_added: "1.0.0"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Retrieve information about one or more oVirt/RHV external providers."
+ - This module was called C(ovirt_external_provider_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_external_provider_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_external_providers), which
+ contains a list of external_providers. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ type:
+ description:
+ - "Type of the external provider."
+ choices: ['os_image', 'os_network', 'os_volume', 'foreman']
+ required: true
+ type: str
+ aliases: ['provider']
+ name:
+ description:
+ - "Name of the external provider, can be used as glob expression."
+ type: str
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "For type C(foreman), all follow parameters can be found at following url:
+ https://ovirt.github.io/ovirt-engine-api-model/master/#types/external_host_provider/links_summary"
+ - "For type C(os_image), all follow parameters can be found at following url:
+ https://ovirt.github.io/ovirt-engine-api-model/master/#types/open_stack_image_provider/links_summary"
+ - "For type C(os_volume), all follow parameters can be found at following url:
+ https://ovirt.github.io/ovirt-engine-api-model/master/#types/open_stack_volume_provider/links_summary"
+ - "For type C(os_network), all follow parameters can be found at following url:
+ https://ovirt.github.io/ovirt-engine-api-model/master/#types/open_stack_network_provider/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all image external providers named C(glance):
+- ovirt.ovirt.ovirt_external_provider_info:
+ type: os_image
+ name: glance
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_external_providers }}"
+'''
+
+RETURN = '''
+ovirt_external_providers:
+ description:
+ - "List of dictionaries. Content depends on I(type)."
+ - "For type C(foreman), attributes appearing in the dictionary can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/external_host_provider."
+ - "For type C(os_image), attributes appearing in the dictionary can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/open_stack_image_provider."
+ - "For type C(os_volume), attributes appearing in the dictionary can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/open_stack_volume_provider."
+ - "For type C(os_network), attributes appearing in the dictionary can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/open_stack_network_provider."
+ returned: On success
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def _external_provider_service(provider_type, system_service):
+ if provider_type == 'os_image':
+ return system_service.openstack_image_providers_service()
+ elif provider_type == 'os_network':
+ return system_service.openstack_network_providers_service()
+ elif provider_type == 'os_volume':
+ return system_service.openstack_volume_providers_service()
+ elif provider_type == 'foreman':
+ return system_service.external_host_providers_service()
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ name=dict(default=None, required=False),
+ type=dict(
+ required=True,
+ choices=[
+ 'os_image', 'os_network', 'os_volume', 'foreman',
+ ],
+ aliases=['provider'],
+ ),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+ "The 'fetch_nested' and 'nested_attributes' are deprecated please use 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ external_providers_service = _external_provider_service(
+ provider_type=module.params.pop('type'),
+ system_service=connection.system_service(),
+ )
+ if module.params['name']:
+ external_providers = [
+ e for e in external_providers_service.list(follow=",".join(module.params['follow']))
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ else:
+ external_providers = external_providers_service.list(follow=",".join(module.params['follow']))
+
+ result = dict(
+ ovirt_external_providers=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in external_providers
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_group.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_group.py
new file mode 100644
index 000000000..5e154ff6d
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_group.py
@@ -0,0 +1,187 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_group
+short_description: Module to manage groups in oVirt/RHV
+version_added: "1.0.0"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage groups in oVirt/RHV"
+options:
+ name:
+ description:
+ - "Name of the group to manage."
+ required: true
+ type: str
+ state:
+ description:
+ - "Should the group be present/absent."
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ authz_name:
+ description:
+ - "Authorization provider of the group. In previous versions of oVirt/RHV known as domain."
+ required: true
+ aliases: ['domain']
+ type: str
+ namespace:
+ description:
+ - "Namespace of the authorization provider, where group resides."
+ required: false
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add group group1 from authorization provider example.com-authz
+- ovirt.ovirt.ovirt_group:
+ name: group1
+ domain: example.com-authz
+
+# Add group group1 from authorization provider example.com-authz
+# In case of multi-domain Active Directory setup, you should pass
+# also namespace, so it adds correct group:
+- ovirt.ovirt.ovirt_group:
+ name: group1
+ namespace: dc=ad2,dc=example,dc=com
+ domain: example.com-authz
+
+# Remove group group1 with authorization provider example.com-authz
+- ovirt.ovirt.ovirt_group:
+ state: absent
+ name: group1
+ domain: example.com-authz
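+
+# Register the result to inspect the returned group attributes (a sketch;
+# 'group' and 'id' are the keys documented in RETURN below):
+- ovirt.ovirt.ovirt_group:
+ name: group1
+ domain: example.com-authz
+ register: group_result
+- ansible.builtin.debug:
+ msg: "{{ group_result.group }}"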
+'''
+
+RETURN = '''
+id:
+ description: ID of the group which is managed
+ returned: On success if group is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+group:
+ description: "Dictionary of all the group attributes. Group attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/group."
+ returned: On success if group is found.
+ type: dict
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ check_params,
+ create_connection,
+ equal,
+ ovirt_full_argument_spec,
+)
+
+
+def _group(connection, module):
+ groups = connection.system_service().groups_service().list(
+ search="name={name}".format(
+ name=module.params['name'],
+ )
+ )
+
+    # If more than one group was found, filter them by namespace and authz name
+    # (filtering here, as the oVirt/RHV backend doesn't support it):
+ if len(groups) > 1:
+ groups = [
+ g for g in groups if (
+ equal(module.params['namespace'], g.namespace) and
+ equal(module.params['authz_name'], g.domain.name)
+ )
+ ]
+ return groups[0] if groups else None
+
+
+class GroupsModule(BaseModule):
+
+ def build_entity(self):
+ return otypes.Group(
+ domain=otypes.Domain(
+ name=self._module.params['authz_name']
+ ),
+ name=self._module.params['name'],
+ namespace=self._module.params['namespace'],
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ name=dict(required=True),
+ authz_name=dict(required=True, aliases=['domain']),
+ namespace=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+ check_params(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ groups_service = connection.system_service().groups_service()
+ groups_module = GroupsModule(
+ connection=connection,
+ module=module,
+ service=groups_service,
+ )
+ group = _group(connection, module)
+ state = module.params['state']
+ if state == 'present':
+ ret = groups_module.create(entity=group)
+ elif state == 'absent':
+ ret = groups_module.remove(entity=group)
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_group_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_group_info.py
new file mode 100644
index 000000000..2385a5a3a
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_group_info.py
@@ -0,0 +1,130 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_group_info
+short_description: Retrieve information about one or more oVirt/RHV groups
+version_added: "1.0.0"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Retrieve information about one or more oVirt/RHV groups."
+ - This module was called C(ovirt_group_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_group_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_groups), which
+ contains a list of groups. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search group X use following pattern: name=X"
+ type: str
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url: https://ovirt.github.io/ovirt-engine-api-model/master/#types/group/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all groups whose names start with C(admin):
+- ovirt.ovirt.ovirt_group_info:
+ pattern: name=admin*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_groups }}"
+'''
+
+RETURN = '''
+ovirt_groups:
+ description: "List of dictionaries describing the groups. Group attributes are mapped to dictionary keys,
+ all groups attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/group."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+ "The 'fetch_nested' and 'nested_attributes' are deprecated please use 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ groups_service = connection.system_service().groups_service()
+ groups = groups_service.list(
+ search=module.params['pattern'],
+ follow=",".join(module.params['follow'])
+ )
+ result = dict(
+ ovirt_groups=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in groups
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host.py
new file mode 100644
index 000000000..dbf9e7d20
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host.py
@@ -0,0 +1,760 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_host
+short_description: Module to manage hosts in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Module to manage hosts in oVirt/RHV"
+options:
+ id:
+ description:
+ - "ID of the host to manage."
+ type: str
+ name:
+ description:
+ - "Name of the host to manage."
+ required: true
+ type: str
+ state:
+ description:
+ - "State which should a host to be in after successful completion."
+ - "I(iscsilogin) and I(iscsidiscover) are supported since version 2.4."
+ choices: [
+ 'present', 'absent', 'maintenance', 'upgraded', 'started',
+ 'restarted', 'stopped', 'reinstalled', 'iscsidiscover', 'iscsilogin'
+ ]
+ default: present
+ type: str
+ comment:
+ description:
+ - "Description of the host."
+ type: str
+ timeout:
+ description:
+ - "The amount of time in seconds the module should wait for the host to
+ get into desired state."
+ default: 600
+ cluster:
+ description:
+ - "Name of the cluster, where host should be created."
+ type: str
+ address:
+ description:
+ - "Host address. It can be either FQDN (preferred) or IP address."
+ type: str
+ password:
+ description:
+ - "Password of the root. It's required in case C(public_key) is set to I(False)."
+ type: str
+ ssh_port:
+ description:
+ - "The host SSH port."
+ type: int
+ public_key:
+ description:
+ - "I(True) if the public key should be used to authenticate to host."
+ - "It's required in case C(password) is not set."
+ default: False
+ type: bool
+ aliases: ['ssh_public_key']
+ enroll_certificate:
+ description:
+ - "Enrolls the certificate of the host. Useful in case you get a warning that it is about to expire or has already expired."
+ - "The host must be in maintenance status before enrolling the certificates."
+ default: False
+ type: bool
+ kdump_integration:
+ description:
+ - "Specify if host will have enabled Kdump integration."
+ choices: ['enabled', 'disabled']
+ type: str
+ spm_priority:
+ description:
+ - "SPM priority of the host. Integer value from 1 to 10, where higher number means higher priority."
+ type: int
+ override_iptables:
+ description:
+ - "If True host iptables will be overridden by host deploy script."
+ - "Note that C(override_iptables) is I(false) by default in oVirt/RHV."
+ type: bool
+ force:
+ description:
+ - "Indicates that the host should be removed even if it is non-responsive,
+ or if it is part of a Gluster Storage cluster and has volume bricks on it."
+ - "WARNING: It doesn't forcibly remove the host if another host related operation is being executed on the host at the same time."
+ default: False
+ type: bool
+ override_display:
+ description:
+ - "Override the display address of all VMs on this host with specified address."
+ type: str
+ kernel_params:
+ description:
+ - "List of kernel boot parameters."
+ - "Following are most common kernel parameters used for host:"
+ - "Hostdev Passthrough & SR-IOV: intel_iommu=on"
+ - "Nested Virtualization: kvm-intel.nested=1"
+ - "Unsafe Interrupts: vfio_iommu_type1.allow_unsafe_interrupts=1"
+ - "PCI Reallocation: pci=realloc"
+ - "C(Note:)"
+ - "Modifying kernel boot parameters settings can lead to a host boot failure.
+ Please consult the product documentation before doing any changes."
+ - "Kernel boot parameters changes require host deploy and restart. The host needs
+ to be I(reinstalled) successfully and then to be I(rebooted) for kernel boot parameters
+ to be applied."
+ type: list
+ elements: str
+ hosted_engine:
+ description:
+ - "If I(deploy) it means this host should deploy also hosted engine
+ components."
+ - "If I(undeploy) it means this host should un-deploy hosted engine
+ components and this host will not function as part of the High
+ Availability cluster."
+ choices:
+ - 'deploy'
+ - 'undeploy'
+ type: str
+ power_management_enabled:
+ description:
+ - "Enable or disable power management of the host."
+ - "For more comprehensive setup of PM use C(ovirt_host_pm) module."
+ type: bool
+ activate:
+ description:
+ - "If C(state) is I(present) activate the host."
+ - "This parameter is good to disable, when you don't want to change
+ the state of host when using I(present) C(state)."
+ default: True
+ type: bool
+ iscsi:
+ description:
+ - "If C(state) is I(iscsidiscover) it means that the iscsi attribute is being
+ used to discover targets"
+ - "If C(state) is I(iscsilogin) it means that the iscsi attribute is being
+               used to log in to the specified targets passed as part of the iscsi attribute"
+ suboptions:
+ username:
+ description:
+ - "A CHAP user name for logging into a target."
+ password:
+ description:
+ - "A CHAP password for logging into a target."
+ address:
+ description:
+ - "Address of the iSCSI storage server."
+ target:
+ description:
+ - "The target IQN for the storage device."
+            port:
+                description:
+                    - "The port used to connect with iSCSI."
+            portal:
+                description:
+                    - "The portal used to connect with iSCSI."
+ type: dict
+ check_upgrade:
+ description:
+ - "If I(true) and C(state) is I(upgraded) run check for upgrade
+ action before executing upgrade action."
+ default: True
+ type: bool
+ reboot_after_upgrade:
+ description:
+ - "If I(true) and C(state) is I(upgraded) reboot host after successful upgrade."
+ default: True
+ type: bool
+ reboot_after_installation:
+ description:
+ - "If I(true) reboot host after successful installation."
+ - "Default value on engine is I(true)."
+ type: bool
+ vgpu_placement:
+ description:
+ - If I(consolidated), each vGPU is placed on the first physical card with
+ available space. This is the default placement, utilizing all available
+ space on the physical cards.
+ - If I(separated), each vGPU is placed on a separate physical card, if
+ possible. This can be useful for improving vGPU performance.
+ choices: ['consolidated', 'separated']
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add host with username/password supporting SR-IOV.
+# Note that override_iptables is false by default in oVirt/RHV:
+- ovirt.ovirt.ovirt_host:
+ cluster: Default
+ name: myhost
+ address: 10.34.61.145
+ password: secret
+ override_iptables: true
+ kernel_params:
+ - intel_iommu=on
+
+# Add host using public key
+- ovirt.ovirt.ovirt_host:
+ public_key: true
+ cluster: Default
+ name: myhost2
+ address: 10.34.61.145
+ override_iptables: true
+
+# Deploy hosted engine host
+- ovirt.ovirt.ovirt_host:
+ cluster: Default
+ name: myhost2
+ password: secret
+ address: 10.34.61.145
+ override_iptables: true
+ hosted_engine: deploy
+
+# Maintenance
+- ovirt.ovirt.ovirt_host:
+ state: maintenance
+ name: myhost
+
+# Restart host using power management:
+- ovirt.ovirt.ovirt_host:
+ state: restarted
+ name: myhost
+
+# Upgrade host
+- ovirt.ovirt.ovirt_host:
+ state: upgraded
+ name: myhost
+
+# discover iscsi targets
+- ovirt.ovirt.ovirt_host:
+ state: iscsidiscover
+ name: myhost
+ iscsi:
+ username: iscsi_user
+ password: secret
+ address: 10.34.61.145
+ port: 3260
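+
+# Register the discovery result to consume the returned iscsi_targets list
+# (an illustrative sketch; the variable name 'discovered' is arbitrary):
+- ovirt.ovirt.ovirt_host:
+    state: iscsidiscover
+    name: myhost
+    iscsi:
+      username: iscsi_user
+      password: secret
+      address: 10.34.61.145
+      port: 3260
+  register: discovered
+- ansible.builtin.debug:
+    msg: "{{ discovered.iscsi_targets }}"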
+
+
+# login to iscsi targets
+- ovirt.ovirt.ovirt_host:
+ state: iscsilogin
+ name: myhost
+ iscsi:
+ username: iscsi_user
+ password: secret
+ address: 10.34.61.145
+ target: "iqn.2015-07.com.mlipchuk2.redhat:444"
+ port: 3260
+
+
+# Reinstall host using public key
+- ovirt.ovirt.ovirt_host:
+ state: reinstalled
+ name: myhost
+ public_key: true
+
+# Remove host
+- ovirt.ovirt.ovirt_host:
+ state: absent
+ name: myhost
+ force: True
+
+# Retry removing host when failed (https://bugzilla.redhat.com/show_bug.cgi?id=1719271)
+- ovirt.ovirt.ovirt_host:
+ state: absent
+ name: myhost
+ register: result
+ until: not result.failed
+ retries: 6
+ delay: 20
+
+# Change host Name
+- ovirt.ovirt.ovirt_host:
+ id: 00000000-0000-0000-0000-000000000000
+ name: "new host name"
+
+- name: Enroll host certificates
+ ovirt.ovirt.ovirt_host:
+ state: maintenance
+ name: myhost
+ enroll_certificate: True
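+
+# Add host with a non-default SSH port and SPM priority
+# (an illustrative sketch; the values are arbitrary):
+- ovirt.ovirt.ovirt_host:
+    cluster: Default
+    name: myhost3
+    address: 10.34.61.146
+    password: secret
+    ssh_port: 2222
+    spm_priority: 5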
+'''
+
+RETURN = '''
+id:
+ description: ID of the host which is managed
+ returned: On success if host is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+host:
+ description: "Dictionary of all the host attributes. Host attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host."
+ returned: On success if host is found.
+ type: dict
+iscsi_targets:
+ description: "List of host iscsi targets"
+ returned: On success if host is found and state is iscsidiscover.
+ type: list
+'''
+
+import time
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+
+ from ovirtsdk4.types import HostStatus as hoststate
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ get_id_by_name,
+ ovirt_full_argument_spec,
+ wait,
+ get_dict_of_struct,
+)
+
+
+class HostsModule(BaseModule):
+ def __init__(self, start_event=None, *args, **kwargs):
+ super(HostsModule, self).__init__(*args, **kwargs)
+ self.start_event = start_event
+
+ def build_entity(self):
+ return otypes.Host(
+ id=self._module.params.get('id'),
+ name=self.param('name'),
+ cluster=otypes.Cluster(
+ name=self.param('cluster')
+ ) if self.param('cluster') else None,
+ comment=self.param('comment'),
+ address=self.param('address'),
+ root_password=self.param('password'),
+ ssh=otypes.Ssh(
+ authentication_method=otypes.SshAuthenticationMethod.PUBLICKEY if self.param('public_key') else None,
+ port=self.param('ssh_port'),
+ ),
+ spm=otypes.Spm(
+ priority=self.param('spm_priority'),
+ ) if self.param('spm_priority') else None,
+ override_iptables=self.param('override_iptables'),
+ display=otypes.Display(
+ address=self.param('override_display'),
+ ) if self.param('override_display') else None,
+ os=otypes.OperatingSystem(
+ custom_kernel_cmdline=' '.join(self.param('kernel_params')),
+ ) if self.param('kernel_params') else None,
+ power_management=otypes.PowerManagement(
+ enabled=self.param('power_management_enabled'),
+ kdump_detection=self.param('kdump_integration') == 'enabled',
+ ) if self.param('power_management_enabled') is not None or self.param('kdump_integration') else None,
+ vgpu_placement=otypes.VgpuPlacement(
+ self.param('vgpu_placement')
+ ) if self.param('vgpu_placement') is not None else None,
+ )
+
+ def update_check(self, entity):
+ kernel_params = self.param('kernel_params')
+ return (
+ equal(self.param('comment'), entity.comment) and
+ equal(self.param('kdump_integration'), 'enabled' if entity.power_management.kdump_detection else 'disabled') and
+ equal(self.param('spm_priority'), entity.spm.priority) and
+ equal(self.param('name'), entity.name) and
+ equal(self.param('power_management_enabled'), entity.power_management.enabled) and
+ equal(self.param('override_display'), getattr(entity.display, 'address', None)) and
+ equal(self.param('vgpu_placement'), str(entity.vgpu_placement)) and
+ equal(
+ sorted(kernel_params) if kernel_params else None,
+ sorted(entity.os.custom_kernel_cmdline.split(' '))
+ )
+ )
+
+ def pre_remove(self, entity):
+ self.action(
+ entity=entity,
+ action='deactivate',
+ action_condition=lambda h: h.status != hoststate.MAINTENANCE,
+ wait_condition=lambda h: h.status == hoststate.MAINTENANCE,
+ )
+
+ def post_reinstall(self, host):
+ wait(
+ service=self._service.service(host.id),
+ condition=lambda h: h.status != hoststate.MAINTENANCE,
+ fail_condition=failed_state,
+ wait=self.param('wait'),
+ timeout=self.param('timeout'),
+ )
+
+ def raise_host_exception(self):
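+        # Collect WARNING/ERROR events reported for this host since the module
+        # started and surface them in the raised exception: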
+ events = self._connection.system_service().events_service().list(from_=int(self.start_event.index))
+ error_events = [
+ event.description for event in events
+ if event.host is not None and (event.host.id == self.param('id') or event.host.name == self.param('name')) and
+ event.severity in [otypes.LogSeverity.WARNING, otypes.LogSeverity.ERROR]
+ ]
+ if error_events:
+ raise Exception("Error message: %s" % error_events)
+ return True
+
+ def failed_state_after_reinstall(self, host, count=0):
+ if host.status in [
+ hoststate.ERROR,
+ hoststate.INSTALL_FAILED,
+ hoststate.NON_OPERATIONAL,
+ ]:
+ return self.raise_host_exception()
+
+        # If host is in a non-responsive state after upgrade/install,
+        # wait a few seconds and re-check the state:
+ if host.status == hoststate.NON_RESPONSIVE:
+ if count <= 3:
+ time.sleep(20)
+ return self.failed_state_after_reinstall(
+ self._service.service(host.id).get(),
+ count + 1,
+ )
+ else:
+ return self.raise_host_exception()
+
+ return False
+
+
+def failed_state(host):
+ return host.status in [
+ hoststate.ERROR,
+ hoststate.INSTALL_FAILED,
+ hoststate.NON_RESPONSIVE,
+ hoststate.NON_OPERATIONAL,
+ ]
+
+
+def control_state(host_module):
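+    # Inspect the host's current status: raise early on states the requested
+    # action cannot handle and wait out transitional states before proceeding: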
+ host = host_module.search_entity()
+ if host is None:
+ return
+
+ state = host_module._module.params['state']
+ host_service = host_module._service.service(host.id)
+ if failed_state(host):
+ # In case host is in INSTALL_FAILED status, we can reinstall it:
+ if hoststate.INSTALL_FAILED == host.status and state != 'reinstalled':
+ raise Exception(
+ "Not possible to manage host '%s' in state '%s'." % (
+ host.name,
+ host.status
+ )
+ )
+ elif host.status in [
+ hoststate.REBOOT,
+ hoststate.CONNECTING,
+ hoststate.INITIALIZING,
+ hoststate.INSTALLING,
+ hoststate.INSTALLING_OS,
+ ]:
+ wait(
+ service=host_service,
+ condition=lambda host: host.status == hoststate.UP,
+ fail_condition=failed_state,
+ )
+ elif host.status == hoststate.PREPARING_FOR_MAINTENANCE:
+ wait(
+ service=host_service,
+ condition=lambda host: host.status == hoststate.MAINTENANCE,
+ fail_condition=failed_state,
+ )
+
+ return host
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=[
+ 'present', 'absent', 'maintenance', 'upgraded', 'started',
+ 'restarted', 'stopped', 'reinstalled', 'iscsidiscover', 'iscsilogin'
+ ],
+ default='present',
+ ),
+ name=dict(required=True),
+ id=dict(default=None),
+ comment=dict(default=None),
+ cluster=dict(default=None),
+ address=dict(default=None),
+ ssh_port=dict(default=None, type='int'),
+ password=dict(default=None, no_log=True),
+ public_key=dict(default=False, type='bool', aliases=['ssh_public_key']),
+ enroll_certificate=dict(default=False, type='bool'),
+ kdump_integration=dict(default=None, choices=['enabled', 'disabled']),
+ spm_priority=dict(default=None, type='int'),
+ override_iptables=dict(default=None, type='bool'),
+ force=dict(default=False, type='bool'),
+ reboot_after_installation=dict(default=None, type='bool'),
+ timeout=dict(default=600, type='int'),
+ override_display=dict(default=None),
+ kernel_params=dict(default=None, type='list', elements='str'),
+ hosted_engine=dict(default=None, choices=['deploy', 'undeploy']),
+ power_management_enabled=dict(default=None, type='bool'),
+ activate=dict(default=True, type='bool'),
+ iscsi=dict(default=None, type='dict'),
+ check_upgrade=dict(default=True, type='bool'),
+ reboot_after_upgrade=dict(default=True, type='bool'),
+ vgpu_placement=dict(default=None, choices=['consolidated', 'separated']),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[
+ ['state', 'iscsidiscover', ['iscsi']],
+ ['state', 'iscsilogin', ['iscsi']]
+ ]
+ )
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ hosts_service = connection.system_service().hosts_service()
+ start_event = connection.system_service().events_service().list(max=1)[0]
+ hosts_module = HostsModule(
+ connection=connection,
+ module=module,
+ service=hosts_service,
+ start_event=start_event,
+ )
+
+ state = module.params['state']
+ host = control_state(hosts_module)
+ if state == 'present':
+ ret = hosts_module.create(
+ deploy_hosted_engine=(
+ module.params.get('hosted_engine') == 'deploy'
+ ) if module.params.get('hosted_engine') is not None else None,
+ activate=module.params['activate'],
+ reboot=module.params.get('reboot_after_installation'),
+ result_state=(hoststate.MAINTENANCE if module.params['activate'] is False else hoststate.UP) if host is None else None,
+ fail_condition=hosts_module.failed_state_after_reinstall if host is not None else lambda h: False,
+ )
+ if module.params['activate'] and host is not None:
+ ret = hosts_module.action(
+ action='activate',
+ action_condition=lambda h: h.status != hoststate.UP,
+ wait_condition=lambda h: h.status == hoststate.UP,
+ fail_condition=failed_state,
+ )
+ elif state == 'absent':
+ ret = hosts_module.remove()
+ elif state == 'maintenance':
+ hosts_module.action(
+ action='deactivate',
+ action_condition=lambda h: h.status != hoststate.MAINTENANCE,
+ wait_condition=lambda h: h.status == hoststate.MAINTENANCE,
+ fail_condition=failed_state,
+ )
+ ret = hosts_module.create()
+ if module.params['enroll_certificate']:
+ ret = hosts_module.action(
+ action='enroll_certificate',
+ action_condition=lambda h: h.status == hoststate.MAINTENANCE,
+ wait_condition=lambda h: h.status == hoststate.MAINTENANCE,
+ fail_condition=failed_state,
+ )
+ elif state == 'upgraded':
+ result_state = hoststate.MAINTENANCE if host.status == hoststate.MAINTENANCE else hoststate.UP
+ events_service = connection.system_service().events_service()
+ last_event = events_service.list(max=1)[0]
+
+ if module.params['check_upgrade']:
+ hosts_module.action(
+ action='upgrade_check',
+ action_condition=lambda host: not host.update_available,
+ wait_condition=lambda host: host.update_available or (
+ len([
+ event
+ for event in events_service.list(
+ from_=int(last_event.id),
+ search='type=885',
+ # Uncomment when 4.1 is EOL, and remove the cond:
+ # if host.name in event.description
+ # search='type=885 and host.name=%s' % host.name,
+ ) if host.name in event.description
+ ]) > 0
+ ),
+ fail_condition=lambda host: len(events_service.list(
+ from_=int(last_event.id),
+ search='type=839 or type=887 and host.name=%s' % host.name,
+ )
+ ) > 0,
+ )
+            # Set to False, because upgrade_check isn't a 'changing' action:
+ hosts_module._changed = False
+ ret = hosts_module.action(
+ action='upgrade',
+ action_condition=lambda h: h.update_available,
+ wait_condition=lambda h: not h.update_available or h.status == result_state and (
+ len([
+ event
+ for event in events_service.list(
+ from_=int(last_event.id),
+ # Finished upgrade:
+ # 841: HOST_UPGRADE_FAILED
+ # 842: HOST_UPGRADE_FINISHED
+ # 888: HOST_UPGRADE_FINISHED_AND_WILL_BE_REBOOTED
+ search='type=842 or type=841 or type=888',
+ ) if host.name in event.description
+ ]) > 0
+ ),
+ post_action=lambda h: time.sleep(module.params['poll_interval']),
+ fail_condition=lambda h: hosts_module.failed_state_after_reinstall(h) or (
+ len([
+ event
+ for event in events_service.list(
+ from_=int(last_event.id),
+ # Fail upgrade if migration fails:
+ # 17: Failed to switch Host to Maintenance mode
+ # 65, 140: Migration failed
+ # 166: No available host was found to migrate VM
+ search='type=65 or type=140 or type=166 or type=17',
+ ) if host.name in event.description
+ ]) > 0
+ ),
+ reboot=module.params['reboot_after_upgrade'],
+ )
+ elif state == 'iscsidiscover':
+ host_id = get_id_by_name(hosts_service, module.params['name'])
+ iscsi_param = module.params['iscsi']
+ iscsi_targets = hosts_service.service(host_id).discover_iscsi(
+ iscsi=otypes.IscsiDetails(
+ port=int(iscsi_param.get('port', 3260)),
+ username=iscsi_param.get('username'),
+ password=iscsi_param.get('password'),
+ address=iscsi_param.get('address'),
+ portal=iscsi_param.get('portal'),
+ ),
+ )
+ ret = {
+ 'changed': False,
+ 'id': host_id,
+ 'iscsi_targets': [iscsi.target for iscsi in iscsi_targets],
+ 'iscsi_targets_struct': [get_dict_of_struct(
+ struct=iscsi,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for iscsi in iscsi_targets],
+ }
+ elif state == 'iscsilogin':
+ host_id = get_id_by_name(hosts_service, module.params['name'])
+ iscsi_param = module.params['iscsi']
+ ret = hosts_module.action(
+ action='iscsi_login',
+ iscsi=otypes.IscsiDetails(
+ port=int(iscsi_param.get('port', 3260)),
+ username=iscsi_param.get('username'),
+ password=iscsi_param.get('password'),
+ address=iscsi_param.get('address'),
+ target=iscsi_param.get('target'),
+ portal=iscsi_param.get('portal'),
+ ),
+ )
+ elif state == 'started':
+ ret = hosts_module.action(
+ action='fence',
+ action_condition=lambda h: h.status == hoststate.DOWN,
+ wait_condition=lambda h: h.status in [hoststate.UP, hoststate.MAINTENANCE],
+ fail_condition=hosts_module.failed_state_after_reinstall,
+ fence_type='start',
+ )
+ elif state == 'stopped':
+ hosts_module.action(
+ action='deactivate',
+ action_condition=lambda h: h.status not in [hoststate.MAINTENANCE, hoststate.DOWN],
+ wait_condition=lambda h: h.status in [hoststate.MAINTENANCE, hoststate.DOWN],
+ fail_condition=failed_state,
+ )
+ ret = hosts_module.action(
+ action='fence',
+ action_condition=lambda h: h.status != hoststate.DOWN,
+ wait_condition=lambda h: h.status == hoststate.DOWN if module.params['wait'] else True,
+ fail_condition=failed_state,
+ fence_type='stop',
+ )
+ elif state == 'restarted':
+ result_state = hoststate.MAINTENANCE if host.status == hoststate.MAINTENANCE else hoststate.UP
+ ret = hosts_module.action(
+ action='fence',
+ wait_condition=lambda h: h.status == result_state,
+ fail_condition=hosts_module.failed_state_after_reinstall,
+ fence_type='restart',
+ )
+ elif state == 'reinstalled':
+            # Deactivate host if not in maintenance:
+ hosts_module.action(
+ action='deactivate',
+ action_condition=lambda h: h.status not in [hoststate.MAINTENANCE, hoststate.DOWN],
+ wait_condition=lambda h: h.status in [hoststate.MAINTENANCE, hoststate.DOWN],
+ fail_condition=failed_state,
+ )
+
+ # Reinstall host:
+ ret = hosts_module.action(
+ action='install',
+ action_condition=lambda h: h.status == hoststate.MAINTENANCE,
+ post_action=hosts_module.post_reinstall,
+ reboot=module.params.get('reboot_after_installation'),
+ wait_condition=lambda h: h.status == hoststate.MAINTENANCE,
+ fail_condition=hosts_module.failed_state_after_reinstall,
+ host=otypes.Host(
+ override_iptables=module.params['override_iptables'],
+ ) if module.params['override_iptables'] else None,
+ root_password=module.params['password'],
+ ssh=otypes.Ssh(
+ authentication_method=otypes.SshAuthenticationMethod.PUBLICKEY,
+ ) if module.params['public_key'] else None,
+ deploy_hosted_engine=(
+ module.params.get('hosted_engine') == 'deploy'
+ ) if module.params.get('hosted_engine') is not None else None,
+ undeploy_hosted_engine=(
+ module.params.get('hosted_engine') == 'undeploy'
+ ) if module.params.get('hosted_engine') is not None else None,
+ )
+
+ # Activate host after reinstall:
+ if module.params['activate']:
+ ret = hosts_module.action(
+ action='activate',
+ action_condition=lambda h: h.status == hoststate.MAINTENANCE,
+ wait_condition=lambda h: h.status == hoststate.UP,
+ fail_condition=failed_state,
+ )
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_info.py
new file mode 100644
index 000000000..230a923bc
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_info.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_host_info
+short_description: Retrieve information about one or more oVirt/RHV hosts
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV hosts."
+ - This module was called C(ovirt_host_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_host_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_hosts), which
+ contains a list of hosts. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search host X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+ type: str
+ all_content:
+ description:
+ - "If I(true) all the attributes of the hosts should be
+ included in the response."
+ default: False
+ type: bool
+ cluster_version:
+ description:
+ - "Filter the hosts based on the cluster version."
+ type: str
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url: https://ovirt.github.io/ovirt-engine-api-model/master/#types/host/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all hosts whose names start with C(host) and
+# belong to data center C(west):
+- ovirt.ovirt.ovirt_host_info:
+ pattern: name=host* and datacenter=west
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_hosts }}"
+# All hosts with cluster version 4.2:
+- ovirt.ovirt.ovirt_host_info:
+ pattern: name=host*
+ cluster_version: "4.2"
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_hosts }}"
+'''
+
+RETURN = '''
+ovirt_hosts:
+ description: "List of dictionaries describing the hosts. Host attributes are mapped to dictionary keys,
+ all hosts attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def get_filtered_hosts(cluster_version, hosts, connection):
+    # Filtering by cluster version returns only the hosts whose cluster version matches the input:
+ filtered_hosts = []
+ for host in hosts:
+ cluster = connection.follow_link(host.cluster)
+ cluster_version_host = str(cluster.version.major) + '.' + str(cluster.version.minor)
+ if cluster_version_host == cluster_version:
+ filtered_hosts.append(host)
+ return filtered_hosts
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ all_content=dict(default=False, type='bool'),
+ cluster_version=dict(default=None, type='str'),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+ "The 'fetch_nested' and 'nested_attributes' are deprecated please use 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ hosts_service = connection.system_service().hosts_service()
+ hosts = hosts_service.list(
+ search=module.params['pattern'],
+ all_content=module.params['all_content'],
+ follow=",".join(module.params['follow'])
+ )
+ cluster_version = module.params.get('cluster_version')
+ if cluster_version is not None:
+ hosts = get_filtered_hosts(cluster_version, hosts, connection)
+ result = dict(
+ ovirt_hosts=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in hosts
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_network.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_network.py
new file mode 100644
index 000000000..a48f1ed5d
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_network.py
@@ -0,0 +1,607 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016, 2018 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_host_network
+short_description: Module to manage host networks in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Module to manage host networks in oVirt/RHV."
+options:
+ name:
+ description:
+ - "Name of the host to manage networks for."
+ required: true
+ type: str
+ aliases:
+ - 'host'
+ state:
+ description:
+ - "Should the host be present/absent."
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ bond:
+ description:
+ - "Dictionary describing network bond:"
+ suboptions:
+ name:
+ description:
+ - Bond name.
+ mode:
+ description:
+ - Bonding mode.
+ options:
+ description:
+ - Bonding options.
+ interfaces:
+ description:
+ - List of interfaces to create a bond.
+ type: dict
+ interface:
+ description:
+ - "Name of the network interface where logical network should be attached."
+ type: str
+ networks:
+ description:
+ - "List of dictionary describing networks to be attached to interface or bond:"
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Name of the logical network to be assigned to bond or interface.
+ boot_protocol:
+ description:
+ - Boot protocol.
+ choices: ['none', 'static', 'dhcp']
+            address:
+                description:
+                    - IP address in case I(static) boot protocol is used.
+            netmask:
+                description:
+                    - Subnet mask in case I(static) boot protocol is used.
+            gateway:
+                description:
+                    - Gateway in case I(static) boot protocol is used.
+ version:
+ description:
+ - IP version. Either v4 or v6. Default is v4.
+ custom_properties:
+ description:
+ - "Custom properties applied to the host network."
+ - "Custom properties is a list of dictionary which can have following values."
+ suboptions:
+ name:
+ description:
+ - Name of custom property.
+ value:
+ description:
+ - Value of custom property.
+ labels:
+ description:
+ - "List of names of the network label to be assigned to bond or interface."
+ type: list
+ elements: str
+ check:
+ description:
+ - "If I(true) verify connectivity between host and engine."
+ - "Network configuration changes will be rolled back if connectivity between
+ engine and the host is lost after changing network configuration."
+ type: bool
+ save:
+ description:
+ - "If I(true) network configuration will be persistent, otherwise it is temporary. Default I(true) since Ansible 2.8."
+ type: bool
+ default: True
+ sync_networks:
+ description:
+ - "If I(true) all networks will be synchronized before modification"
+ type: bool
+ default: false
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# In all examples the durability of the configuration created is dependent on the 'save' option value:
+
+# Create bond on eth0 and eth1 interface, and put 'myvlan' network on top of it and persist the new configuration:
+- name: Bonds
+ ovirt.ovirt.ovirt_host_network:
+ name: myhost
+ save: yes
+ bond:
+ name: bond0
+ mode: 2
+ interfaces:
+ - eth1
+ - eth2
+ networks:
+ - name: myvlan
+ boot_protocol: static
+ address: 1.2.3.4
+ netmask: 255.255.255.0
+ gateway: 1.2.3.4
+ version: v4
+
+# Create bond on eth1 and eth2 interface, specifying both mode and miimon:
+- name: Bonds
+ ovirt.ovirt.ovirt_host_network:
+ name: myhost
+ bond:
+ name: bond0
+ mode: 1
+ options:
+ miimon: 200
+ interfaces:
+ - eth1
+ - eth2
+
+# Remove bond0 bond from host interfaces:
+- ovirt.ovirt.ovirt_host_network:
+ state: absent
+ name: myhost
+ bond:
+ name: bond0
+
+# Assign myvlan1 and myvlan2 vlans to host eth0 interface:
+- ovirt.ovirt.ovirt_host_network:
+ name: myhost
+ interface: eth0
+ networks:
+ - name: myvlan1
+ - name: myvlan2
+
+# Remove myvlan2 vlan from host eth0 interface:
+- ovirt.ovirt.ovirt_host_network:
+ state: absent
+ name: myhost
+ interface: eth0
+ networks:
+ - name: myvlan2
+
+# Remove all networks/vlans from host eth0 interface:
+- ovirt.ovirt.ovirt_host_network:
+ state: absent
+ name: myhost
+ interface: eth0
+
+# Add custom_properties to network:
+- ovirt.ovirt.ovirt_host_network:
+ name: myhost
+ interface: eth0
+ networks:
+ - name: myvlan1
+ custom_properties:
+ - name: bridge_opts
+ value: gc_timer=10
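+
+# Assign network labels to the eth0 interface
+# (an illustrative sketch based on the labels option; label names are arbitrary):
+- ovirt.ovirt.ovirt_host_network:
+    name: myhost
+    interface: eth0
+    labels:
+      - label1
+      - label2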
+'''
+
+RETURN = '''
+id:
+ description: ID of the host NIC which is managed
+ returned: On success if host NIC is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+host_nic:
+ description: "Dictionary of all the host NIC attributes. Host NIC attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host_nic."
+ returned: On success if host NIC is found.
+ type: dict
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils import six
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ get_dict_of_struct,
+ get_entity,
+ get_link_name,
+ ovirt_full_argument_spec,
+ search_by_name,
+ engine_supported
+)
+
+
+def get_bond_options(mode, usr_opts):
+ MIIMON_100 = dict(miimon='100')
+ DEFAULT_MODE_OPTS = {
+ '1': MIIMON_100,
+ '2': MIIMON_100,
+ '3': MIIMON_100,
+ '4': dict(xmit_hash_policy='2', **MIIMON_100)
+ }
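+    # For example, for mode '4' the defaults above yield miimon=100 and
+    # xmit_hash_policy=2; any user-supplied options are merged on top of
+    # these defaults further below.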
+
+ options = []
+ if mode is None:
+ return options
+
+ def get_type_name(mode_number):
+ """
+        We need to maintain these type strings for the __compare_options
+        method, for easier comparison.
+ """
+ modes = [
+ 'Active-Backup',
+ 'Load balance (balance-xor)',
+ None,
+ 'Dynamic link aggregation (802.3ad)',
+ ]
+ if (not 0 < mode_number <= len(modes)):
+ return None
+ return modes[mode_number - 1]
+
+ try:
+ mode_number = int(mode)
+ except ValueError:
+ raise Exception('Bond mode must be a number.')
+
+ options.append(
+ otypes.Option(
+ name='mode',
+ type=get_type_name(mode_number),
+ value=str(mode_number)
+ )
+ )
+
+ opts_dict = DEFAULT_MODE_OPTS.get(str(mode), {})
+ if usr_opts is not None:
+ opts_dict.update(**usr_opts)
+
+ options.extend(
+ [otypes.Option(name=opt, value=str(value))
+ for opt, value in six.iteritems(opts_dict)]
+ )
+ return options
+
+
+class HostNetworksModule(BaseModule):
+
+ def __compare_options(self, new_options, old_options):
+ return sorted((get_dict_of_struct(opt) for opt in new_options),
+ key=lambda x: x["name"]) != sorted((get_dict_of_struct(opt) for opt in old_options),
+ key=lambda x: x["name"])
+
+ def build_entity(self):
+ return otypes.Host()
+
+ def update_custom_properties(self, attachments_service, attachment, network):
+ if network.get('custom_properties'):
+ current = []
+ if attachment.properties:
+ current = [(cp.name, str(cp.value)) for cp in attachment.properties]
+ passed = [(cp.get('name'), str(cp.get('value'))) for cp in network.get('custom_properties') if cp]
+ if sorted(current) != sorted(passed):
+ attachment.properties = [
+ otypes.Property(
+ name=prop.get('name'),
+ value=prop.get('value')
+ ) for prop in network.get('custom_properties')
+ ]
+ if not self._module.check_mode:
+ attachments_service.service(attachment.id).update(attachment)
+ self.changed = True
+
+ def update_address(self, attachments_service, attachment, network):
+ # Check if there is any change in address assignments and
+ # update it if needed:
+ for ip in attachment.ip_address_assignments:
+ if str(ip.ip.version) == network.get('version', 'v4'):
+ changed = False
+ if not equal(network.get('boot_protocol'), str(ip.assignment_method)):
+ ip.assignment_method = otypes.BootProtocol(network.get('boot_protocol'))
+ changed = True
+ if not equal(network.get('address'), ip.ip.address):
+ ip.ip.address = network.get('address')
+ changed = True
+ if not equal(network.get('gateway'), ip.ip.gateway):
+ ip.ip.gateway = network.get('gateway')
+ changed = True
+ if not equal(network.get('netmask'), ip.ip.netmask):
+ ip.ip.netmask = network.get('netmask')
+ changed = True
+
+ if changed:
+ if not self._module.check_mode:
+ attachments_service.service(attachment.id).update(attachment)
+ self.changed = True
+ break
+
+ def has_update(self, nic_service):
+ update = False
+ bond = self._module.params['bond']
+ networks = self._module.params['networks']
+ labels = self._module.params['labels']
+ nic = get_entity(nic_service)
+
+ if nic is None:
+ return update
+
+ # Check if bond configuration should be updated:
+ if bond:
+ update = self.__compare_options(get_bond_options(bond.get('mode'), bond.get('options')), getattr(nic.bonding, 'options', []))
+ update = update or not equal(
+ sorted(bond.get('interfaces')) if bond.get('interfaces') else None,
+ sorted(get_link_name(self._connection, s) for s in nic.bonding.slaves)
+ )
+
+ # Check if labels need to be updated on interface/bond:
+ if labels:
+ net_labels = nic_service.network_labels_service().list()
+            # If any labels the user passed aren't assigned, relabel the interface:
+ if sorted(labels) != sorted([lbl.id for lbl in net_labels]):
+ return True
+
+ if not networks:
+ return update
+
+ # Check if networks attachments configuration should be updated:
+ attachments_service = nic_service.network_attachments_service()
+ network_names = [network.get('name') for network in networks]
+
+ attachments = {}
+ for attachment in attachments_service.list():
+ name = get_link_name(self._connection, attachment.network)
+ if name in network_names:
+ attachments[name] = attachment
+
+ for network in networks:
+ attachment = attachments.get(network.get('name'))
+            # If the attachment doesn't exist, we need to create it:
+ if attachment is None:
+ return True
+ self.update_custom_properties(attachments_service, attachment, network)
+ self.update_address(attachments_service, attachment, network)
+
+ return update
+
+ def _action_save_configuration(self, entity):
+ if not self._module.check_mode:
+ self._service.service(entity.id).commit_net_config()
+ self.changed = True
+
+
+def needs_sync(nics_service):
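+    # Return True if any network attachment on any host NIC is out of sync: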
+ nics = nics_service.list()
+ for nic in nics:
+ nic_service = nics_service.nic_service(nic.id)
+        for attachment in nic_service.network_attachments_service().list():
+            if not attachment.in_sync:
+ return True
+ return False
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ name=dict(aliases=['host'], required=True),
+ bond=dict(default=None, type='dict'),
+ interface=dict(default=None),
+ networks=dict(default=None, type='list', elements='dict'),
+ labels=dict(default=None, type='list', elements='str'),
+ check=dict(default=None, type='bool'),
+ save=dict(default=True, type='bool'),
+ sync_networks=dict(default=False, type='bool'),
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ hosts_service = connection.system_service().hosts_service()
+ host_networks_module = HostNetworksModule(
+ connection=connection,
+ module=module,
+ service=hosts_service,
+ )
+
+ host = host_networks_module.search_entity()
+ if host is None:
+ raise Exception("Host '%s' was not found." % module.params['name'])
+
+ bond = module.params['bond']
+ interface = module.params['interface']
+ networks = module.params['networks']
+ labels = module.params['labels']
+ nic_name = bond.get('name') if bond else module.params['interface']
+
+ host_service = hosts_service.host_service(host.id)
+ nics_service = host_service.nics_service()
+ nic = search_by_name(nics_service, nic_name)
+
+ if module.params["sync_networks"]:
+ if needs_sync(nics_service):
+ if not module.check_mode:
+ host_service.sync_all_networks()
+ host_networks_module.changed = True
+
+ network_names = [network['name'] for network in networks or []]
+ state = module.params['state']
+
+ if (
+ state == 'present' and
+ (nic is None or host_networks_module.has_update(nics_service.service(nic.id)))
+ ):
+            # Remove networks which are attached to a different interface than the user wants:
+ attachments_service = host_service.network_attachments_service()
+
+ # Append attachment ID to network if needs update:
+ for a in attachments_service.list():
+ current_network_name = get_link_name(connection, a.network)
+ if current_network_name in network_names:
+ for n in networks:
+ if n['name'] == current_network_name:
+ n['id'] = a.id
+
+ # Check if we have to break some bonds:
+ removed_bonds = []
+ if nic is not None:
+ for host_nic in nics_service.list():
+ if host_nic.bonding and nic.id in [slave.id for slave in host_nic.bonding.slaves]:
+ removed_bonds.append(otypes.HostNic(id=host_nic.id))
+
+ # Assign the networks:
+ setup_params = dict(
+ entity=host,
+ action='setup_networks',
+ check_connectivity=module.params['check'],
+ removed_bonds=removed_bonds if removed_bonds else None,
+ modified_bonds=[
+ otypes.HostNic(
+ name=bond.get('name'),
+ bonding=otypes.Bonding(
+ options=get_bond_options(bond.get('mode'), bond.get('options')),
+ slaves=[
+ otypes.HostNic(name=i) for i in bond.get('interfaces', [])
+ ],
+ ),
+ ),
+ ] if bond else None,
+ modified_labels=[
+ otypes.NetworkLabel(
+ id=str(name),
+ host_nic=otypes.HostNic(
+ name=bond.get('name') if bond else interface
+ ),
+ ) for name in labels
+ ] if labels else None,
+ modified_network_attachments=[
+ otypes.NetworkAttachment(
+ id=network.get('id'),
+ network=otypes.Network(
+ name=network['name']
+ ) if network['name'] else None,
+ host_nic=otypes.HostNic(
+ name=bond.get('name') if bond else interface
+ ),
+ ip_address_assignments=[
+ otypes.IpAddressAssignment(
+ assignment_method=otypes.BootProtocol(
+ network.get('boot_protocol', 'none')
+ ),
+ ip=otypes.Ip(
+ address=network.get('address'),
+ gateway=network.get('gateway'),
+ netmask=network.get('netmask'),
+ version=otypes.IpVersion(
+ network.get('version')
+ ) if network.get('version') else None,
+ ),
+ ),
+ ],
+ properties=[
+ otypes.Property(
+ name=prop.get('name'),
+ value=prop.get('value')
+ ) for prop in network.get('custom_properties', [])
+ ]
+ ) for network in networks
+ ] if networks else None,
+ )
+ if engine_supported(connection, '4.3'):
+ setup_params['commit_on_success'] = module.params['save']
+ elif module.params['save']:
+ setup_params['post_action'] = host_networks_module._action_save_configuration
+ host_networks_module.action(**setup_params)
+ elif state == 'absent' and nic:
+ attachments = []
+ nic_service = nics_service.nic_service(nic.id)
+
+ attached_labels = set([str(lbl.id) for lbl in nic_service.network_labels_service().list()])
+ if networks:
+ attachments_service = nic_service.network_attachments_service()
+ attachments = attachments_service.list()
+ attachments = [
+ attachment for attachment in attachments
+ if get_link_name(connection, attachment.network) in network_names
+ ]
+
+ # Remove unmanaged networks:
+ unmanaged_networks_service = host_service.unmanaged_networks_service()
+ unmanaged_networks = [(u.id, u.name) for u in unmanaged_networks_service.list()]
+ for net_id, net_name in unmanaged_networks:
+ if net_name in network_names:
+ if not module.check_mode:
+ unmanaged_networks_service.unmanaged_network_service(net_id).remove()
+ host_networks_module.changed = True
+
+            # Need to check if there are any labels to be removed, as the backend fails
+            # if we try to remove a non-existing label; for bonds and attachments it's OK:
+ if (labels and set(labels).intersection(attached_labels)) or bond or attachments:
+ setup_params = dict(
+ entity=host,
+ action='setup_networks',
+ check_connectivity=module.params['check'],
+ removed_bonds=[
+ otypes.HostNic(
+ name=bond.get('name'),
+ ),
+ ] if bond else None,
+ removed_labels=[
+ otypes.NetworkLabel(id=str(name)) for name in labels
+ ] if labels else None,
+ removed_network_attachments=attachments if attachments else None,
+ )
+ if engine_supported(connection, '4.4'):
+ setup_params['commit_on_success'] = module.params['save']
+ elif module.params['save']:
+ setup_params['post_action'] = host_networks_module._action_save_configuration
+ host_networks_module.action(**setup_params)
+
+ nic = search_by_name(nics_service, nic_name)
+ module.exit_json(**{
+ 'changed': host_networks_module.changed,
+ 'id': nic.id if nic else None,
+ 'host_nic': get_dict_of_struct(nic),
+ })
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_pm.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_pm.py
new file mode 100644
index 000000000..ca32a20ce
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_pm.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_host_pm
+short_description: Module to manage power management of hosts in oVirt/RHV
+version_added: "1.0.0"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage power management of hosts in oVirt/RHV."
+options:
+ name:
+ description:
+ - "Name of the host to manage."
+ required: true
+ aliases: ['host']
+ type: str
+ state:
+ description:
+ - "Should the host be present/absent."
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ address:
+ description:
+ - "Address of the power management interface."
+ type: str
+ username:
+ description:
+ - "Username to be used to connect to power management interface."
+ type: str
+ password:
+ description:
+ - "Password of the user specified in C(username) parameter."
+ type: str
+ type:
+ description:
+ - "Type of the power management. oVirt/RHV predefined values are I(drac5), I(ipmilan), I(rsa),
+ I(bladecenter), I(alom), I(apc), I(apc_snmp), I(eps), I(wti), I(rsb), I(cisco_ucs),
+ I(drac7), I(hpblade), I(ilo), I(ilo2), I(ilo3), I(ilo4), I(ilo_ssh),
+ but user can have defined custom type."
+ type: str
+ port:
+ description:
+ - "Power management interface port."
+ type: int
+ options:
+ description:
+ - "Dictionary of additional fence agent options (including Power Management slot)."
+ - "Additional information about options can be found at U(https://github.com/ClusterLabs/fence-agents/blob/master/doc/FenceAgentAPI.md)."
+ type: dict
+ encrypt_options:
+ description:
+ - "If I(true) options will be encrypted when send to agent."
+ aliases: ['encrypt']
+ type: bool
+ order:
+ description:
+ - "Integer value specifying, by default it's added at the end."
+ type: int
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add fence agent to host 'myhost'
+- ovirt.ovirt.ovirt_host_pm:
+ name: myhost
+ address: 1.2.3.4
+ options:
+ myoption1: x
+ myoption2: y
+ username: admin
+ password: admin
+ port: 3333
+ type: ipmilan
+
+# Add fence agent to host 'myhost' using 'slot' option
+- ovirt.ovirt.ovirt_host_pm:
+ name: myhost
+ address: 1.2.3.4
+ options:
+ myoption1: x
+ myoption2: y
+ slot: myslot
+ username: admin
+ password: admin
+ port: 3333
+ type: ipmilan
+
+
+# Remove ipmilan fence agent with address 1.2.3.4 on host 'myhost'
+- ovirt.ovirt.ovirt_host_pm:
+ state: absent
+ name: myhost
+ address: 1.2.3.4
+ type: ipmilan
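+
+# Add a second fence agent with an explicit order and encrypted options
+# (an illustrative sketch; the values are arbitrary):
+- ovirt.ovirt.ovirt_host_pm:
+    name: myhost
+    address: 1.2.3.5
+    username: admin
+    password: admin
+    type: ipmilan
+    order: 2
+    encrypt_options: true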
+'''
+
+RETURN = '''
+id:
+ description: ID of the agent which is managed
+ returned: On success if agent is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+agent:
+ description: "Dictionary of all the agent attributes. Agent attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/agent."
+ returned: On success if agent is found.
+ type: dict
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ ovirt_full_argument_spec,
+ search_by_name,
+)
+
+
+class HostModule(BaseModule):
+ def build_entity(self):
+ return otypes.Host(
+ power_management=otypes.PowerManagement(
+ enabled=True,
+ ),
+ )
+
+ def update_check(self, entity):
+ return equal(True, entity.power_management.enabled)
+
+
+class HostPmModule(BaseModule):
+
+ def pre_create(self, entity):
+ # Save the entity, so we know if Agent already existed
+ self.entity = entity
+
+ def build_entity(self):
+        # Default the order to one past the highest existing agent order, i.e. append the agent at the end:
+        last = next(iter(sorted([a.order for a in self._service.list()], reverse=True)), 0)
+        order = self.param('order') if self.param('order') is not None else self.entity.order if self.entity else last + 1
+ return otypes.Agent(
+ address=self._module.params['address'],
+ encrypt_options=self._module.params['encrypt_options'],
+ options=[
+ otypes.Option(
+ name=name,
+ value=value,
+ ) for name, value in self._module.params['options'].items()
+ ] if self._module.params['options'] else None,
+ password=self._module.params['password'],
+ port=self._module.params['port'],
+ type=self._module.params['type'],
+ username=self._module.params['username'],
+ order=order,
+ )
+
+ def update_check(self, entity):
+ def check_options():
+ if self.param('options'):
+ current = []
+ if entity.options:
+ current = [(opt.name, str(opt.value)) for opt in entity.options]
+ passed = [(k, str(v)) for k, v in self.param('options').items()]
+ return sorted(current) == sorted(passed)
+ return True
+
+ return (
+ check_options() and
+ equal(self._module.params.get('address'), entity.address) and
+ equal(self._module.params.get('encrypt_options'), entity.encrypt_options) and
+ equal(self._module.params.get('username'), entity.username) and
+ equal(self._module.params.get('port'), entity.port) and
+ equal(self._module.params.get('type'), entity.type) and
+ equal(self._module.params.get('order'), entity.order)
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ name=dict(required=True, aliases=['host']),
+ address=dict(default=None),
+ username=dict(default=None),
+ password=dict(default=None, no_log=True),
+ type=dict(default=None),
+ port=dict(default=None, type='int'),
+ order=dict(default=None, type='int'),
+ options=dict(default=None, type='dict'),
+ encrypt_options=dict(default=None, type='bool', aliases=['encrypt']),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ hosts_service = connection.system_service().hosts_service()
+ host = search_by_name(hosts_service, module.params['name'])
+ fence_agents_service = hosts_service.host_service(host.id).fence_agents_service()
+
+ host_pm_module = HostPmModule(
+ connection=connection,
+ module=module,
+ service=fence_agents_service,
+ )
+ host_module = HostModule(
+ connection=connection,
+ module=module,
+ service=hosts_service,
+ )
+
+ state = module.params['state']
+ if state == 'present':
+ agent = host_pm_module.search_entity(
+ search_params={
+ 'address': module.params['address'],
+ 'type': module.params['type'],
+ }
+ )
+ ret = host_pm_module.create(entity=agent)
+
+ # Enable Power Management, if it's not enabled:
+ host_module.create(entity=host)
+ elif state == 'absent':
+ agent = host_pm_module.search_entity(
+ search_params={
+ 'address': module.params['address'],
+ 'type': module.params['type'],
+ }
+ )
+ ret = host_pm_module.remove(entity=agent)
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_storage_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_storage_info.py
new file mode 100644
index 000000000..b1500e599
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_storage_info.py
@@ -0,0 +1,196 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_host_storage_info
+short_description: Retrieve information about one or more oVirt/RHV HostStorages (applicable only for block storage)
+version_added: "1.0.0"
+author: "Daniel Erez (@derez)"
+description:
+ - "Retrieve information about one or more oVirt/RHV HostStorages (applicable only for block storage)."
+ - This module was called C(ovirt_host_storage_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_host_storage_info) module no longer returns C(ansible_facts)!
+options:
+ host:
+ description:
+ - "Host to get device list from."
+ required: true
+ type: str
+ iscsi:
+ description:
+ - "Dictionary with values for iSCSI storage type:"
+ suboptions:
+ address:
+ description:
+ - "Address of the iSCSI storage server."
+ target:
+ description:
+ - "The target IQN for the storage device."
+ username:
+ description:
+ - "A CHAP user name for logging into a target."
+ password:
+ description:
+ - "A CHAP password for logging into a target."
+ portal:
+ description:
+ - "The portal being used to connect with iscsi."
+ type: dict
+ fcp:
+ description:
+ - "Dictionary with values for fibre channel storage type:"
+ suboptions:
+ address:
+ description:
+ - "Address of the fibre channel storage server."
+ port:
+ description:
+ - "Port of the fibre channel storage server."
+ lun_id:
+ description:
+ - "LUN id."
+ type: dict
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url: https://ovirt.github.io/ovirt-engine-api-model/master/#types/host_storage/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about HostStorages with specified target and address:
+- ovirt.ovirt.ovirt_host_storage_info:
+ host: myhost
+ iscsi:
+ target: iqn.2016-08-09.domain-01:nickname
+ address: 10.34.63.204
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_host_storages }}"
+
+- name: Gather information about all storages
+ ovirt.ovirt.ovirt_host_storage_info:
+ host: myhost
+
+- name: Gather information about all iscsi storages
+ ovirt.ovirt.ovirt_host_storage_info:
+ host: myhost
+ iscsi: {}
+
+- name: Gather information about all fcp storages
+ ovirt.ovirt.ovirt_host_storage_info:
+ host: myhost
+ fcp: {}
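+
+# A hedged sketch using the follow parameter; the followed link name is
+# illustrative, see the links_summary url in the option description above:
+- name: Gather information about all iscsi storages including the host link
+  ovirt.ovirt.ovirt_host_storage_info:
+    host: myhost
+    iscsi: {}
+    follow:
+      - host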
+'''
+
+RETURN = '''
+ovirt_host_storages:
+ description: "List of dictionaries describing the HostStorage. HostStorage attributes are mapped to dictionary keys,
+ all HostStorage attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host_storage."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ get_id_by_name,
+)
+
+
+def _login(host_service, iscsi):
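+    # Log the host into the given iSCSI target so its LUNs become visible to the storage listing below.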
+ host_service.iscsi_login(
+ iscsi=otypes.IscsiDetails(
+ username=iscsi.get('username'),
+ password=iscsi.get('password'),
+ address=iscsi.get('address'),
+ target=iscsi.get('target'),
+ portal=iscsi.get('portal')
+ ),
+ )
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ host=dict(required=True),
+ iscsi=dict(default=None, type='dict'),
+ fcp=dict(default=None, type='dict'),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+ "The 'fetch_nested' and 'nested_attributes' are deprecated please use 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+
+ # Get Host
+ hosts_service = connection.system_service().hosts_service()
+ host_id = get_id_by_name(hosts_service, module.params['host'])
+ host_service = hosts_service.host_service(host_id)
+
+ if module.params.get('iscsi'):
+ # Login
+ _login(host_service, module.params.get('iscsi'))
+
+ # Get LUNs exposed from the specified target
+ host_storages = host_service.storage_service().list(follow=",".join(module.params['follow']))
+ if module.params.get('iscsi') is not None:
+ host_storages = list(filter(lambda x: x.type == otypes.StorageType.ISCSI, host_storages))
+ if 'target' in module.params.get('iscsi'):
+ host_storages = list(filter(lambda x: module.params.get('iscsi').get('target') == x.logical_units[0].target, host_storages))
+ elif module.params.get('fcp') is not None:
+ host_storages = list(filter(lambda x: x.type == otypes.StorageType.FCP, host_storages))
+
+ result = dict(
+ ovirt_host_storages=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in host_storages
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_instance_type.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_instance_type.py
new file mode 100644
index 000000000..6a39e2b59
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_instance_type.py
@@ -0,0 +1,632 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_instance_type
+short_description: Module to manage Instance Types in oVirt/RHV
+version_added: "1.0.0"
+author:
+- Martin Necas (@mnecas)
+- Ondra Machacek (@machacekondra)
+description:
+ - This module manages whole lifecycle of the Instance Type in oVirt/RHV.
+options:
+ name:
+ description:
+ - Name of the Instance Type to manage.
+      - If the instance type doesn't exist, C(name) is required. Otherwise C(id) or C(name) can be used.
+ type: str
+ id:
+ description:
+ - ID of the Instance Type to manage.
+ type: str
+ state:
+ description:
+ - Should the Instance Type be present/absent.
+      - I(present) state will create/update the instance type and won't change its state if it already exists.
+ choices: [ absent, present ]
+ default: present
+ type: str
+ memory:
+ description:
+ - Amount of memory of the Instance Type. Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
+ - Default value is set by engine.
+ type: str
+ memory_guaranteed:
+ description:
+ - Amount of minimal guaranteed memory of the Instance Type.
+ Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
+ - C(memory_guaranteed) parameter can't be lower than C(memory) parameter.
+ - Default value is set by engine.
+ type: str
+ nics:
+ description:
+ - List of NICs, which should be attached to Virtual Machine. NIC is described by following dictionary.
+      - NOTE - This parameter is used only when C(state) is I(present) and it can only create NICs.
+        To manage NICs of the instance type in more depth please use the M(ovirt.ovirt.ovirt_nic) module instead.
+ suboptions:
+ name:
+ description:
+ - "Name of the NIC."
+ type: str
+ profile_name:
+ description:
+ - "Profile name where NIC should be attached."
+ type: str
+ interface:
+ description:
+ - "Type of the network interface."
+ type: str
+ choices: [ virtio, e1000, rtl8139 ]
+ default: virtio
+ mac_address:
+ description:
+ - "Custom MAC address of the network interface, by default it's obtained from MAC pool."
+ type: str
+ type: list
+ elements: dict
+ memory_max:
+ description:
+ - Upper bound of instance type memory up to which memory hot-plug can be performed.
+ Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
+ - Default value is set by engine.
+ type: str
+ cpu_cores:
+ description:
+      - Number of virtual CPU cores of the Instance Type.
+ - Default value is set by oVirt/RHV engine.
+ type: int
+ cpu_sockets:
+ description:
+      - Number of virtual CPU sockets of the Instance Type.
+ - Default value is set by oVirt/RHV engine.
+ type: int
+ cpu_threads:
+ description:
+      - Number of virtual CPU threads of the Instance Type.
+ - Default value is set by oVirt/RHV engine.
+ type: int
+ operating_system:
+ description:
+ - Operating system of the Instance Type, for example 'rhel_8x64'.
+ - Default value is set by oVirt/RHV engine.
+ - Use the M(ovirt.ovirt.ovirt_vm_os_info) module to obtain the current list.
+ type: str
+ boot_devices:
+ description:
+ - List of boot devices which should be used to boot. For example C([ cdrom, hd ]).
+ - Default value is set by oVirt/RHV engine.
+ choices: [ cdrom, hd, network ]
+ type: list
+ elements: str
+ serial_console:
+ description:
+ - "I(True) enable VirtIO serial console, I(False) to disable it. By default is chosen by oVirt/RHV engine."
+ type: bool
+ usb_support:
+ description:
+ - "I(True) enable USB support, I(False) to disable it. By default is chosen by oVirt/RHV engine."
+ type: bool
+ high_availability:
+ description:
+ - If I(yes) Instance Type will be set as highly available.
+ - If I(no) Instance Type won't be set as highly available.
+ - If no value is passed, default value is set by oVirt/RHV engine.
+ type: bool
+ high_availability_priority:
+ description:
+ - Indicates the priority of the instance type inside the run and migration queues.
+ Instance Type with higher priorities will be started and migrated before instance types with lower
+ priorities. The value is an integer between 0 and 100. The higher the value, the higher the priority.
+ - If no value is passed, default value is set by oVirt/RHV engine.
+ type: int
+ watchdog:
+ description:
+ - "Assign watchdog device for the instance type."
+ - "Watchdogs is a dictionary which can have following values:"
+ - "C(model) - Model of the watchdog device. For example: I(i6300esb), I(diag288) or I(null)."
+ - "C(action) - Watchdog action to be performed when watchdog is triggered. For example: I(none), I(reset), I(poweroff), I(pause) or I(dump)."
+ type: dict
+ host:
+ description:
+      - Specify the host on which the Instance Type should run. By default the host is chosen by the engine scheduler.
+      - This parameter is used only when C(state) is I(present).
+ type: str
+ graphical_console:
+ description:
+ - "Assign graphical console to the instance type."
+ - "Graphical console is a dictionary which can have following values:"
+ - "C(headless_mode) - If I(true) disable the graphics console for this instance type."
+ - "C(protocol) - Graphical protocol, a list of I(spice), I(vnc), or both."
+ type: dict
+ description:
+ description:
+ - "Description of the instance type."
+ type: str
+ cpu_mode:
+ description:
+ - "CPU mode of the instance type. It can be some of the following: I(host_passthrough), I(host_model) or I(custom)."
+ - "For I(host_passthrough) CPU type you need to set C(placement_policy) to I(pinned)."
+ - "If no value is passed, default value is set by oVirt/RHV engine."
+ type: str
+ rng_device:
+ description:
+ - "Random number generator (RNG). You can choose of one the following devices I(urandom), I(random) or I(hwrng)."
+ - "In order to select I(hwrng), you must have it enabled on cluster first."
+ - "/dev/urandom is used for cluster version >= 4.1, and /dev/random for cluster version <= 4.0"
+ type: str
+ rng_bytes:
+ description:
+ - "Number of bytes allowed to consume per period."
+ type: int
+ rng_period:
+ description:
+ - "Duration of one period in milliseconds."
+ type: int
+ placement_policy:
+ description:
+ - "The configuration of the instance type's placement policy."
+ - "Placement policy can be one of the following values:"
+ - "C(migratable) - Allow manual and automatic migration."
+ - "C(pinned) - Do not allow migration."
+ - "C(user_migratable) - Allow manual migration only."
+ - "If no value is passed, default value is set by oVirt/RHV engine."
+ type: str
+ cpu_pinning:
+ description:
+ - "CPU Pinning topology to map instance type CPU to host CPU."
+ - "CPU Pinning topology is a list of dictionary which can have following values:"
+ suboptions:
+ cpu:
+ description:
+ - "Number of the host CPU."
+ vcpu:
+ description:
+ - "Number of the instance type CPU."
+ type: list
+ elements: dict
+ soundcard_enabled:
+ description:
+ - "If I(true), the sound card is added to the instance type."
+ type: bool
+ smartcard_enabled:
+ description:
+ - "If I(true), use smart card authentication."
+ type: bool
+ virtio_scsi:
+ description:
+ - "If I(true), virtio scsi will be enabled."
+ type: bool
+ io_threads:
+ description:
+ - "Number of IO threads used by instance type. I(0) means IO threading disabled."
+ type: int
+ ballooning_enabled:
+ description:
+ - "If I(true), use memory ballooning."
+ - "Memory balloon is a guest device, which may be used to re-distribute / reclaim the host memory
+ based on instance type needs in a dynamic way. In this way it's possible to create memory over commitment states."
+ type: bool
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create instance type
+- name: Create instance type
+ ovirt.ovirt.ovirt_instance_type:
+ state: present
+ name: myit
+ rng_device: hwrng
+ rng_bytes: 200
+ rng_period: 200
+ soundcard_enabled: true
+ virtio_scsi: true
+ boot_devices:
+ - network
+
+# Remove instance type
+- ovirt.ovirt.ovirt_instance_type:
+ state: absent
+ name: myit
+
+
+# Create instance type with predefined memory and cpu limits.
+- ovirt.ovirt.ovirt_instance_type:
+ state: present
+ name: myit
+ memory: 2GiB
+ cpu_cores: 2
+ cpu_sockets: 2
+ nics:
+ - name: nic1
+
+# Enable usb support and serial console
+- ovirt.ovirt.ovirt_instance_type:
+ name: myit
+ usb_support: True
+ serial_console: True
+
+# Use graphical console with spice and vnc
+- name: Create a instance type that has the console configured for both Spice and VNC
+ ovirt.ovirt.ovirt_instance_type:
+ name: myit
+ graphical_console:
+ protocol:
+ - spice
+ - vnc
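+
+# A hypothetical sketch (values are illustrative) combining a watchdog device
+# with CPU pinning, using only options documented above:
+- name: Create instance type with watchdog and pinned CPUs
+  ovirt.ovirt.ovirt_instance_type:
+    name: myit
+    watchdog:
+      model: i6300esb
+      action: reset
+    cpu_pinning:
+      - cpu: 1
+        vcpu: 0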
+'''
+
+
+RETURN = '''
+
+id:
+ description: ID of the instance type which is managed
+ returned: On success if instance type is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+instancetype:
+ description: "Dictionary of all the instance type attributes. instance type attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/instance_type."
+ returned: On success if instance type is found.
+ type: dict
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import traceback
+
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_params,
+ check_sdk,
+ convert_to_bytes,
+ create_connection,
+ equal,
+ get_dict_of_struct,
+ get_entity,
+ get_link_name,
+ get_id_by_name,
+ ovirt_full_argument_spec,
+ search_by_attributes,
+ search_by_name,
+ wait,
+)
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+
+class InstanceTypeModule(BaseModule):
+ def build_entity(self):
+ return otypes.InstanceType(
+ id=self.param('id'),
+ name=self.param('name'),
+ console=(
+ otypes.Console(enabled=self.param('serial_console'))
+ ) if self.param('serial_console') is not None else None,
+ usb=(
+ otypes.Usb(enabled=self.param('usb_support'))
+ ) if self.param('usb_support') is not None else None,
+ high_availability=otypes.HighAvailability(
+ enabled=self.param('high_availability'),
+ priority=self.param('high_availability_priority'),
+ ) if self.param('high_availability') is not None or self.param('high_availability_priority') else None,
+ cpu=otypes.Cpu(
+ topology=otypes.CpuTopology(
+ cores=self.param('cpu_cores'),
+ sockets=self.param('cpu_sockets'),
+ threads=self.param('cpu_threads'),
+ ) if any((
+ self.param('cpu_cores'),
+ self.param('cpu_sockets'),
+ self.param('cpu_threads')
+ )) else None,
+ cpu_tune=otypes.CpuTune(
+ vcpu_pins=[
+ otypes.VcpuPin(vcpu=int(pin['vcpu']), cpu_set=str(pin['cpu'])) for pin in self.param('cpu_pinning')
+ ],
+ ) if self.param('cpu_pinning') else None,
+ mode=otypes.CpuMode(self.param('cpu_mode')) if self.param(
+ 'cpu_mode') else None,
+ ) if any((
+ self.param('cpu_cores'),
+ self.param('cpu_sockets'),
+ self.param('cpu_threads'),
+ self.param('cpu_mode'),
+ self.param('cpu_pinning')
+ )) else None,
+ os=otypes.OperatingSystem(
+ type=self.param('operating_system'),
+ boot=otypes.Boot(
+ devices=[
+ otypes.BootDevice(dev) for dev in self.param('boot_devices')
+ ],
+ ) if self.param('boot_devices') else None
+ ),
+ rng_device=otypes.RngDevice(
+ source=otypes.RngSource(self.param('rng_device')),
+ rate=otypes.Rate(
+ bytes=self.param('rng_bytes'),
+ period=self.param('rng_period')
+ )
+ ) if self.param('rng_device') else None,
+ memory=convert_to_bytes(
+ self.param('memory')
+ ) if self.param('memory') else None,
+ virtio_scsi=otypes.VirtioScsi(
+ enabled=self.param('virtio_scsi')
+ ) if self.param('virtio_scsi') else None,
+ memory_policy=otypes.MemoryPolicy(
+ guaranteed=convert_to_bytes(self.param('memory_guaranteed')),
+ ballooning=self.param('ballooning_enabled'),
+ max=convert_to_bytes(self.param('memory_max')),
+ ) if any((
+ self.param('memory_guaranteed'),
+ self.param('ballooning_enabled') is not None,
+ self.param('memory_max')
+ )) else None,
+ description=self.param('description'),
+ placement_policy=otypes.VmPlacementPolicy(
+ affinity=otypes.VmAffinity(self.param('placement_policy')),
+ hosts=[
+ otypes.Host(name=self.param('host')),
+ ] if self.param('host') else None,
+ ) if self.param('placement_policy') else None,
+ soundcard_enabled=self.param('soundcard_enabled'),
+ display=otypes.Display(
+ smartcard_enabled=self.param('smartcard_enabled')
+ ) if self.param('smartcard_enabled') is not None else None,
+ io=otypes.Io(
+ threads=self.param('io_threads'),
+ ) if self.param('io_threads') is not None else None,
+ )
+
+ def __attach_watchdog(self, entity):
+ watchdogs_service = self._service.service(entity.id).watchdogs_service()
+ watchdog = self.param('watchdog')
+ if watchdog is not None:
+ current_watchdog = next(iter(watchdogs_service.list()), None)
+ if watchdog.get('model') is None and current_watchdog:
+ watchdogs_service.watchdog_service(current_watchdog.id).remove()
+ return True
+ elif watchdog.get('model') is not None and current_watchdog is None:
+ watchdogs_service.add(
+ otypes.Watchdog(
+ model=otypes.WatchdogModel(watchdog.get('model').lower()),
+ action=otypes.WatchdogAction(watchdog.get('action')),
+ )
+ )
+ return True
+ elif current_watchdog is not None:
+ if (
+ str(current_watchdog.model).lower() != watchdog.get('model').lower() or
+ str(current_watchdog.action).lower() != watchdog.get('action').lower()
+ ):
+ watchdogs_service.watchdog_service(current_watchdog.id).update(
+ otypes.Watchdog(
+ model=otypes.WatchdogModel(watchdog.get('model')),
+ action=otypes.WatchdogAction(watchdog.get('action')),
+ )
+ )
+ return True
+ return False
+
+ def __get_vnic_profile_id(self, nic):
+ """
+        Return the VNIC profile ID looked up by its name. Because there can be
+        multiple VNIC profiles with the same name, the cluster is used as an additional filter criterion.
+ """
+ vnics_service = self._connection.system_service().vnic_profiles_service()
+ clusters_service = self._connection.system_service().clusters_service()
+ cluster = search_by_name(clusters_service, self.param('cluster'))
+ profiles = [
+ profile for profile in vnics_service.list()
+ if profile.name == nic.get('profile_name')
+ ]
+ cluster_networks = [
+ net.id for net in self._connection.follow_link(cluster.networks)
+ ]
+ try:
+ return next(
+ profile.id for profile in profiles
+ if profile.network.id in cluster_networks
+ )
+ except StopIteration:
+ raise Exception(
+ "Profile '%s' was not found in cluster '%s'" % (
+ nic.get('profile_name'),
+ self.param('cluster')
+ )
+ )
+
+ def __attach_nics(self, entity):
+ # Attach NICs to instance type, if specified:
+ nics_service = self._service.service(entity.id).nics_service()
+ for nic in self.param('nics'):
+ if search_by_name(nics_service, nic.get('name')) is None:
+ if not self._module.check_mode:
+ nics_service.add(
+ otypes.Nic(
+ name=nic.get('name'),
+ interface=otypes.NicInterface(
+ nic.get('interface', 'virtio')
+ ),
+ vnic_profile=otypes.VnicProfile(
+ id=self.__get_vnic_profile_id(nic),
+ ) if nic.get('profile_name') else None,
+ mac=otypes.Mac(
+ address=nic.get('mac_address')
+ ) if nic.get('mac_address') else None,
+ )
+ )
+ self.changed = True
+
+ def __attach_graphical_console(self, entity):
+ graphical_console = self.param('graphical_console')
+ if not graphical_console:
+ return False
+
+ it_service = self._service.instance_type_service(entity.id)
+ gcs_service = it_service.graphics_consoles_service()
+ graphical_consoles = gcs_service.list()
+ # Remove all graphical consoles if there are any:
+ if bool(graphical_console.get('headless_mode')):
+ if not self._module.check_mode:
+ for gc in graphical_consoles:
+ gcs_service.console_service(gc.id).remove()
+ return len(graphical_consoles) > 0
+
+        # If there are no graphics consoles yet, add the requested ones:
+ protocol = graphical_console.get('protocol')
+ if isinstance(protocol, str):
+ protocol = [protocol]
+
+ current_protocols = [str(gc.protocol) for gc in graphical_consoles]
+ if not current_protocols:
+ if not self._module.check_mode:
+ for p in protocol:
+ gcs_service.add(
+ otypes.GraphicsConsole(
+ protocol=otypes.GraphicsType(p),
+ )
+ )
+ return True
+
+ # Update consoles:
+ if sorted(protocol) != sorted(current_protocols):
+ if not self._module.check_mode:
+ for gc in graphical_consoles:
+ gcs_service.console_service(gc.id).remove()
+ for p in protocol:
+ gcs_service.add(
+ otypes.GraphicsConsole(
+ protocol=otypes.GraphicsType(p),
+ )
+ )
+ return True
+
+ def post_update(self, entity):
+ self.post_present(entity.id)
+
+ def post_present(self, entity_id):
+ entity = self._service.service(entity_id).get()
+        # Combine results so a later unchanged attach doesn't mask an earlier change:
+        self.changed = self.__attach_nics(entity) or self.changed
+        self.changed = self.__attach_watchdog(entity) or self.changed
+        self.changed = self.__attach_graphical_console(entity) or self.changed
+
+ def update_check(self, entity):
+ cpu_mode = getattr(entity.cpu, 'mode')
+ it_display = entity.display
+ return (
+ not self.param('kernel_params_persist') and
+ equal(convert_to_bytes(self.param('memory_guaranteed')), entity.memory_policy.guaranteed) and
+ equal(convert_to_bytes(self.param('memory_max')), entity.memory_policy.max) and
+ equal(self.param('cpu_cores'), entity.cpu.topology.cores) and
+ equal(self.param('cpu_sockets'), entity.cpu.topology.sockets) and
+ equal(self.param('cpu_threads'), entity.cpu.topology.threads) and
+ equal(self.param('cpu_mode'), str(cpu_mode) if cpu_mode else None) and
+ equal(self.param('type'), str(entity.type)) and
+ equal(self.param('name'), str(entity.name)) and
+ equal(self.param('operating_system'), str(entity.os.type)) and
+ equal(self.param('soundcard_enabled'), entity.soundcard_enabled) and
+ equal(self.param('smartcard_enabled'), getattr(it_display, 'smartcard_enabled', False)) and
+ equal(self.param('io_threads'), entity.io.threads) and
+ equal(self.param('ballooning_enabled'), entity.memory_policy.ballooning) and
+ equal(self.param('serial_console'), getattr(entity.console, 'enabled', None)) and
+ equal(self.param('usb_support'), entity.usb.enabled) and
+            equal(self.param('virtio_scsi'), getattr(entity.virtio_scsi, 'enabled', None)) and
+ equal(self.param('high_availability'), entity.high_availability.enabled) and
+ equal(self.param('high_availability_priority'), entity.high_availability.priority) and
+ equal(self.param('boot_devices'), [str(dev) for dev in getattr(entity.os.boot, 'devices', [])]) and
+ equal(self.param('description'), entity.description) and
+ equal(self.param('rng_device'), str(entity.rng_device.source) if entity.rng_device else None) and
+ equal(self.param('rng_bytes'), entity.rng_device.rate.bytes if entity.rng_device else None) and
+ equal(self.param('rng_period'), entity.rng_device.rate.period if entity.rng_device else None) and
+ equal(self.param('placement_policy'), str(entity.placement_policy.affinity) if entity.placement_policy else None)
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(type='str', default='present',
+ choices=['absent', 'present']),
+ name=dict(type='str'),
+ id=dict(type='str'),
+ memory=dict(type='str'),
+ memory_guaranteed=dict(type='str'),
+ memory_max=dict(type='str'),
+ cpu_sockets=dict(type='int'),
+ cpu_cores=dict(type='int'),
+ cpu_threads=dict(type='int'),
+ operating_system=dict(type='str'),
+ boot_devices=dict(type='list', choices=['cdrom', 'hd', 'network'], elements='str'),
+ serial_console=dict(type='bool'),
+ usb_support=dict(type='bool'),
+ high_availability=dict(type='bool'),
+ high_availability_priority=dict(type='int'),
+ watchdog=dict(type='dict'),
+ host=dict(type='str'),
+ graphical_console=dict(type='dict'),
+ description=dict(type='str'),
+ cpu_mode=dict(type='str'),
+ rng_device=dict(type='str'),
+ rng_bytes=dict(type='int', default=None),
+ rng_period=dict(type='int', default=None),
+ placement_policy=dict(type='str'),
+ cpu_pinning=dict(type='list', elements='dict'),
+ soundcard_enabled=dict(type='bool', default=None),
+ virtio_scsi=dict(type='bool', default=None),
+ smartcard_enabled=dict(type='bool', default=None),
+ io_threads=dict(type='int', default=None),
+ nics=dict(type='list', default=[], elements='dict'),
+ ballooning_enabled=dict(type='bool', default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[['id', 'name']],
+ )
+
+ check_sdk(module)
+ check_params(module)
+
+ try:
+ state = module.params['state']
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ its_service = connection.system_service().instance_types_service()
+ its_module = InstanceTypeModule(
+ connection=connection,
+ module=module,
+ service=its_service,
+ )
+ it = its_module.search_entity()
+
+ if state == 'present':
+ ret = its_module.create(
+ entity=it
+ )
+ its_module.post_present(ret['id'])
+ ret['changed'] = its_module.changed
+ elif state == 'absent':
+ ret = its_module.remove()
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_job.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_job.py
new file mode 100644
index 000000000..b30129e13
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_job.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_job
+short_description: Module to manage jobs in oVirt/RHV
+version_added: "1.0.0"
+author: "Martin Necas (@mnecas)"
+description:
+ - "This module manage jobs in oVirt/RHV. It can also manage steps of the job."
+options:
+ description:
+ description:
+ - "Description of the job."
+ - "When task with same description has already finished and you rerun taks it will create new job."
+ required: true
+ type: str
+ state:
+ description:
+ - "Should the job be C(present)/C(absent)/C(failed)."
+ - "C(started) is alias for C(present). C(finished) is alias for C(absent). Same in the steps."
+ - "Note when C(finished)/C(failed) it will finish/fail all steps."
+ choices: ['present', 'absent', 'started', 'finished', 'failed']
+ default: present
+ type: str
+ steps:
+ description:
+ - "The steps of the job."
+ suboptions:
+ description:
+ description:
+ - "Description of the step."
+ required: true
+ state:
+ description:
+ - "Should the step be present/absent/failed."
+ - "Note when one step fail whole job will fail"
+ - "Note when all steps are finished it will finish job."
+ choices: ['present', 'absent', 'started', 'finished', 'failed']
+ default: present
+ type: list
+ elements: dict
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Create job with two steps
+ ovirt.ovirt.ovirt_job:
+ description: job_name
+ steps:
+ - description: step_name_A
+ - description: step_name_B
+
+- name: Finish one step
+ ovirt.ovirt.ovirt_job:
+ description: job_name
+ steps:
+ - description: step_name_A
+ state: finished
+
+- name: When you fail one step whole job will stop
+ ovirt.ovirt.ovirt_job:
+ description: job_name
+ steps:
+ - description: step_name_B
+ state: failed
+
+- name: Finish all steps
+ ovirt.ovirt.ovirt_job:
+ description: job_name
+ state: finished
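+
+# A hypothetical sketch (the description is illustrative); per the notes above,
+# failing the job also fails all of its unfinished steps:
+- name: Fail the job and all its steps
+  ovirt.ovirt.ovirt_job:
+    description: job_name
+    state: failed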
+'''
+
+RETURN = '''
+id:
+ description: ID of the job which is managed
+ returned: On success if job is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+job:
+ description: "Dictionary of all the job attributes. Job attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/job."
+ returned: On success if job is found.
+ type: dict
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ equal,
+ get_id_by_name,
+ ovirt_full_argument_spec,
+ get_dict_of_struct,
+)
+
+
+def build_job(description):
+ return otypes.Job(
+ description=description,
+ status=otypes.JobStatus.STARTED,
+ external=True,
+ auto_cleared=True
+ )
+
+
+def build_step(description, job_id):
+ return otypes.Step(
+ description=description,
+ type=otypes.StepEnum.UNKNOWN,
+ job=otypes.Job(
+ id=job_id
+ ),
+ status=otypes.StepStatus.STARTED,
+ external=True,
+ )
+
+
+def attach_steps(module, job_id, jobs_service):
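+    # Create steps that don't exist yet and finish/fail existing unfinished steps according to their requested state.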
+ changed = False
+ steps_service = jobs_service.job_service(job_id).steps_service()
+ if module.params.get('steps'):
+ for step in module.params.get('steps'):
+ step_entity = get_entity(steps_service, step.get('description'))
+ step_state = step.get('state', 'present')
+ if step_state in ['present', 'started']:
+ if step_entity is None:
+ steps_service.add(build_step(step.get('description'), job_id))
+ changed = True
+ if step_entity is not None and step_entity.status not in [otypes.StepStatus.FINISHED, otypes.StepStatus.FAILED]:
+ if step_state in ['absent', 'finished']:
+ steps_service.step_service(step_entity.id).end(succeeded=True)
+ changed = True
+ elif step_state == 'failed':
+ steps_service.step_service(step_entity.id).end(succeeded=False)
+ changed = True
+ return changed
+
+
+def get_entity(service, description):
+ all_entities = service.list()
+ for entity in all_entities:
+ if entity.description == description and entity.status not in [otypes.StepStatus.FINISHED, otypes.JobStatus.FINISHED]:
+ return entity
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent', 'started', 'finished', 'failed'],
+ default='present',
+ ),
+ description=dict(required=True),
+ steps=dict(default=None, type='list', elements='dict'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=False,
+ )
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ jobs_service = connection.system_service().jobs_service()
+
+ state = module.params['state']
+ job = get_entity(jobs_service, module.params['description'])
+ changed = False
+ if state in ['present', 'started']:
+ if job is None or job.status in [otypes.JobStatus.FINISHED, otypes.JobStatus.FAILED]:
+ job = jobs_service.add(build_job(module.params['description']))
+ changed = True
+ changed = attach_steps(module, job.id, jobs_service) or changed
+
+ if job is not None and job.status not in [otypes.JobStatus.FINISHED, otypes.JobStatus.FAILED]:
+ if state in ['absent', 'finished']:
+ jobs_service.job_service(job.id).end(succeeded=True)
+ changed = True
+
+ elif state == 'failed':
+ jobs_service.job_service(job.id).end(succeeded=False)
+ changed = True
+
+ ret = {
+ 'changed': changed,
+ 'id': getattr(job, 'id', None),
+ 'job': get_dict_of_struct(
+ struct=job,
+ connection=connection,
+ fetch_nested=True,
+ attributes=module.params.get('nested_attributes'),
+ ),
+ }
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_mac_pool.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_mac_pool.py
new file mode 100644
index 000000000..257fbce49
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_mac_pool.py
@@ -0,0 +1,186 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_mac_pool
+short_description: Module to manage MAC pools in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "This module manage MAC pools in oVirt/RHV."
+options:
+ id:
+ description:
+ - "ID of the mac pool to manage."
+ type: str
+ name:
+ description:
+ - "Name of the MAC pool to manage."
+ required: true
+ type: str
+ description:
+ description:
+ - "Description of the MAC pool."
+ type: str
+ state:
+ description:
+ - "Should the mac pool be present or absent."
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ allow_duplicates:
+ description:
+ - "If I(true) allow a MAC address to be used multiple times in a pool."
+ - "Default value is set by oVirt/RHV engine to I(false)."
+ type: bool
+ ranges:
+ description:
+ - "List of MAC ranges. The from and to should be split by comma."
+ - "For example: 00:1a:4a:16:01:51,00:1a:4a:16:01:61"
+ type: list
+ elements: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create MAC pool:
+- ovirt.ovirt.ovirt_mac_pool:
+ name: mymacpool
+ allow_duplicates: false
+ ranges:
+ - 00:1a:4a:16:01:51,00:1a:4a:16:01:61
+ - 00:1a:4a:16:02:51,00:1a:4a:16:02:61
+
+# Remove MAC pool:
+- ovirt.ovirt.ovirt_mac_pool:
+ state: absent
+ name: mymacpool
+
+# Change MAC pool Name
+- ovirt.ovirt.ovirt_mac_pool:
+ id: 00000000-0000-0000-0000-000000000000
+ name: "new_mac_pool_name"
+'''
+
+RETURN = '''
+id:
+ description: ID of the MAC pool which is managed
+ returned: On success if MAC pool is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+mac_pool:
+ description: "Dictionary of all the MAC pool attributes. MAC pool attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/mac_pool."
+ returned: On success if MAC pool is found.
+ type: dict
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ equal,
+ create_connection,
+ ovirt_full_argument_spec,
+)
+
+
+class MACPoolModule(BaseModule):
+
+ def build_entity(self):
+ return otypes.MacPool(
+ name=self._module.params['name'],
+ id=self._module.params['id'],
+ allow_duplicates=self._module.params['allow_duplicates'],
+ description=self._module.params['description'],
+ ranges=[
+ otypes.Range(
+ from_=mac_range.split(',')[0],
+ to=mac_range.split(',')[1],
+ )
+ for mac_range in self._module.params['ranges']
+ ] if self._module.params['ranges'] else None,
+ )
+
+ def _compare_ranges(self, entity):
+ if self._module.params['ranges'] is not None:
+ ranges = sorted([
+ '%s,%s' % (mac_range.from_, mac_range.to)
+ for mac_range in entity.ranges
+ ])
+ return equal(sorted(self._module.params['ranges']), ranges)
+
+ return True
+
+ def update_check(self, entity):
+ return (
+ self._compare_ranges(entity) and
+ equal(self._module.params['allow_duplicates'], entity.allow_duplicates) and
+ equal(self._module.params['description'], entity.description) and
+ equal(self._module.params['name'], entity.name)
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ name=dict(required=True),
+ id=dict(default=None),
+ allow_duplicates=dict(default=None, type='bool'),
+ description=dict(default=None),
+ ranges=dict(default=None, type='list', elements='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ mac_pools_service = connection.system_service().mac_pools_service()
+ mac_pools_module = MACPoolModule(
+ connection=connection,
+ module=module,
+ service=mac_pools_service,
+ )
+
+ state = module.params['state']
+ if state == 'present':
+ ret = mac_pools_module.create()
+ elif state == 'absent':
+ ret = mac_pools_module.remove()
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_network.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_network.py
new file mode 100644
index 000000000..889914ae1
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_network.py
@@ -0,0 +1,380 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_network
+short_description: Module to manage logical networks in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Module to manage logical networks in oVirt/RHV"
+options:
+ id:
+ description:
+ - "ID of the network to manage."
+ type: str
+ name:
+ description:
+ - "Name of the network to manage."
+ required: true
+ type: str
+ state:
+ description:
+ - "Should the network be present or absent"
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ data_center:
+ description:
+ - "Datacenter name where network reside."
+ type: str
+ required: true
+ description:
+ description:
+ - "Description of the network."
+ type: str
+ comment:
+ description:
+ - "Comment of the network."
+ type: str
+ vlan_tag:
+ description:
+ - "Specify VLAN tag."
+ - "NOTE - To remove the vlan_tag use -1."
+ type: int
+ external_provider:
+ description:
+ - "Name of external network provider."
+ - "At first it tries to import the network when not found it will create network in external provider."
+ type: str
+ vm_network:
+ description:
+ - "If I(True) network will be marked as network for VM."
+ - "VM network carries traffic relevant to the virtual machine."
+ type: bool
+ mtu:
+ description:
+ - "Maximum transmission unit (MTU) of the network."
+ type: int
+ clusters:
+ description:
+ - "List of dictionaries describing how the network is managed in specific cluster."
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Cluster name.
+ assigned:
+ description:
+ - I(true) if the network should be assigned to cluster. Default is I(true).
+ type: bool
+ required:
+ description:
+ - I(true) if the network must remain operational for all hosts associated with this network.
+ type: bool
+ display:
+ description:
+                    - I(true) if the network should be marked as a display network.
+ type: bool
+ migration:
+ description:
+                    - I(true) if the network should be marked as a migration network.
+ type: bool
+ gluster:
+ description:
+                    - I(true) if the network should be marked as a gluster network.
+ type: bool
+ label:
+ description:
+ - "Name of the label to assign to the network."
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create network
+- ovirt.ovirt.ovirt_network:
+ data_center: mydatacenter
+ name: mynetwork
+ vlan_tag: 10
+ vm_network: true
+
+# Remove network
+- ovirt.ovirt.ovirt_network:
+ state: absent
+ name: mynetwork
+
+# Change Network Name
+- ovirt.ovirt.ovirt_network:
+ id: 00000000-0000-0000-0000-000000000000
+ name: "new_network_name"
+ data_center: mydatacenter
+
+# Add network from external provider
+- ovirt.ovirt.ovirt_network:
+ data_center: mydatacenter
+ name: mynetwork
+ external_provider: ovirt-provider-ovn
+
+# Remove vlan_tag
+- ovirt.ovirt.ovirt_network:
+ data_center: mydatacenter
+ name: mynetwork
+ vlan_tag: -1
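+
+# A hypothetical sketch (cluster name is illustrative) assigning the network to
+# a cluster as a required migration network, using the clusters suboptions above:
+- ovirt.ovirt.ovirt_network:
+    data_center: mydatacenter
+    name: mynetwork
+    clusters:
+      - name: mycluster
+        assigned: true
+        required: true
+        migration: true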
+'''
+
+RETURN = '''
+id:
+ description: "ID of the managed network"
+ returned: "On success if network is found."
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+network:
+ description: "Dictionary of all the network attributes. Network attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/network."
+ returned: "On success if network is found."
+ type: dict
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ check_params,
+ create_connection,
+ equal,
+ ovirt_full_argument_spec,
+ search_by_name,
+ get_id_by_name,
+ get_dict_of_struct,
+ get_entity
+)
+
+
+class NetworksModule(BaseModule):
+ def build_entity(self):
+ if self.param('external_provider'):
+ ons_service = self._connection.system_service().openstack_network_providers_service()
+ on_service = ons_service.provider_service(get_id_by_name(ons_service, self.param('external_provider')))
+ return otypes.Network(
+ name=self._module.params['name'],
+ comment=self._module.params['comment'],
+ description=self._module.params['description'],
+ id=self._module.params['id'],
+ data_center=otypes.DataCenter(
+ name=self._module.params['data_center'],
+ ) if self._module.params['data_center'] else None,
+ vlan=otypes.Vlan(
+ self._module.params['vlan_tag'] if self._module.params['vlan_tag'] != -1 else None,
+ ) if self._module.params['vlan_tag'] is not None else None,
+ usages=[
+ otypes.NetworkUsage.VM if self._module.params['vm_network'] else None
+ ] if self._module.params['vm_network'] is not None else None,
+ mtu=self._module.params['mtu'],
+ external_provider=otypes.OpenStackNetworkProvider(id=on_service.get().id)
+ if self.param('external_provider') else None,
+ )
+
+ def post_create(self, entity):
+ self._update_label_assignments(entity)
+
+ def _update_label_assignments(self, entity):
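+        # Ensure the requested label is assigned to the network, replacing the first currently assigned label if it differs.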
+ if self.param('label') is None:
+ return
+
+ labels_service = self._service.service(entity.id).network_labels_service()
+ labels = [lbl.id for lbl in labels_service.list()]
+        if self.param('label') not in labels:
+ if not self._module.check_mode:
+ if labels:
+ labels_service.label_service(labels[0]).remove()
+ labels_service.add(
+ label=otypes.NetworkLabel(id=self.param('label'))
+ )
+ self.changed = True
+
+ def update_check(self, entity):
+ self._update_label_assignments(entity)
+ vlan_tag_changed = equal(self._module.params.get('vlan_tag'), getattr(entity.vlan, 'id', None))
+ if self._module.params.get('vlan_tag') == -1:
+ vlan_tag_changed = getattr(entity.vlan, 'id', None) is None
+ return (
+ vlan_tag_changed and
+ equal(self._module.params.get('comment'), entity.comment) and
+ equal(self._module.params.get('name'), entity.name) and
+ equal(self._module.params.get('description'), entity.description) and
+ equal(self._module.params.get('vm_network'), True if entity.usages else False) and
+ equal(self._module.params.get('mtu'), entity.mtu)
+ )
+
+
+class ClusterNetworksModule(BaseModule):
+
+ def __init__(self, network_id, cluster_network, *args, **kwargs):
+ super(ClusterNetworksModule, self).__init__(*args, **kwargs)
+ self._network_id = network_id
+ self._cluster_network = cluster_network
+ self._old_usages = []
+ self._cluster_network_entity = get_entity(self._service.network_service(network_id))
+ if self._cluster_network_entity is not None:
+ self._old_usages = self._cluster_network_entity.usages
+
+ def build_entity(self):
+ return otypes.Network(
+ id=self._network_id,
+ name=self._module.params['name'],
+ required=self._cluster_network.get('required'),
+ display=self._cluster_network.get('display'),
+ usages=list(set([
+ otypes.NetworkUsage(usage)
+ for usage in ['display', 'gluster', 'migration']
+ if self._cluster_network.get(usage, False)
+ ] + self._old_usages))
+ if (
+ self._cluster_network.get('display') is not None or
+ self._cluster_network.get('gluster') is not None or
+ self._cluster_network.get('migration') is not None
+ ) else None,
+ )
+
+ def update_check(self, entity):
+ return (
+ equal(self._cluster_network.get('required'), entity.required) and
+ equal(self._cluster_network.get('display'), entity.display) and
+ all(
+ x in [
+ str(usage)
+ for usage in getattr(entity, 'usages', [])
+ # VM + MANAGEMENT is part of root network
+ if usage != otypes.NetworkUsage.VM and usage != otypes.NetworkUsage.MANAGEMENT
+ ]
+ for x in [
+ usage
+ for usage in ['display', 'gluster', 'migration']
+ if self._cluster_network.get(usage, False)
+ ]
+ )
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ data_center=dict(required=True),
+ id=dict(default=None),
+ name=dict(required=True),
+ description=dict(default=None),
+ comment=dict(default=None),
+ external_provider=dict(default=None),
+ vlan_tag=dict(default=None, type='int'),
+ vm_network=dict(default=None, type='bool'),
+ mtu=dict(default=None, type='int'),
+ clusters=dict(default=None, type='list', elements='dict'),
+ label=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+ check_params(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ clusters_service = connection.system_service().clusters_service()
+ networks_service = connection.system_service().networks_service()
+ networks_module = NetworksModule(
+ connection=connection,
+ module=module,
+ service=networks_service,
+ )
+ state = module.params['state']
+ search_params = {
+ 'name': module.params['name'],
+ 'datacenter': module.params['data_center'],
+ }
+ if state == 'present':
+ imported = False
+ if module.params.get('external_provider') and module.params.get('name') not in [net.name for net in networks_service.list()]:
+ # Try to import network
+ ons_service = connection.system_service().openstack_network_providers_service()
+ on_service = ons_service.provider_service(get_id_by_name(ons_service, module.params.get('external_provider')))
+ on_networks_service = on_service.networks_service()
+ if module.params.get('name') in [net.name for net in on_networks_service.list()]:
+ network_service = on_networks_service.network_service(get_id_by_name(on_networks_service, module.params.get('name')))
+ network_service.import_(data_center=otypes.DataCenter(name=module.params.get('data_center')))
+ imported = True
+
+ ret = networks_module.create(search_params=search_params)
+ ret['changed'] = ret['changed'] or imported
+ # Update clusters networks:
+ if module.params.get('clusters') is not None:
+ for param_cluster in module.params.get('clusters'):
+ cluster = search_by_name(clusters_service, param_cluster.get('name'))
+ if cluster is None:
+ raise Exception("Cluster '%s' was not found." % param_cluster.get('name'))
+ cluster_networks_service = clusters_service.service(cluster.id).networks_service()
+ cluster_networks_module = ClusterNetworksModule(
+ network_id=ret['id'],
+ cluster_network=param_cluster,
+ connection=connection,
+ module=module,
+ service=cluster_networks_service,
+ )
+ if param_cluster.get('assigned', True):
+ ret = cluster_networks_module.create()
+ else:
+ ret = cluster_networks_module.remove()
+
+ elif state == 'absent':
+ ret = networks_module.remove(search_params=search_params)
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_network_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_network_info.py
new file mode 100644
index 000000000..b65e03643
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_network_info.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_network_info
+short_description: Retrieve information about one or more oVirt/RHV networks
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV networks."
+ - This module was called C(ovirt_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_network_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_networks), which
+ contains a list of networks. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search network starting with string vlan1 use: name=vlan1*"
+ type: str
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url: https://ovirt.github.io/ovirt-engine-api-model/master/#types/network/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all networks whose names start with C(vlan1):
+- ovirt.ovirt.ovirt_network_info:
+ pattern: name=vlan1*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_networks }}"
+'''
+
+
+RETURN = '''
+ovirt_networks:
+ description: "List of dictionaries describing the networks. Network attributes are mapped to dictionary keys,
+ all networks attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/network."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+ "The 'fetch_nested' and 'nested_attributes' are deprecated please use 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ networks_service = connection.system_service().networks_service()
+ networks = networks_service.list(
+ search=module.params['pattern'],
+ follow=",".join(module.params['follow'])
+ )
+ result = dict(
+ ovirt_networks=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in networks
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_nic.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_nic.py
new file mode 100644
index 000000000..dc1c1801f
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_nic.py
@@ -0,0 +1,344 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_nic
+short_description: Module to manage network interfaces of Virtual Machines in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - Module to manage network interfaces of Virtual Machines in oVirt/RHV.
+options:
+ id:
+ description:
+ - "ID of the nic to manage."
+ type: str
+ name:
+ description:
+ - Name of the network interface to manage.
+ required: true
+ type: str
+ vm:
+ description:
+ - Name of the Virtual Machine to manage.
+      - You must provide either the C(vm) parameter or the C(template) parameter.
+ type: str
+ template:
+ description:
+ - Name of the template to manage.
+      - You must provide either the C(vm) parameter or the C(template) parameter.
+ type: str
+ template_version:
+ description:
+ - Version number of the template.
+ type: int
+ version_added: 1.2.0
+ state:
+ description:
+ - Should the Virtual Machine NIC be present/absent/plugged/unplugged.
+ choices: [ absent, plugged, present, unplugged ]
+ default: present
+ type: str
+ network:
+ description:
+      - Logical network that the VM network interface should use;
+        by default, the Empty network is used if no network is specified.
+ type: str
+ profile:
+ description:
+ - Virtual network interface profile to be attached to VM network interface.
+      - When not specified and the network has only a single profile, that profile will be auto-selected; otherwise you must specify the profile.
+ type: str
+ interface:
+ description:
+ - "Type of the network interface. For example e1000, pci_passthrough, rtl8139, rtl8139_virtio, spapr_vlan or virtio."
+ - "It's required parameter when creating the new NIC."
+ type: str
+ mac_address:
+ description:
+      - Custom MAC address of the network interface; by default it is obtained from the MAC pool.
+ type: str
+ linked:
+ description:
+ - Defines if the NIC is linked to the virtual machine.
+ type: bool
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Add NIC to VM
+ ovirt.ovirt.ovirt_nic:
+ state: present
+ vm: myvm
+ name: mynic
+ interface: e1000
+ mac_address: 00:1a:4a:16:01:56
+ profile: ovirtmgmt
+ network: ovirtmgmt
+
+- name: Plug NIC to VM
+ ovirt.ovirt.ovirt_nic:
+ state: plugged
+ vm: myvm
+ name: mynic
+
+- name: Unplug NIC from VM
+ ovirt.ovirt.ovirt_nic:
+ state: unplugged
+ linked: false
+ vm: myvm
+ name: mynic
+
+- name: Add NIC to template
+ ovirt.ovirt.ovirt_nic:
+ auth: "{{ ovirt_auth }}"
+ state: present
+ template: my_template
+ name: nic1
+ interface: virtio
+ profile: ovirtmgmt
+ network: ovirtmgmt
+
+- name: Remove NIC from VM
+ ovirt.ovirt.ovirt_nic:
+ state: absent
+ vm: myvm
+ name: mynic
+
+# Change NIC Name
+- ovirt.ovirt.ovirt_nic:
+ id: 00000000-0000-0000-0000-000000000000
+ name: "new_nic_name"
+ vm: myvm
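+
+# A hedged sketch of the template_version parameter: manage a NIC on a
+# specific version of a versioned template (the template name and
+# version number are illustrative):
+- name: Add NIC to the second version of my_template
+  ovirt.ovirt.ovirt_nic:
+    state: present
+    template: my_template
+    template_version: 2
+    name: nic1
+    interface: virtio
+    profile: ovirtmgmt
+    network: ovirtmgmt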
+'''
+
+RETURN = '''
+id:
+ description: ID of the network interface which is managed
+ returned: On success if network interface is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+nic:
+ description: "Dictionary of all the network interface attributes. Network interface attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/nic."
+ returned: On success if network interface is found.
+ type: dict
+'''
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ get_link_name,
+ ovirt_full_argument_spec,
+ search_by_name,
+)
+
+
+class EntityNicsModule(BaseModule):
+
+ def __init__(self, *args, **kwargs):
+ super(EntityNicsModule, self).__init__(*args, **kwargs)
+ self.vnic_id = None
+
+ @property
+ def vnic_id(self):
+ return self._vnic_id
+
+ @vnic_id.setter
+ def vnic_id(self, vnic_id):
+ self._vnic_id = vnic_id
+
+ def build_entity(self):
+ return otypes.Nic(
+ id=self._module.params.get('id'),
+ name=self._module.params.get('name'),
+ interface=otypes.NicInterface(
+ self._module.params.get('interface')
+ ) if self._module.params.get('interface') else None,
+ vnic_profile=otypes.VnicProfile(
+ id=self.vnic_id,
+ ) if self.vnic_id else None,
+ mac=otypes.Mac(
+ address=self._module.params.get('mac_address')
+ ) if self._module.params.get('mac_address') else None,
+ linked=self.param('linked') if self.param('linked') is not None else None,
+ )
+
+ def update_check(self, entity):
+ if self._module.params.get('vm'):
+ return (
+ equal(self._module.params.get('interface'), str(entity.interface)) and
+ equal(self._module.params.get('linked'), entity.linked) and
+ equal(self._module.params.get('name'), str(entity.name)) and
+ equal(self._module.params.get('profile'), get_link_name(self._connection, entity.vnic_profile)) and
+ equal(self._module.params.get('mac_address'), entity.mac.address)
+ )
+ elif self._module.params.get('template'):
+ return (
+ equal(self._module.params.get('interface'), str(entity.interface)) and
+ equal(self._module.params.get('linked'), entity.linked) and
+ equal(self._module.params.get('name'), str(entity.name)) and
+ equal(self._module.params.get('profile'), get_link_name(self._connection, entity.vnic_profile))
+ )
+
+
+def get_vnics(networks_service, network, connection):
+ resp = []
+ vnic_services = connection.system_service().vnic_profiles_service()
+ for vnic in vnic_services.list():
+ if vnic.network.id == network.id:
+ resp.append(vnic)
+ return resp
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(type='str', default='present', choices=['absent', 'plugged', 'present', 'unplugged']),
+ vm=dict(type='str'),
+ id=dict(default=None),
+ template=dict(type='str'),
+ name=dict(type='str', required=True),
+ interface=dict(type='str'),
+ template_version=dict(type='int', default=None),
+ profile=dict(type='str'),
+ network=dict(type='str'),
+ mac_address=dict(type='str'),
+ linked=dict(type='bool'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[['vm', 'template']],
+ )
+
+ check_sdk(module)
+
+ try:
+ # Locate the service that manages the virtual machines and use it to
+ # search for the NIC:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ entity_name = None
+
+ if module.params.get('vm'):
+ # Locate the VM, where we will manage NICs:
+ entity_name = module.params.get('vm')
+ collection_service = connection.system_service().vms_service()
+ elif module.params.get('template'):
+ entity_name = module.params.get('template')
+ collection_service = connection.system_service().templates_service()
+
+ # TODO: We have to modify the search_by_name function to accept raise_error=True/False,
+ if module.params['template_version'] is not None:
+ entity = [
+ t for t in collection_service.list()
+ if t.version.version_number == module.params['template_version']
+ ]
+ if not entity:
+ raise ValueError(
+ "Template with name '%s' and version '%s' was not found'" % (
+ module.params['template'],
+ module.params['template_version']
+ )
+ )
+ entity = entity[0]
+ else:
+ entity = search_by_name(collection_service, entity_name)
+ if entity is None:
+ raise Exception("Vm/Template '%s' was not found." % entity_name)
+
+ service = collection_service.service(entity.id)
+ cluster_id = entity.cluster
+
+ nics_service = service.nics_service()
+ entitynics_module = EntityNicsModule(
+ connection=connection,
+ module=module,
+ service=nics_service,
+ )
+
+ # Find vNIC id of the network interface (if any):
+ if module.params['network']:
+ profile = module.params.get('profile')
+ cluster_name = get_link_name(connection, cluster_id)
+ dcs_service = connection.system_service().data_centers_service()
+ dc = dcs_service.list(search='Clusters.name=%s' % cluster_name)[0]
+ networks_service = dcs_service.service(dc.id).networks_service()
+ network = next(
+ (n for n in networks_service.list()
+ if n.name == module.params['network']),
+ None
+ )
+ if network is None:
+ raise Exception(
+ "Network '%s' was not found in datacenter '%s'." % (
+ module.params['network'],
+ dc.name
+ )
+ )
+ if profile:
+ for vnic in connection.system_service().vnic_profiles_service().list():
+ if vnic.name == profile and vnic.network.id == network.id:
+ entitynics_module.vnic_id = vnic.id
+ else:
+                # When no vnic profile is specified, auto-select one if the network has exactly one profile:
+ vnics = get_vnics(networks_service, network, connection)
+ if len(vnics) == 1:
+ entitynics_module.vnic_id = vnics[0].id
+ else:
+ raise Exception(
+ "You didn't specify any vnic profile. "
+ "Following vnic profiles are in system: '%s', please specify one of them" % ([vnic.name for vnic in vnics])
+ )
+ # Handle appropriate action:
+ state = module.params['state']
+ if state == 'present':
+ ret = entitynics_module.create()
+ elif state == 'absent':
+ ret = entitynics_module.remove()
+ elif state == 'plugged':
+ entitynics_module.create()
+ ret = entitynics_module.action(
+ action='activate',
+ action_condition=lambda nic: not nic.plugged,
+ wait_condition=lambda nic: nic.plugged,
+ )
+ elif state == 'unplugged':
+ entitynics_module.create()
+ ret = entitynics_module.action(
+ action='deactivate',
+ action_condition=lambda nic: nic.plugged,
+ wait_condition=lambda nic: not nic.plugged,
+ )
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_nic_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_nic_info.py
new file mode 100644
index 000000000..c1daede60
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_nic_info.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_nic_info
+short_description: Retrieve information about one or more oVirt/RHV virtual machine network interfaces
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV virtual machine network interfaces."
+ - This module was called C(ovirt_nic_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_nic_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_nics), which
+ contains a list of NICs. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ vm:
+ description:
+ - "Name of the VM where NIC is attached."
+ - You must provide either C(vm) parameter or C(template) parameter.
+ type: str
+ template:
+ description:
+ - "Name of the template where NIC is attached."
+ - You must provide either C(vm) parameter or C(template) parameter.
+ type: str
+ version_added: 1.2.0
+ name:
+ description:
+ - "Name of the NIC, can be used as glob expression."
+ type: str
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url: https://ovirt.github.io/ovirt-engine-api-model/master/#types/nic/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all NICs whose names start with C(eth) for the VM named C(centos7):
+- ovirt.ovirt.ovirt_nic_info:
+ vm: centos7
+ name: eth*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_nics }}"
+'''
+
+RETURN = '''
+ovirt_nics:
+ description: "List of dictionaries describing the network interfaces. NIC attributes are mapped to dictionary keys,
+ all NICs attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/nic."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ vm=dict(default=None),
+ template=dict(default=None),
+ name=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ required_one_of=[['vm', 'template']],
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+ "The 'fetch_nested' and 'nested_attributes' are deprecated please use 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+
+ if module.params.get('vm'):
+ # Locate the VM, where we will manage NICs:
+ entity_name = module.params.get('vm')
+ collection_service = connection.system_service().vms_service()
+ elif module.params.get('template'):
+ entity_name = module.params.get('template')
+ collection_service = connection.system_service().templates_service()
+ entity = search_by_name(collection_service, entity_name)
+ if entity is None:
+ raise Exception("VM/Template '%s' was not found." % entity_name)
+
+ nics_service = collection_service.service(entity.id).nics_service()
+ if module.params['name']:
+ nics = [
+ e for e in nics_service.list(follow=",".join(module.params['follow']))
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ else:
+ nics = nics_service.list(follow=",".join(module.params['follow']))
+
+ result = dict(
+ ovirt_nics=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in nics
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_permission.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_permission.py
new file mode 100644
index 000000000..350cacaf1
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_permission.py
@@ -0,0 +1,338 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_permission
+short_description: Module to manage permissions of users/groups in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - Module to manage permissions of users/groups in oVirt/RHV.
+options:
+ role:
+ description:
+ - Name of the role to be assigned to user/group on specific object.
+ default: UserRole
+ type: str
+ state:
+ description:
+ - Should the permission be present/absent.
+ choices: [ absent, present ]
+ default: present
+ type: str
+ object_id:
+ description:
+ - ID of the object where the permissions should be managed.
+ type: str
+ object_name:
+ description:
+ - Name of the object where the permissions should be managed.
+ type: str
+ object_type:
+ description:
+      - The type of the object where the permissions should be managed.
+ choices:
+ - cluster
+ - cpu_profile
+ - data_center
+ - disk
+ - disk_profile
+ - host
+ - network
+ - storage_domain
+ - system
+ - template
+ - vm
+ - vm_pool
+ - vnic_profile
+ - mac_pool
+ default: vm
+ type: str
+ user_name:
+ description:
+ - Username of the user to manage. In most LDAPs it's I(uid) of the user,
+ but in Active Directory you must specify I(UPN) of the user.
+      - Note that if the user does not exist in the system this module will fail;
+        you should ensure the user exists by using the M(ovirt.ovirt.ovirt_user) module.
+ type: str
+ group_name:
+ description:
+ - Name of the group to manage.
+      - Note that if the group does not exist in the system this module will fail;
+        you should ensure the group exists by using the M(ovirt.ovirt.ovirt_group) module.
+ type: str
+ authz_name:
+ description:
+ - Authorization provider of the user/group.
+ required: true
+ aliases: [ domain ]
+ type: str
+ namespace:
+ description:
+ - Namespace of the authorization provider, where user/group resides.
+ type: str
+ quota_name:
+ description:
+      - Name of the quota to assign the permission to. Works only with C(object_type) I(data_center).
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Add user user1 from authorization provider example.com-authz
+ ovirt.ovirt.ovirt_permission:
+ user_name: user1
+ authz_name: example.com-authz
+ object_type: vm
+ object_name: myvm
+ role: UserVmManager
+
+- name: Remove permission from user
+ ovirt.ovirt.ovirt_permission:
+ state: absent
+ user_name: user1
+ authz_name: example.com-authz
+ object_type: cluster
+ object_name: mycluster
+ role: ClusterAdmin
+
+- name: Assign QuotaConsumer role to user
+  ovirt.ovirt.ovirt_permission:
+ state: present
+ user_name: user1
+ authz_name: example.com-authz
+ object_type: data_center
+ object_name: mydatacenter
+ quota_name: myquota
+ role: QuotaConsumer
+
+- name: Assign QuotaConsumer role to group
+  ovirt.ovirt.ovirt_permission:
+ state: present
+ group_name: group1
+ authz_name: example.com-authz
+ object_type: data_center
+ object_name: mydatacenter
+ quota_name: myquota
+ role: QuotaConsumer
+
+- ovirt.ovirt.ovirt_permission:
+ user_name: user1
+ authz_name: example.com-authz
+ object_type: mac_pool
+ object_name: Default
+ role: MacPoolUser
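+
+# A hedged sketch: with object_type system no object_name/object_id is
+# needed, so a system-wide role can be granted (SuperUser is a standard
+# oVirt role; the user is illustrative):
+- name: Grant user1 system-wide administration permissions
+  ovirt.ovirt.ovirt_permission:
+    user_name: user1
+    authz_name: example.com-authz
+    object_type: system
+    role: SuperUser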
+'''
+
+RETURN = '''
+id:
+ description: ID of the permission which is managed
+ returned: On success if permission is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+permission:
+ description: "Dictionary of all the permission attributes. Permission attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/permission."
+ returned: On success if permission is found.
+ type: dict
+'''
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ follow_link,
+ get_link_name,
+ ovirt_full_argument_spec,
+ search_by_attributes,
+ search_by_name,
+ get_id_by_name
+)
+
+
+def _objects_service(connection, object_type):
+ if object_type == 'system':
+ return connection.system_service()
+
+ return getattr(
+ connection.system_service(),
+ '%ss_service' % object_type,
+ None,
+ )()
+
+
+def _object_service(connection, module):
+ object_type = module.params['object_type']
+ objects_service = _objects_service(connection, object_type)
+ if object_type == 'system':
+ return objects_service
+
+ object_id = module.params['object_id']
+ if object_id is None:
+ sdk_object = search_by_name(objects_service, module.params['object_name'])
+ if sdk_object is None:
+ raise Exception(
+ "'%s' object '%s' was not found." % (
+ module.params['object_type'],
+ module.params['object_name']
+ )
+ )
+ object_id = sdk_object.id
+
+ object_service = objects_service.service(object_id)
+ if module.params['quota_name'] and object_type == 'data_center':
+ quotas_service = object_service.quotas_service()
+ return quotas_service.quota_service(get_id_by_name(quotas_service, module.params['quota_name']))
+ return object_service
+
+
+def _permission(module, permissions_service, connection):
+ for permission in permissions_service.list():
+ user = follow_link(connection, permission.user)
+ if (
+ equal(module.params['user_name'], user.principal if user else None) and
+ equal(module.params['group_name'], get_link_name(connection, permission.group)) and
+ equal(module.params['role'], get_link_name(connection, permission.role))
+ ):
+ return permission
+
+
+class PermissionsModule(BaseModule):
+
+ def _user(self):
+ user = search_by_attributes(
+ self._connection.system_service().users_service(),
+ usrname="{name}@{authz_name}".format(
+ name=self._module.params['user_name'],
+ authz_name=self._module.params['authz_name'],
+ ),
+ )
+ if user is None:
+ raise Exception("User '%s' was not found." % self._module.params['user_name'])
+ return user
+
+ def _group(self):
+ groups = self._connection.system_service().groups_service().list(
+ search='name="{name}"'.format(
+ name=self._module.params['group_name'],
+ )
+ )
+
+ # If found more groups, filter them by namespace and authz name:
+ # (filtering here, as oVirt/RHV backend doesn't support it)
+ if len(groups) > 1:
+ groups = [
+ g for g in groups if (
+ equal(self._module.params['namespace'], g.namespace) and
+ equal(self._module.params['authz_name'], g.domain.name)
+ )
+ ]
+ if not groups:
+ raise Exception("Group '%s' was not found." % self._module.params['group_name'])
+ return groups[0]
+
+ def build_entity(self):
+ entity = self._group() if self._module.params['group_name'] else self._user()
+
+ return otypes.Permission(
+ user=otypes.User(
+ id=entity.id
+ ) if self._module.params['user_name'] else None,
+ group=otypes.Group(
+ id=entity.id
+ ) if self._module.params['group_name'] else None,
+ role=otypes.Role(
+ name=self._module.params['role']
+ ),
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ role=dict(type='str', default='UserRole'),
+ object_type=dict(type='str', default='vm',
+ choices=[
+ 'cluster',
+ 'cpu_profile',
+ 'data_center',
+ 'disk',
+ 'disk_profile',
+ 'host',
+ 'network',
+ 'storage_domain',
+ 'system',
+ 'template',
+ 'vm',
+ 'vm_pool',
+ 'vnic_profile',
+ 'mac_pool',
+ ]),
+ authz_name=dict(type='str', required=True, aliases=['domain']),
+ object_id=dict(type='str'),
+ object_name=dict(type='str'),
+ user_name=dict(type='str'),
+ group_name=dict(type='str'),
+ namespace=dict(type='str'),
+ quota_name=dict(type='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+
+ if (module.params['object_name'] is None and module.params['object_id'] is None) and module.params['object_type'] != 'system':
+ module.fail_json(msg='"object_name" or "object_id" is required')
+
+ if module.params['user_name'] is None and module.params['group_name'] is None:
+ module.fail_json(msg='"user_name" or "group_name" is required')
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ permissions_service = _object_service(connection, module).permissions_service()
+ permissions_module = PermissionsModule(
+ connection=connection,
+ module=module,
+ service=permissions_service,
+ )
+
+ permission = _permission(module, permissions_service, connection)
+ state = module.params['state']
+ if state == 'present':
+ ret = permissions_module.create(entity=permission)
+ elif state == 'absent':
+ ret = permissions_module.remove(entity=permission)
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_permission_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_permission_info.py
new file mode 100644
index 000000000..e8fabc669
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_permission_info.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_permission_info
+short_description: Retrieve information about one or more oVirt/RHV permissions
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV permissions."
+ - This module was called C(ovirt_permission_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_permission_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_permissions), which
+ contains a list of permissions. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ user_name:
+ description:
+ - "Username of the user to manage. In most LDAPs it's I(uid) of the user, but in Active Directory you must specify I(UPN) of the user."
+ type: str
+ group_name:
+ description:
+ - "Name of the group to manage."
+ type: str
+ authz_name:
+ description:
+ - "Authorization provider of the user/group. In previous versions of oVirt/RHV known as domain."
+ type: str
+ required: true
+ aliases: ['domain']
+ namespace:
+ description:
+ - "Namespace of the authorization provider, where user/group resides."
+ required: false
+ type: str
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url: https://ovirt.github.io/ovirt-engine-api-model/master/#types/permission/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all permissions of user with username C(john):
+- ovirt.ovirt.ovirt_permission_info:
+ user_name: john
+ authz_name: example.com-authz
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_permissions }}"
+'''
+
+RETURN = '''
+ovirt_permissions:
+ description: "List of dictionaries describing the permissions. Permission attributes are mapped to dictionary keys,
+ all permissions attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/permission."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4 as sdk
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_link_name,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def _permissions_service(connection, module):
+ if module.params['user_name']:
+ service = connection.system_service().users_service()
+ entity = next(
+ iter(
+ service.list(
+ search='usrname={0}'.format(
+ '{0}@{1}'.format(module.params['user_name'], module.params['authz_name'])
+ )
+ )
+ ),
+ None
+ )
+ else:
+ service = connection.system_service().groups_service()
+ entity = search_by_name(service, module.params['group_name'])
+
+ if entity is None:
+ raise Exception("User/Group wasn't found.")
+
+ return service.service(entity.id).permissions_service()
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ authz_name=dict(required=True, aliases=['domain']),
+ user_name=dict(default=None),
+ group_name=dict(default=None),
+ namespace=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+ "The 'fetch_nested' and 'nested_attributes' are deprecated please use 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ permissions_service = _permissions_service(connection, module)
+ permissions = []
+ for p in permissions_service.list(follow=",".join(module.params['follow'])):
+ newperm = dict()
+ for key, value in p.__dict__.items():
+ if value and isinstance(value, sdk.Struct):
+ newperm[key[1:]] = get_link_name(connection, value)
+ newperm['%s_id' % key[1:]] = value.id
+ permissions.append(newperm)
+
+ result = dict(ovirt_permissions=permissions)
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_qos.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_qos.py
new file mode 100644
index 000000000..4549452ac
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_qos.py
@@ -0,0 +1,379 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2022 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_qos
+short_description: "Module to manage QoS entries in ovirt"
+author:
+- "Niall O Donnell (@odonnelln)"
+description:
+ - "Module to manage QoS entries in ovirt."
+ - "Doesn't support updating a QoS that exists"
+ - "Only works with storage QoS entries atm"
+options:
+ id:
+ description:
+ - "ID of the QoS to manage. Either C(id) or C(name) is required."
+ type: str
+ name:
+ description:
+ - "Name of QoS to manage. Either C(id) or C(name)/C(alias) is required."
+ type: str
+ description:
+ description:
+ - "Description of the QoS."
+ type: str
+ data_center:
+ description:
+ - "Name of the data center where the QoS entry should be created."
+ type: str
+    max_iops:
+        description:
+            - "The maximum number of combined read/write IOPS. If set, you cannot also set C(read_iops) or C(write_iops)."
+            - "If no value is given it will default to the HE value, assuming C(read_iops) and C(write_iops) haven't been set."
+        type: int
+    write_iops:
+        description:
+            - "The maximum number of write IOPS. If set, you cannot also set C(max_iops)."
+            - "If no value is given it will default to the HE value, assuming C(max_iops) hasn't been set."
+        type: int
+    read_iops:
+        description:
+            - "The maximum number of read IOPS. If set, you cannot also set C(max_iops)."
+            - "If no value is given it will default to the HE value, assuming C(max_iops) hasn't been set."
+        type: int
+    max_throughput:
+        description:
+            - "The maximum combined read/write throughput. If set, you cannot also set C(read_throughput) or C(write_throughput)."
+            - "If no value is given it will default to the HE value, assuming C(read_throughput) and C(write_throughput) haven't been set."
+        type: int
+    write_throughput:
+        description:
+            - "The maximum write throughput. If set, you cannot also set C(max_throughput)."
+            - "If no value is given it will default to the HE value, assuming C(max_throughput) hasn't been set."
+        type: int
+    read_throughput:
+        description:
+            - "The maximum read throughput. If set, you cannot also set C(max_throughput)."
+            - "If no value is given it will default to the HE value, assuming C(max_throughput) hasn't been set."
+        type: int
+ cpu_limit:
+ description:
+ - "The maximum processing capability in %."
+ - "Used to configure computing resources."
+ type: int
+ inbound_average:
+ description:
+ - "The desired average inbound bit rate in Mbps (Megabits per sec)."
+ - "Used to configure virtual machines networks. If defined, C(inbound_peak) and C(inbound_burst) also has to be set."
+ - "See link:https://libvirt.org/formatnetwork.html#elementQoS[Libvirt-QOS] for further details."
+ type: int
+ inbound_peak:
+ description:
+ - "The maximum inbound rate in Mbps (Megabits per sec)."
+ - "Used to configure virtual machines networks. If defined, C(inbound_average) and C(inbound_burst) also has to be set."
+ - "See link:https://libvirt.org/formatnetwork.html#elementQoS[Libvirt-QOS] for further details."
+ type: int
+ inbound_burst:
+ description:
+ - "The amount of data that can be delivered in a single burst, in MB."
+ - "Used to configure virtual machine networks. If defined, C(inbound_average) and C(inbound_peak) must also be set."
+ - "See link:https://libvirt.org/formatnetwork.html#elementQoS[Libvirt-QOS] for further details."
+ type: int
+ outbound_average:
+ description:
+ - "The desired average outbound bit rate in Mbps (Megabits per sec)."
+ - "Used to configure virtual machines networks. If defined, C(outbound_peak) and C(outbound_burst) also has to be set."
+ - "See link:https://libvirt.org/formatnetwork.html#elementQoS[Libvirt-QOS] for further details."
+ type: int
+ outbound_peak:
+ description:
+ - "The maximum outbound rate in Mbps (Megabits per sec)."
+ - "Used to configure virtual machines networks. If defined, C(outbound_average) and C(outbound_burst) also has to be set."
+ - "See link:https://libvirt.org/formatnetwork.html#elementQoS[Libvirt-QOS] for further details."
+ type: int
+ outbound_burst:
+ description:
+ - "The amount of data that can be sent in a single burst, in MB."
+ - "Used to configure virtual machine networks. If defined, C(outbound_average) and C(outbound_peak) must also be set."
+ - "See link:https://libvirt.org/formatnetwork.html#elementQoS[Libvirt-QOS] for further details."
+ type: int
+ outbound_average_linkshare:
+ description:
+ - "Weighted share."
+ - "Used to configure host networks. Signifies how much of the logical link's capacity a specific network should be
+ allocated, relative to the other networks attached to the same logical link. The exact share depends on the sum
+ of shares of all networks on that link. By default this is a number in the range 1-100."
+ type: int
+ outbound_average_upperlimit:
+ description:
+ - "The maximum bandwidth to be used by a network in Mbps (Megabits per sec)."
+ - "Used to configure host networks. If C(outboundAverageUpperlimit) and
+ C(outbound_average_realtime) are provided, the C(outbound_averageUpperlimit) must not be lower than the C(outbound_average_realtime)."
+ type: int
+ outbound_average_realtime:
+ description:
+ - "The committed rate in Mbps (Megabits per sec)."
+ - "Used to configure host networks. The minimum bandwidth required by a network. The committed rate requested is not
+ guaranteed and will vary depending on the network infrastructure and the committed rate requested by other
+ networks on the same logical link."
+ type: int
+ type:
+ description:
+ - "The type of QoS."
+ choices: ['storage', 'cpu', 'network', 'hostnetwork']
+ type: str
+ state:
+ description:
+ - "Should the QoS be present/absent."
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+- name: Create a new storage QoS with default values for max_iops and max_throughput
+ ovirt.ovirt.ovirt_qos:
+ auth: "{{ ovirt_auth }}"
+ data_center: "Default"
+ name: "test_qos_01"
+ state: "present"
+ type: "storage"
+
+- name: Create a new storage QoS with default values for max_iops and read_throughput but 100 for write throughput
+ ovirt.ovirt.ovirt_qos:
+ auth: "{{ ovirt_auth }}"
+ data_center: "Default"
+ name: "test_qos_01"
+ state: "present"
+ type: "storage"
+ write_throughput: 100
+
+- name: Create a new storage QoS with default values for write_iops and max_throughput but 100 for read iops
+ ovirt.ovirt.ovirt_qos:
+ auth: "{{ ovirt_auth }}"
+ data_center: "Default"
+ name: "test_qos_01"
+ state: "present"
+ type: "storage"
+ read_iops: 100
+
+- name: Create a new storage QoS with 100 max_iops and 200 max_throughput
+ ovirt.ovirt.ovirt_qos:
+ auth: "{{ ovirt_auth }}"
+ data_center: "Default"
+ name: "test_qos_01"
+ state: "present"
+ type: "storage"
+ max_iops: 100
+    max_throughput: 200
+
+- name: Remove a storage QoS
+ ovirt.ovirt.ovirt_qos:
+ auth: "{{ ovirt_auth }}"
+ data_center: "Default"
+ name: "test_qos_01"
+ state: "absent"
+ type: "storage"
+
+- name: Add a network QoS
+ ovirt.ovirt.ovirt_qos:
+ auth: "{{ ovirt_auth }}"
+ name: "myqos"
+ data_center: "Default"
+ state: "present"
+ type: "network"
+ inbound_average: 10
+ inbound_peak: 10
+ inbound_burst: 10
+ outbound_average: 10
+ outbound_peak: 10
+ outbound_burst: 10
+
+- name: Add a hostnetwork QoS
+ ovirt.ovirt.ovirt_qos:
+ auth: "{{ ovirt_auth }}"
+ name: "myqos"
+ data_center: "Default"
+ state: "present"
+ type: "hostnetwork"
+ outbound_average_linkshare: 10
+ outbound_average_upperlimit: 100
+ outbound_average_realtime: 50
+
+- name: Add a cpu QoS
+ ovirt.ovirt.ovirt_qos:
+ auth: "{{ ovirt_auth }}"
+ name: "myqos"
+ data_center: "Default"
+ state: "present"
+ type: "cpu"
+ cpu_limit: 10
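+
+# A hedged sketch: read and write limits may be split per direction,
+# since only the max_* parameters are mutually exclusive with them
+# (the values are illustrative):
+- name: Create a new storage QoS with separate read and write limits
+  ovirt.ovirt.ovirt_qos:
+    auth: "{{ ovirt_auth }}"
+    data_center: "Default"
+    name: "test_qos_02"
+    state: "present"
+    type: "storage"
+    read_iops: 100
+    write_iops: 50
+    read_throughput: 200
+    write_throughput: 100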
+'''
+
+RETURN = '''
+id:
+ description: "ID of the managed QoS"
+ returned: "On success if QoS is found."
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+qos:
+ description: "Dictionary of all the QoS attributes. QoS attributes can be found on your ovirt instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/qos."
+ returned: "On success if QoS is found."
+ type: dict
+'''
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ ovirt_full_argument_spec,
+ search_by_name,
+ get_entity,
+ get_id_by_name
+)
+
+
+class QosModule(BaseModule):
+
+ def _get_qos_type(self, type):
+ if type == 'storage':
+ return otypes.QosType.STORAGE
+ elif type == 'network':
+ return otypes.QosType.NETWORK
+ elif type == 'hostnetwork':
+ return otypes.QosType.HOSTNETWORK
+ elif type == 'cpu':
+ return otypes.QosType.CPU
+ return None
+
+ def build_entity(self):
+ """
+ Abstract method from BaseModule called from create() and remove()
+
+ Builds the QoS from the given params
+
+        :return: otypes.Qos
+ """
+ return otypes.Qos(
+ name=self.param('name'),
+ id=self.param('id'),
+ type=self._get_qos_type(self.param('type')),
+ description=self.param('description'),
+ max_iops=self.param('max_iops'),
+ max_read_iops=self.param('read_iops'),
+ max_read_throughput=self.param('read_throughput'),
+ max_throughput=self.param('max_throughput'),
+            max_write_iops=self.param('write_iops'),
+            max_write_throughput=self.param('write_throughput'),
+ cpu_limit=self.param('cpu_limit'),
+ inbound_average=self.param('inbound_average'),
+ inbound_peak=self.param('inbound_peak'),
+ inbound_burst=self.param('inbound_burst'),
+ outbound_average=self.param('outbound_average'),
+ outbound_peak=self.param('outbound_peak'),
+ outbound_burst=self.param('outbound_burst'),
+ outbound_average_linkshare=self.param('outbound_average_linkshare'),
+ outbound_average_upperlimit=self.param('outbound_average_upperlimit'),
+ outbound_average_realtime=self.param('outbound_average_realtime'),
+ )
+
+
+def _get_qoss_service(connection, dc_name):
+ """
+ Gets the qoss_service from the data_center provided
+
+    :returns: ovirtsdk4.services.QossService
+ """
+ dcs_service = connection.system_service().data_centers_service()
+ return dcs_service.data_center_service(get_id_by_name(dcs_service, dc_name)).qoss_service()
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ id=dict(default=None),
+ name=dict(default=None),
+ description=dict(default=None),
+ data_center=dict(default=None),
+ max_iops=dict(default=None, type='int'),
+ read_iops=dict(default=None, type='int'),
+ write_iops=dict(default=None, type='int'),
+ max_throughput=dict(default=None, type='int'),
+ read_throughput=dict(default=None, type='int'),
+ write_throughput=dict(default=None, type='int'),
+ cpu_limit=dict(default=None, type='int'),
+ inbound_average=dict(default=None, type='int'),
+ inbound_peak=dict(default=None, type='int'),
+ inbound_burst=dict(default=None, type='int'),
+ outbound_average=dict(default=None, type='int'),
+ outbound_peak=dict(default=None, type='int'),
+ outbound_burst=dict(default=None, type='int'),
+ outbound_average_linkshare=dict(default=None, type='int'),
+ outbound_average_upperlimit=dict(default=None, type='int'),
+ outbound_average_realtime=dict(default=None, type='int'),
+ type=dict(
+ choices=['storage', 'cpu', 'network', 'hostnetwork'],
+ default=None,
+ )
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=[['id', 'name']],
+ mutually_exclusive=[
+ ['max_iops', 'read_iops'],
+ ['max_iops', 'write_iops'],
+ ['max_throughput', 'read_throughput'],
+ ['max_throughput', 'write_throughput']
+ ],
+ required_together=[
+ ['inbound_average', 'inbound_peak', 'inbound_burst'],
+ ['outbound_average', 'outbound_peak', 'outbound_burst'],
+ ]
+ )
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ qoss_service = _get_qoss_service(connection, module.params.get('data_center'))
+
+ qos_module = QosModule(
+ connection=connection,
+ module=module,
+ service=qoss_service,
+ )
+
+ if module.params.get('state') == 'present':
+ ret = qos_module.create()
+ elif module.params.get('state') == 'absent':
+ ret = qos_module.remove()
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_quota.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_quota.py
new file mode 100644
index 000000000..92a7f2eb5
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_quota.py
@@ -0,0 +1,331 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_quota
+short_description: Module to manage datacenter quotas in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Module to manage datacenter quotas in oVirt/RHV"
+options:
+ id:
+ description:
+ - "ID of the quota to manage."
+ type: str
+ name:
+ description:
+ - "Name of the quota to manage."
+ type: str
+ required: true
+ state:
+ description:
+ - "Should the quota be present/absent."
+ type: str
+ choices: ['present', 'absent']
+ default: present
+ data_center:
+ description:
+ - "Name of the datacenter where quota should be managed."
+ type: str
+ required: true
+ description:
+ description:
+ - "Description of the quota to manage."
+ type: str
+ cluster_threshold:
+ description:
+ - "Cluster threshold(soft limit) defined in percentage (0-100)."
+ type: int
+ aliases:
+ - "cluster_soft_limit"
+ cluster_grace:
+ description:
+ - "Cluster grace(hard limit) defined in percentage (1-100)."
+ type: int
+ aliases:
+ - "cluster_hard_limit"
+ storage_threshold:
+ description:
+ - "Storage threshold(soft limit) defined in percentage (0-100)."
+ type: int
+ aliases:
+ - "storage_soft_limit"
+ storage_grace:
+ description:
+ - "Storage grace(hard limit) defined in percentage (1-100)."
+ type: int
+ aliases:
+ - "storage_hard_limit"
+ clusters:
+ description:
+ - "List of dictionary of cluster limits, which is valid to specific cluster."
+ - "If cluster isn't specified it's valid to all clusters in system:"
+ type: list
+ elements: dict
+ suboptions:
+            name:
+ description:
+ - Name of the cluster.
+ memory:
+ description:
+ - Memory limit (in GiB).
+ cpu:
+ description:
+ - CPU limit.
+ storages:
+ description:
+ - "List of dictionary of storage limits, which is valid to specific storage."
+ - "If storage isn't specified it's valid to all storages in system:"
+ type: list
+ elements: dict
+ suboptions:
+            name:
+ description:
+ - Name of the storage.
+ size:
+ description:
+ - Size limit (in GiB).
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add cluster quota to cluster cluster1 with a memory limit of 20GiB and a CPU limit of 10:
+- ovirt.ovirt.ovirt_quota:
+ name: quota1
+ data_center: dcX
+ clusters:
+ - name: cluster1
+ memory: 20
+ cpu: 10
+
+# Add cluster quota to all clusters with a memory limit of 30GiB and a CPU limit of 15:
+- ovirt.ovirt.ovirt_quota:
+ name: quota2
+ data_center: dcX
+ clusters:
+ - memory: 30
+ cpu: 15
+
+# Add storage quota to storage data1 with a size limit of 100GiB:
+- ovirt.ovirt.ovirt_quota:
+ name: quota3
+ data_center: dcX
+ storage_grace: 40
+ storage_threshold: 60
+ storages:
+ - name: data1
+ size: 100
+
+# Remove quota quota1 (Note the quota must not be assigned to any VM/disk):
+- ovirt.ovirt.ovirt_quota:
+ state: absent
+ data_center: dcX
+ name: quota1
+
+# Change Quota Name
+- ovirt.ovirt.ovirt_quota:
+ id: 00000000-0000-0000-0000-000000000000
+ name: "new_quota_name"
+ data_center: dcX
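+
+# A hedged sketch combining cluster and storage limits with soft/hard
+# thresholds in a single quota (all names and values are illustrative):
+- ovirt.ovirt.ovirt_quota:
+    name: quota4
+    data_center: dcX
+    cluster_threshold: 60
+    cluster_grace: 80
+    clusters:
+      - name: cluster1
+        memory: 16
+        cpu: 8
+    storages:
+      - name: data1
+        size: 500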
+'''
+
+RETURN = '''
+id:
+ description: ID of the quota which is managed
+ returned: On success if quota is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+quota:
+ description: "Dictionary of all the quota attributes. Quota attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/quota."
+ returned: On success if quota is found.
+ type: dict
+'''
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ get_link_name,
+ ovirt_full_argument_spec,
+ search_by_name,
+)
+
+
+class QuotasModule(BaseModule):
+
+ def build_entity(self):
+ return otypes.Quota(
+ description=self._module.params['description'],
+ name=self._module.params['name'],
+ id=self._module.params['id'],
+ storage_hard_limit_pct=self._module.params.get('storage_grace'),
+ storage_soft_limit_pct=self._module.params.get('storage_threshold'),
+ cluster_hard_limit_pct=self._module.params.get('cluster_grace'),
+ cluster_soft_limit_pct=self._module.params.get('cluster_threshold'),
+ )
+
+ def update_storage_limits(self, entity):
+ new_limits = {}
+ for storage in self._module.params.get('storages'):
+ new_limits[storage.get('name', '')] = {
+ 'size': storage.get('size'),
+ }
+
+ old_limits = {}
+ sd_limit_service = self._service.service(entity.id).quota_storage_limits_service()
+ for limit in sd_limit_service.list():
+ storage = get_link_name(self._connection, limit.storage_domain) if limit.storage_domain else ''
+ old_limits[storage] = {
+ 'size': limit.limit,
+ }
+ sd_limit_service.service(limit.id).remove()
+
+ return new_limits == old_limits
+
+ def update_cluster_limits(self, entity):
+ new_limits = {}
+ for cluster in self._module.params.get('clusters'):
+ new_limits[cluster.get('name', '')] = {
+ 'cpu': int(cluster.get('cpu')),
+ 'memory': float(cluster.get('memory')),
+ }
+
+ old_limits = {}
+ cl_limit_service = self._service.service(entity.id).quota_cluster_limits_service()
+ for limit in cl_limit_service.list():
+ cluster = get_link_name(self._connection, limit.cluster) if limit.cluster else ''
+ old_limits[cluster] = {
+ 'cpu': limit.vcpu_limit,
+ 'memory': limit.memory_limit,
+ }
+ cl_limit_service.service(limit.id).remove()
+
+ return new_limits == old_limits
+
+ def update_check(self, entity):
+ # -- FIXME --
+ # Note that we here always remove all cluster/storage limits, because
+ # it's not currently possible to update them and then re-create the limits
+ # appropriately, this shouldn't have any side-effects, but it's not considered
+ # as a correct approach.
+ # This feature is tracked here: https://bugzilla.redhat.com/show_bug.cgi?id=1398576
+ #
+
+ return (
+ self.update_storage_limits(entity) and
+ self.update_cluster_limits(entity) and
+ equal(self._module.params.get('name'), entity.name) and
+ equal(self._module.params.get('description'), entity.description) and
+ equal(self._module.params.get('storage_grace'), entity.storage_hard_limit_pct) and
+ equal(self._module.params.get('storage_threshold'), entity.storage_soft_limit_pct) and
+ equal(self._module.params.get('cluster_grace'), entity.cluster_hard_limit_pct) and
+ equal(self._module.params.get('cluster_threshold'), entity.cluster_soft_limit_pct)
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ id=dict(default=None),
+ name=dict(required=True),
+ data_center=dict(required=True),
+ description=dict(default=None),
+ cluster_threshold=dict(default=None, type='int', aliases=['cluster_soft_limit']),
+ cluster_grace=dict(default=None, type='int', aliases=['cluster_hard_limit']),
+ storage_threshold=dict(default=None, type='int', aliases=['storage_soft_limit']),
+ storage_grace=dict(default=None, type='int', aliases=['storage_hard_limit']),
+ clusters=dict(default=[], type='list', elements='dict'),
+ storages=dict(default=[], type='list', elements='dict'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ datacenters_service = connection.system_service().data_centers_service()
+ dc_name = module.params['data_center']
+ dc_id = getattr(search_by_name(datacenters_service, dc_name), 'id', None)
+ if dc_id is None:
+ raise Exception("Datacenter '%s' was not found." % dc_name)
+
+ quotas_service = datacenters_service.service(dc_id).quotas_service()
+ quotas_module = QuotasModule(
+ connection=connection,
+ module=module,
+ service=quotas_service,
+ )
+
+ state = module.params['state']
+ if state == 'present':
+ ret = quotas_module.create()
+
+ # Manage cluster limits:
+ cl_limit_service = quotas_service.service(ret['id']).quota_cluster_limits_service()
+ for cluster in module.params.get('clusters'):
+ cl_limit_service.add(
+ limit=otypes.QuotaClusterLimit(
+ memory_limit=float(cluster.get('memory')),
+ vcpu_limit=int(cluster.get('cpu')),
+ cluster=search_by_name(
+ connection.system_service().clusters_service(),
+ cluster.get('name')
+ ),
+ ),
+ )
+
+ # Manage storage limits:
+ sd_limit_service = quotas_service.service(ret['id']).quota_storage_limits_service()
+ for storage in module.params.get('storages'):
+ sd_limit_service.add(
+ limit=otypes.QuotaStorageLimit(
+ limit=storage.get('size'),
+ storage_domain=search_by_name(
+ connection.system_service().storage_domains_service(),
+ storage.get('name')
+ ),
+ )
+ )
+
+ elif state == 'absent':
+ ret = quotas_module.remove()
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_quota_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_quota_info.py
new file mode 100644
index 000000000..078641cce
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_quota_info.py
@@ -0,0 +1,148 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_quota_info
+short_description: Retrieve information about one or more oVirt/RHV quotas
+version_added: "1.0.0"
+author: "Maor Lipchuk (@machacekondra)"
+description:
+ - "Retrieve information about one or more oVirt/RHV quotas."
+ - This module was called C(ovirt_quota_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_quota_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_quotas), which
+ contains a list of quotas. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ data_center:
+ description:
+ - "Name of the datacenter where quota resides."
+ required: true
+ type: str
+ name:
+ description:
+ - "Name of the quota, can be used as glob expression."
+ type: str
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url: https://ovirt.github.io/ovirt-engine-api-model/master/#types/quota/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about quota named C(myquota) in Default datacenter:
+- ovirt.ovirt.ovirt_quota_info:
+ data_center: Default
+ name: myquota
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_quotas }}"
+'''
+
+RETURN = '''
+ovirt_quotas:
+ description: "List of dictionaries describing the quotas. Quota attributes are mapped to dictionary keys,
+                all quota attributes can be found at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/quota."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ data_center=dict(required=True),
+ name=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+ "The 'fetch_nested' and 'nested_attributes' are deprecated please use 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ datacenters_service = connection.system_service().data_centers_service()
+ dc_name = module.params['data_center']
+ dc = search_by_name(datacenters_service, dc_name)
+ if dc is None:
+ raise Exception("Datacenter '%s' was not found." % dc_name)
+
+ quotas_service = datacenters_service.service(dc.id).quotas_service()
+ if module.params['name']:
+ quotas = [
+ e for e in quotas_service.list(follow=",".join(module.params['follow']))
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ else:
+ quotas = quotas_service.list(follow=",".join(module.params['follow']))
+
+ result = dict(
+ ovirt_quotas=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in quotas
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_role.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_role.py
new file mode 100644
index 000000000..600840546
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_role.py
@@ -0,0 +1,196 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_role
+short_description: Module to manage roles in oVirt/RHV
+version_added: "1.0.0"
+author: "Martin Necas (@mnecas)"
+description:
+ - "Module to manage roles in oVirt/RHV."
+options:
+ name:
+ description:
+ - "Name of the role to manage."
+ type: str
+ id:
+ description:
+ - "ID of the role to manage."
+ type: str
+ description:
+ description:
+ - "Description of the role."
+ type: str
+ state:
+ description:
+ - "Should the role be present/absent."
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ administrative:
+ description:
+ - "Defines the role as administrative-only or not."
+ type: bool
+ default: false
+ permits:
+ description:
+ - "List of permits which role will have"
+ - "Permit 'login' is default and all roles will have it."
+ - "List can contain name of permit."
+ type: list
+ elements: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create administrative role with two permits
+- ovirt.ovirt.ovirt_role:
+ name: role
+ administrative: true
+ permits:
+ - manipulate_permissions
+ - create_instance
+
+# Remove role
+- ovirt.ovirt.ovirt_role:
+ name: role
+ state: absent
+
+# Remove all permits (only the default login permit remains)
+- ovirt.ovirt.ovirt_role:
+ name: role
+    administrative: true
+ permits:
+ - login
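+
+# A hedged sketch of addressing a role by ID instead of name
+# (the UUID below is a placeholder):
+- ovirt.ovirt.ovirt_role:
+    id: 00000000-0000-0000-0000-000000000000
+    state: absent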
+'''
+
+RETURN = '''
+ovirt_role:
+ description: "List of dictionaries describing the Roles. Role attributes are mapped to dictionary keys,
+                all role attributes can be found at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/role."
+ returned: On success.
+ type: list
+'''
+
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ convert_to_bytes,
+ create_connection,
+ equal,
+ get_dict_of_struct,
+ get_link_name,
+ get_id_by_name,
+ ovirt_full_argument_spec,
+ search_by_attributes,
+ search_by_name,
+)
+from ansible.module_utils.basic import AnsibleModule
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+
+class RoleModule(BaseModule):
+ def build_entity(self):
+ if 'login' not in self.param('permits'):
+ self.param('permits').append('login')
+ all_permits = self.get_all_permits()
+ return otypes.Role(
+ id=self.param('id'),
+ name=self.param('name'),
+ administrative=self.param('administrative') if self.param(
+ 'administrative') else None,
+ permits=[
+ otypes.Permit(id=all_permits.get(new_permit)) for new_permit in self.param('permits')
+ ] if self.param('permits') else None,
+            description=self.param('description'),
+ )
+
+ def get_all_permits(self):
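+        # Map permit names to IDs using the permits defined for cluster level 4.3.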
+ return dict((permit.name, permit.id) for permit in self._connection.system_service().cluster_levels_service().level_service('4.3').get().permits)
+
+ def update_check(self, entity):
+ def check_permits():
+ if self.param('permits'):
+ if 'login' not in self.param('permits'):
+ self.param('permits').append('login')
+ permits_service = self._service.service(entity.id).permits_service()
+ current = [er.name for er in permits_service.list()]
+ passed = self.param('permits')
+ if not sorted(current) == sorted(passed):
+ if self._module.check_mode:
+ return False
+ # remove all
+ for permit in permits_service.list():
+ permits_service.permit_service(permit.id).remove()
+ # add passed permits
+ all_permits = self.get_all_permits()
+ for new_permit in passed:
+ permits_service.add(otypes.Permit(id=all_permits.get(new_permit)))
+ return False
+ return True
+
+ return (
+ check_permits() and
+ equal(self.param('administrative'), entity.administrative) and
+ equal(self.param('description'), entity.description)
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ id=dict(default=None),
+ name=dict(default=None),
+ description=dict(default=None),
+ administrative=dict(type='bool', default=False),
+ permits=dict(type='list', default=[], elements='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[['id', 'name']],
+ )
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ roles_service = connection.system_service().roles_service()
+ roles_module = RoleModule(
+ connection=connection,
+ module=module,
+ service=roles_service,
+ )
+ state = module.params['state']
+ if state == 'present':
+ ret = roles_module.create()
+ elif state == 'absent':
+ ret = roles_module.remove()
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_scheduling_policy_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_scheduling_policy_info.py
new file mode 100644
index 000000000..cc1434efa
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_scheduling_policy_info.py
@@ -0,0 +1,147 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_scheduling_policy_info
+short_description: Retrieve information about one or more oVirt scheduling policies
+version_added: "1.0.0"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Retrieve information about one or more oVirt scheduling policies."
+ - This module was called C(ovirt_scheduling_policy_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_scheduling_policy_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_scheduling_policies),
+ which contains a list of scheduling policies. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ id:
+ description:
+ - "ID of the scheduling policy."
+ type: str
+ name:
+ description:
+ - "Name of the scheduling policy, can be used as glob expression."
+ type: str
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url:
+ https://ovirt.github.io/ovirt-engine-api-model/master/#types/scheduling_policy/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all scheduling policies with name InClusterUpgrade:
+- ovirt.ovirt.ovirt_scheduling_policy_info:
+ name: InClusterUpgrade
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_scheduling_policies }}"
+'''
+
+RETURN = '''
+ovirt_scheduling_policies:
+ description: "List of dictionaries describing the scheduling policies.
+                Scheduling policy attributes are mapped to dictionary keys,
+                all scheduling policy attributes can be found at the following
+ url: https://ovirt.example.com/ovirt-engine/api/model#types/scheduling_policy."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ id=dict(default=None),
+ name=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+ "The 'fetch_nested' and 'nested_attributes' are deprecated please use 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ system_service = connection.system_service()
+ sched_policies_service = system_service.scheduling_policies_service()
+ if module.params['name']:
+ sched_policies = [
+ e for e in sched_policies_service.list(follow=",".join(module.params['follow']))
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ elif module.params['id']:
+ sched_policies = [
+ sched_policies_service.service(module.params['id']).get()
+ ]
+ else:
+ sched_policies = sched_policies_service.list(follow=",".join(module.params['follow']))
+
+ result = dict(
+ ovirt_scheduling_policies=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in sched_policies
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_snapshot.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_snapshot.py
new file mode 100644
index 000000000..a40119dea
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_snapshot.py
@@ -0,0 +1,571 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_snapshot
+short_description: "Module to manage Virtual Machine Snapshots in oVirt/RHV"
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Module to manage Virtual Machine Snapshots in oVirt/RHV"
+options:
+ snapshot_id:
+ description:
+ - "ID of the snapshot to manage."
+ type: str
+ vm_name:
+ description:
+ - "Name of the Virtual Machine to manage. Required one of C(vm_name) or C(vm_id)."
+ type: str
+ vm_id:
+ description:
+ - "ID of the Virtual Machine to manage. Required one of C(vm_name) or C(vm_id)."
+ type: str
+ version_added: "2.2.0"
+ state:
+ description:
+ - "Should the Virtual Machine snapshot be restore/present/absent."
+ choices: ['restore', 'present', 'absent']
+ default: present
+ type: str
+ description:
+ description:
+ - "Description of the snapshot."
+ type: str
+ disk_id:
+ description:
+ - "Disk id which you want to upload or download"
+ - "To get disk, you need to define disk_id or disk_name"
+ type: str
+ disk_name:
+ description:
+ - "Disk name which you want to upload or download"
+ type: str
+ download_image_path:
+ description:
+ - "Path on a file system where snapshot should be downloaded."
+ - "Note that you must have an valid oVirt/RHV engine CA in your system trust store
+ or you must provide it in C(ca_file) parameter."
+ - "Note that the snapshot is not downloaded when the file already exists,
+ but you can forcibly download the snapshot when using C(force) I (true)."
+ type: str
+ upload_image_path:
+ description:
+ - "Path to disk image, which should be uploaded."
+ type: str
+ use_memory:
+ description:
+ - "If I(true) and C(state) is I(present) save memory of the Virtual
+ Machine if it's running."
+ - "If I(true) and C(state) is I(restore) restore memory of the
+ Virtual Machine."
+ - "Note that Virtual Machine will be paused while saving the memory."
+ aliases:
+ - "restore_memory"
+ - "save_memory"
+ type: bool
+ keep_days_old:
+ description:
+ - "Number of days after which should snapshot be deleted."
+ - "It will check all snapshots of virtual machine and delete them, if they are older."
+ type: int
+ disks:
+ description:
+ - "List of disks which should be created with snapshot."
+ suboptions:
+ id:
+ description:
+ - "Id of the disk which should will be created."
+ type: str
+ name:
+ description:
+ - "Name of the disk which should will be created."
+ type: str
+ type: list
+ elements: dict
+notes:
+ - "Note that without a guest agent the data on the created snapshot may be
+ inconsistent."
+ - "Deleting a snapshot does not remove any information from the virtual
+ machine - it simply removes a return-point. However, restoring a virtual
+ machine from a snapshot deletes any content that was written to the
+ virtual machine after the time the snapshot was taken."
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create snapshot:
+- ovirt.ovirt.ovirt_snapshot:
+ vm_name: rhel7
+ description: MySnapshot
+ register: snapshot
+
+# Create snapshot and save memory:
+- ovirt.ovirt.ovirt_snapshot:
+ vm_name: rhel7
+ description: SnapWithMem
+ use_memory: true
+ register: snapshot
+
+# Restore snapshot:
+- ovirt.ovirt.ovirt_snapshot:
+ state: restore
+ vm_name: rhel7
+ snapshot_id: "{{ snapshot.id }}"
+
+# Remove snapshot:
+- ovirt.ovirt.ovirt_snapshot:
+ state: absent
+ vm_name: rhel7
+ snapshot_id: "{{ snapshot.id }}"
+
+# Upload local image to disk and attach it to vm:
+# Since Ansible 2.8
+- ovirt.ovirt.ovirt_snapshot:
+    disk_name: mydisk
+ vm_name: myvm
+ upload_image_path: /path/to/mydisk.qcow2
+
+# Download snapshot to local file system:
+# Since Ansible 2.8
+- ovirt.ovirt.ovirt_snapshot:
+ snapshot_id: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+ disk_name: DiskName
+ vm_name: myvm
+    download_image_path: /home/user/mysnapshot.qcow2
+
+# Delete all snapshots older than 2 days
+- ovirt.ovirt.ovirt_snapshot:
+ vm_name: test
+ keep_days_old: 2
+
+- name: Select which disks should be added to the snapshot
+ ovirt.ovirt.ovirt_snapshot:
+ vm_name: test
+ disks:
+ - id: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+ - name: my_disk_name
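+
+# A hedged variation of the restore example above: also restore the saved
+# memory state (assumes the snapshot was created with use_memory):
+- ovirt.ovirt.ovirt_snapshot:
+    state: restore
+    vm_name: rhel7
+    snapshot_id: "{{ snapshot.id }}"
+    use_memory: true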
+'''
+
+
+RETURN = '''
+id:
+ description: ID of the snapshot which is managed
+ returned: On success if snapshot is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+snapshot:
+ description: "Dictionary of all the snapshot attributes. Snapshot attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/snapshot."
+ returned: On success if snapshot is found.
+ type: dict
+snapshots:
+  description: List of deleted snapshots when C(keep_days_old) is defined and snapshots are older than the given number of days
+  returned: On success, returns the deleted snapshots
+ type: list
+'''
+
+
+import traceback
+
+try:
+ import ovirtsdk4 as sdk
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+
+import os
+import ssl
+import time
+
+from ansible.module_utils.six.moves.http_client import HTTPSConnection, IncompleteRead
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+from datetime import datetime
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ get_entity,
+ ovirt_full_argument_spec,
+ search_by_name,
+ wait,
+ get_id_by_name,
+ get_link_name
+)
+
+
+def transfer(connection, module, direction, transfer_func):
+ transfers_service = connection.system_service().image_transfers_service()
+ transfer = transfers_service.add(
+ otypes.ImageTransfer(
+ image=otypes.Image(
+ id=module.params['disk_id'],
+ ),
+ direction=direction,
+ )
+ )
+ transfer_service = transfers_service.image_transfer_service(transfer.id)
+
+ try:
+ # After adding a new transfer for the disk, the transfer's status will be INITIALIZING.
+ # Wait until the init phase is over. The actual transfer can start when its status is "Transferring".
+ while transfer.phase == otypes.ImageTransferPhase.INITIALIZING:
+ time.sleep(module.params['poll_interval'])
+ transfer = transfer_service.get()
+
+ proxy_url = urlparse(transfer.proxy_url)
+ context = ssl.create_default_context()
+ auth = module.params['auth']
+ if auth.get('insecure'):
+ context.check_hostname = False
+ context.verify_mode = ssl.CERT_NONE
+ elif auth.get('ca_file'):
+ context.load_verify_locations(cafile=auth.get('ca_file'))
+
+ proxy_connection = HTTPSConnection(
+ proxy_url.hostname,
+ proxy_url.port,
+ context=context,
+ )
+
+ transfer_func(
+ transfer_service,
+ proxy_connection,
+ proxy_url,
+ transfer.signed_ticket
+ )
+ return True
+ finally:
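+        # Always finalize the transfer, then poll until the engine reports a
+        # terminal phase; any phase other than FINISHED_SUCCESS raises below.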
+ transfer_service.finalize()
+ while transfer.phase in [
+ otypes.ImageTransferPhase.TRANSFERRING,
+ otypes.ImageTransferPhase.FINALIZING_SUCCESS,
+ ]:
+ time.sleep(module.params['poll_interval'])
+ transfer = transfer_service.get()
+ if transfer.phase in [
+ otypes.ImageTransferPhase.UNKNOWN,
+ otypes.ImageTransferPhase.FINISHED_FAILURE,
+ otypes.ImageTransferPhase.FINALIZING_FAILURE,
+ otypes.ImageTransferPhase.CANCELLED,
+ ]:
+ raise Exception(
+ "Error occurred while uploading image. The transfer is in %s" % transfer.phase
+ )
+ if module.params.get('logical_unit'):
+ disks_service = connection.system_service().disks_service()
+ wait(
+ service=disks_service.service(module.params['id']),
+ condition=lambda d: d.status == otypes.DiskStatus.OK,
+ wait=module.params['wait'],
+ timeout=module.params['timeout'],
+ )
+
+
+def upload_disk_image(connection, module):
+ def _transfer(transfer_service, proxy_connection, proxy_url, transfer_ticket):
+ BUF_SIZE = 128 * 1024
+ path = module.params['upload_image_path']
+
+ image_size = os.path.getsize(path)
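+        # Stream the image to the imageio proxy with a raw HTTP PUT, sending
+        # BUF_SIZE chunks so memory usage stays flat regardless of image size.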
+ proxy_connection.putrequest("PUT", proxy_url.path)
+ proxy_connection.putheader('Content-Length', "%d" % (image_size,))
+ proxy_connection.endheaders()
+ with open(path, "rb") as disk:
+ pos = 0
+ while pos < image_size:
+ to_read = min(image_size - pos, BUF_SIZE)
+ chunk = disk.read(to_read)
+ if not chunk:
+ transfer_service.pause()
+ raise RuntimeError("Unexpected end of file at pos=%d" % pos)
+ proxy_connection.send(chunk)
+ pos += len(chunk)
+
+ return transfer(
+ connection,
+ module,
+ otypes.ImageTransferDirection.UPLOAD,
+ transfer_func=_transfer,
+ )
+
+
+def download_disk_image(connection, module):
+ def _transfer(transfer_service, proxy_connection, proxy_url, transfer_ticket):
+ BUF_SIZE = 128 * 1024
+ transfer_headers = {
+ 'Authorization': transfer_ticket,
+ }
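+        # Issue a raw GET with the signed transfer ticket and stream the
+        # response body to the local file in BUF_SIZE chunks.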
+ proxy_connection.request(
+ 'GET',
+ proxy_url.path,
+ headers=transfer_headers,
+ )
+ r = proxy_connection.getresponse()
+ path = module.params["download_image_path"]
+ image_size = int(r.getheader('Content-Length'))
+ with open(path, "wb") as mydisk:
+ pos = 0
+ while pos < image_size:
+ to_read = min(image_size - pos, BUF_SIZE)
+ chunk = r.read(to_read)
+ if not chunk:
+ raise RuntimeError("Socket disconnected")
+ mydisk.write(chunk)
+ pos += len(chunk)
+
+ return transfer(
+ connection,
+ module,
+ otypes.ImageTransferDirection.DOWNLOAD,
+ transfer_func=_transfer,
+ )
+
+
+def get_disk_attachment(disk, disk_attachments, connection):
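+    # Return the attachment whose disk matches the given name or ID; None if no match.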
+ for disk_attachment in disk_attachments:
+ if get_link_name(connection, disk_attachment.disk) == disk.get('name') or\
+ disk_attachment.disk.id == disk.get('id'):
+ return disk_attachment
+
+
+def create_snapshot(module, vm_service, snapshots_service, connection):
+ changed = False
+ snapshot = get_entity(
+ snapshots_service.snapshot_service(module.params['snapshot_id'])
+ )
+ if snapshot is None:
+ if not module.check_mode:
+ disk_attachments_id = set(
+ get_disk_attachment(disk, vm_service.disk_attachments_service().list(), connection).id
+ for disk in module.params.get('disks')
+ ) if module.params.get('disks') else None
+
+ snapshot = snapshots_service.add(
+ otypes.Snapshot(
+ description=module.params.get('description'),
+ persist_memorystate=module.params.get('use_memory'),
+ disk_attachments=[otypes.DiskAttachment(disk=otypes.Disk(id=da_id)) for da_id in disk_attachments_id] if disk_attachments_id else None
+ )
+ )
+ changed = True
+ wait(
+ service=snapshots_service.snapshot_service(snapshot.id),
+ condition=lambda snap: snap.snapshot_status == otypes.SnapshotStatus.OK,
+ wait=module.params['wait'],
+ timeout=module.params['timeout'],
+ )
+ return {
+ 'changed': changed,
+ 'id': snapshot.id,
+ 'snapshot': get_dict_of_struct(snapshot),
+ }
+
+
+def remove_snapshot(module, vm_service, snapshots_service, snapshot_id=None):
+ changed = False
+ if not snapshot_id:
+ snapshot_id = module.params['snapshot_id']
+ snapshot = get_entity(
+ snapshots_service.snapshot_service(snapshot_id)
+ )
+
+ if snapshot:
+ snapshot_service = snapshots_service.snapshot_service(snapshot.id)
+ if not module.check_mode:
+ snapshot_service.remove()
+ changed = True
+ wait(
+ service=snapshot_service,
+ condition=lambda snapshot: snapshot is None,
+ wait=module.params['wait'],
+ timeout=module.params['timeout'],
+ )
+
+ return {
+ 'changed': changed,
+ 'id': snapshot.id if snapshot else None,
+ 'snapshot': get_dict_of_struct(snapshot),
+ }
+
+
+def restore_snapshot(module, vm_service, snapshots_service):
+ changed = False
+ snapshot_service = snapshots_service.snapshot_service(
+ module.params['snapshot_id']
+ )
+ snapshot = get_entity(snapshot_service)
+ if snapshot is None:
+ raise Exception(
+ "Snapshot with id '%s' doesn't exist" % module.params['snapshot_id']
+ )
+
+ if snapshot.snapshot_status != otypes.SnapshotStatus.IN_PREVIEW:
+ if not module.check_mode:
+ snapshot_service.restore(
+ restore_memory=module.params.get('use_memory'),
+ )
+ changed = True
+ else:
+ if not module.check_mode:
+ vm_service.commit_snapshot()
+ changed = True
+
+ if changed:
+ wait(
+ service=snapshot_service,
+ condition=lambda snap: snap.snapshot_status == otypes.SnapshotStatus.OK,
+ wait=module.params['wait'],
+ timeout=module.params['timeout'],
+ )
+ return {
+ 'changed': changed,
+ 'id': snapshot.id if snapshot else None,
+ 'snapshot': get_dict_of_struct(snapshot),
+ }
+
+
+def get_snapshot_disk_id(module, snapshots_service):
+ snapshot_service = snapshots_service.snapshot_service(module.params.get('snapshot_id'))
+ snapshot_disks_service = snapshot_service.disks_service()
+
+ disk_id = ''
+ if module.params.get('disk_id'):
+ disk_id = module.params.get('disk_id')
+ elif module.params.get('disk_name'):
+ disk_id = get_id_by_name(snapshot_disks_service, module.params.get('disk_name'))
+ return disk_id
+
+
+def remove_old_snapshots(module, vm_service, snapshots_service):
+ deleted_snapshots = []
+ changed = False
+ date_now = datetime.now()
+ for snapshot in snapshots_service.list():
+ if snapshot.vm is not None and snapshot.vm.name == module.params.get('vm_name'):
+ diff = date_now - snapshot.date.replace(tzinfo=None)
+ if diff.days >= module.params.get('keep_days_old'):
+ snapshot = remove_snapshot(module, vm_service, snapshots_service, snapshot.id).get('snapshot')
+ deleted_snapshots.append(snapshot)
+ changed = True
+ return dict(snapshots=deleted_snapshots, changed=changed)
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['restore', 'present', 'absent'],
+ default='present',
+ ),
+ vm_name=dict(default=None),
+ vm_id=dict(default=None),
+ snapshot_id=dict(default=None),
+ disks=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ name=dict(default=None, type='str'),
+ id=dict(default=None, type='str'),
+ )
+ ),
+ disk_id=dict(default=None),
+ disk_name=dict(default=None),
+ description=dict(default=None),
+ download_image_path=dict(default=None),
+ upload_image_path=dict(default=None),
+ keep_days_old=dict(default=None, type='int'),
+ use_memory=dict(
+ default=None,
+ type='bool',
+ aliases=['restore_memory', 'save_memory'],
+ ),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[['vm_name', 'vm_id']],
+ required_if=[
+ ('state', 'absent', ['snapshot_id']),
+ ('state', 'restore', ['snapshot_id']),
+ ]
+ )
+
+ check_sdk(module)
+ ret = {}
+ auth = module.params['auth']
+ connection = create_connection(auth)
+ vms_service = connection.system_service().vms_service()
+
+ if module.params.get('vm_id'):
+ try:
+ vm = vms_service.vm_service(module.params.get('vm_id')).get()
+ except sdk.NotFoundError:
+ module.fail_json(
+ msg="Vm '{vm_id}' doesn't exist.".format(vm_id=module.params.get('vm_id')),
+ )
+ elif module.params.get('vm_name'):
+ vm = search_by_name(vms_service, module.params.get('vm_name'))
+ if not vm:
+ module.fail_json(
+ msg="Vm '{name}' doesn't exist.".format(name=module.params.get('vm_name')),
+ )
+
+ vm_service = vms_service.vm_service(vm.id)
+ snapshots_service = vms_service.vm_service(vm.id).snapshots_service()
+ try:
+ state = module.params['state']
+ if state == 'present':
+ if module.params.get('disk_id') or module.params.get('disk_name'):
+ module.params['disk_id'] = get_snapshot_disk_id(module, snapshots_service)
+ if module.params['upload_image_path']:
+ ret['changed'] = upload_disk_image(connection, module)
+ if module.params['download_image_path']:
+ ret['changed'] = download_disk_image(connection, module)
+ if module.params.get('keep_days_old') is not None:
+                ret = remove_old_snapshots(module, vm_service, snapshots_service)
+ else:
+ ret = create_snapshot(module, vm_service, snapshots_service, connection)
+ elif state == 'restore':
+ ret = restore_snapshot(module, vm_service, snapshots_service)
+ elif state == 'absent':
+ ret = remove_snapshot(module, vm_service, snapshots_service)
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_snapshot_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_snapshot_info.py
new file mode 100644
index 000000000..ae493cc26
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_snapshot_info.py
@@ -0,0 +1,145 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_snapshot_info
+short_description: Retrieve information about one or more oVirt/RHV virtual machine snapshots
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV virtual machine snapshots."
+ - This module was called C(ovirt_snapshot_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_snapshot_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_snapshots), which
+ contains a list of snapshots. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ vm:
+ description:
+ - "Name of the VM with snapshot."
+ required: true
+ type: str
+ description:
+ description:
+ - "Description of the snapshot, can be used as glob expression."
+ type: str
+ snapshot_id:
+ description:
+ - "Id of the snapshot we want to retrieve information about."
+ type: str
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url: https://ovirt.github.io/ovirt-engine-api-model/master/#types/snapshot/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all snapshots whose description starts with C(update) for the VM named C(centos7):
+- ovirt.ovirt.ovirt_snapshot_info:
+ vm: centos7
+ description: update*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_snapshots }}"
+'''
+
+RETURN = '''
+ovirt_snapshots:
+ description: "List of dictionaries describing the snapshot. Snapshot attributes are mapped to dictionary keys,
+ all snapshot attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/snapshot."
+ returned: On success.
+ type: list
+'''
+
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ vm=dict(required=True),
+ description=dict(default=None),
+ snapshot_id=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+ "The 'fetch_nested' and 'nested_attributes' are deprecated please use 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vms_service = connection.system_service().vms_service()
+ vm_name = module.params['vm']
+ vm = search_by_name(vms_service, vm_name)
+ if vm is None:
+ raise Exception("VM '%s' was not found." % vm_name)
+
+ snapshots_service = vms_service.service(vm.id).snapshots_service()
+ if module.params['description']:
+ snapshots = [
+ e for e in snapshots_service.list(follow=",".join(module.params['follow']))
+ if fnmatch.fnmatch(e.description, module.params['description'])
+ ]
+ elif module.params['snapshot_id']:
+ snapshots = [
+ snapshots_service.snapshot_service(module.params['snapshot_id']).get()
+ ]
+ else:
+ snapshots = snapshots_service.list(follow=",".join(module.params['follow']))
+
+ result = dict(
+ ovirt_snapshots=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in snapshots
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_connection.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_connection.py
new file mode 100644
index 000000000..5bfbb786f
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_connection.py
@@ -0,0 +1,300 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_connection
+short_description: Module to manage storage connections in oVirt
+version_added: "1.0.0"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage storage connections in oVirt"
+options:
+ id:
+ description:
+ - "Id of the storage connection to manage."
+ type: str
+ state:
+ description:
+ - "Should the storage connection be present or absent."
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ storage:
+ description:
+ - "Name of the storage domain to be used with storage connection."
+ type: str
+ address:
+ description:
+ - "Address of the storage server. E.g.: myserver.mydomain.com"
+ type: str
+ path:
+ description:
+ - "Path of the mount point of the storage. E.g.: /path/to/my/data"
+ type: str
+ nfs_version:
+ description:
+ - "NFS version. One of: I(auto), I(v3), I(v4) or I(v4_1)."
+ type: str
+ nfs_timeout:
+ description:
+ - "The time in tenths of a second to wait for a response before retrying NFS requests. Range 0 to 65535."
+ type: int
+ nfs_retrans:
+ description:
+ - "The number of times to retry a request before attempting further recovery actions. Range 0 to 65535."
+ type: int
+ mount_options:
+ description:
+ - "Option which will be passed when mounting storage."
+ type: str
+ password:
+ description:
+ - "A CHAP password for logging into a target."
+ type: str
+ username:
+ description:
+ - "A CHAP username for logging into a target."
+ type: str
+ port:
+ description:
+ - "Port of the iSCSI storage server."
+ type: int
+ target:
+ description:
+ - "The target IQN for the storage device."
+ type: str
+ type:
+ description:
+ - "Storage type. For example: I(nfs), I(iscsi), etc."
+ type: str
+ vfs_type:
+ description:
+ - "Virtual File System type."
+ type: str
+ force:
+ description:
+ - "This parameter is relevant only when updating a connection."
+ - "If I(true) the storage domain don't have to be in I(MAINTENANCE)
+ state, so the storage connection is updated."
+ type: bool
+ default: false
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add new storage connection:
+- ovirt.ovirt.ovirt_storage_connection:
+ storage: myiscsi
+ address: 10.34.63.199
+ target: iqn.2016-08-09.domain-01:nickname
+ port: 3260
+ type: iscsi
+
+# Update the existing storage connection address:
+- ovirt.ovirt.ovirt_storage_connection:
+ id: 26915c96-92ff-47e5-9e77-b581db2f2d36
+ address: 10.34.63.204
+ force: true
+
+# Remove storage connection:
+- ovirt.ovirt.ovirt_storage_connection:
+    state: absent
+    id: 26915c96-92ff-47e5-9e77-b581db2f2d36
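+
+# A hedged sketch of adding an NFS storage connection
+# (address and path are illustrative):
+- ovirt.ovirt.ovirt_storage_connection:
+    address: 10.34.63.199
+    path: /path/data
+    type: nfs
+    nfs_version: v4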
+'''
+
+RETURN = '''
+id:
+ description: ID of the storage connection which is managed
+ returned: On success if storage connection is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+storage_connection:
+ description: "Dictionary of all the storage connection attributes. Storage connection attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/storage_connection."
+ returned: On success if storage connection is found.
+ type: dict
+'''
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ ovirt_full_argument_spec,
+ search_by_name,
+)
+
+
+class StorageConnectionModule(BaseModule):
+
+ def build_entity(self):
+ return otypes.StorageConnection(
+ address=self.param('address'),
+ path=self.param('path'),
+ nfs_version=otypes.NfsVersion(
+ self.param('nfs_version')
+ ) if self.param('nfs_version') is not None else None,
+ nfs_timeo=self.param('nfs_timeout'),
+ nfs_retrans=self.param('nfs_retrans'),
+ mount_options=self.param('mount_options'),
+ password=self.param('password'),
+ username=self.param('username'),
+ port=self.param('port'),
+ target=self.param('target'),
+ type=otypes.StorageType(
+ self.param('type')
+ ) if self.param('type') is not None else None,
+ vfs_type=self.param('vfs_type'),
+ )
+
+ def _get_storage_domain_service(self):
+ sds_service = self._connection.system_service().storage_domains_service()
+ sd = search_by_name(sds_service, self.param('storage'))
+ if sd is None:
+ raise Exception(
+ "Storage '%s' was not found." % self.param('storage')
+ )
+ return sd, sds_service.storage_domain_service(sd.id)
+
+ def post_present(self, entity_id):
+ if self.param('storage'):
+ sd, sd_service = self._get_storage_domain_service()
+ if entity_id not in [
+ sd_conn.id for sd_conn in self._connection.follow_link(sd.storage_connections)
+ ]:
+ scs_service = sd_service.storage_connections_service()
+ if not self._module.check_mode:
+ scs_service.add(
+ connection=otypes.StorageConnection(
+ id=entity_id,
+ ),
+ )
+ self.changed = True
+
+ def pre_remove(self, entity):
+ if self.param('storage'):
+ sd, sd_service = self._get_storage_domain_service()
+ if entity in [
+ sd_conn.id for sd_conn in self._connection.follow_link(sd.storage_connections)
+ ]:
+ scs_service = sd_service.storage_connections_service()
+ sc_service = scs_service.connection_service(entity)
+ if not self._module.check_mode:
+ sc_service.remove()
+ self.changed = True
+
+ def update_check(self, entity):
+ return (
+ equal(self.param('address'), entity.address) and
+ equal(self.param('path'), entity.path) and
+ equal(self.param('nfs_version'), str(entity.nfs_version)) and
+ equal(self.param('nfs_timeout'), entity.nfs_timeo) and
+ equal(self.param('nfs_retrans'), entity.nfs_retrans) and
+ equal(self.param('mount_options'), entity.mount_options) and
+ equal(self.param('username'), entity.username) and
+ equal(self.param('port'), entity.port) and
+ equal(self.param('target'), entity.target) and
+ equal(self.param('type'), str(entity.type)) and
+ equal(self.param('vfs_type'), entity.vfs_type)
+ )
+
+
+def find_sc_by_attributes(module, storage_connections_service):
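+    # No ID was given, so try to match an existing connection by type plus
+    # address/path (file-based types) or address/target (block-based types).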
+ for sd_conn in [
+ sc for sc in storage_connections_service.list()
+ if str(sc.type) == module.params['type']
+ ]:
+ sd_conn_type = str(sd_conn.type)
+ if sd_conn_type in ['nfs', 'posixfs', 'glusterfs', 'localfs']:
+ if (
+ module.params['address'] == sd_conn.address and
+ module.params['path'] == sd_conn.path
+ ):
+ return sd_conn
+ elif sd_conn_type in ['iscsi', 'fcp']:
+ if (
+ module.params['address'] == sd_conn.address and
+ module.params['target'] == sd_conn.target
+ ):
+ return sd_conn
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ id=dict(default=None),
+ address=dict(default=None),
+ path=dict(default=None),
+ nfs_version=dict(default=None),
+ nfs_timeout=dict(default=None, type='int'),
+ nfs_retrans=dict(default=None, type='int'),
+ mount_options=dict(default=None),
+ password=dict(default=None, no_log=True),
+ username=dict(default=None),
+ port=dict(default=None, type='int'),
+ target=dict(default=None),
+ type=dict(default=None),
+ vfs_type=dict(default=None),
+ force=dict(type='bool', default=False),
+ storage=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ storage_connections_service = connection.system_service().storage_connections_service()
+ storage_connection_module = StorageConnectionModule(
+ connection=connection,
+ module=module,
+ service=storage_connections_service,
+ )
+ entity = None
+ if module.params['id'] is None:
+ entity = find_sc_by_attributes(module, storage_connections_service)
+
+ state = module.params['state']
+ if state == 'present':
+ ret = storage_connection_module.create(
+ entity=entity,
+ update_params={'force': True},
+ )
+ storage_connection_module.post_present(ret['id'])
+ elif state == 'absent':
+ storage_connection_module.pre_remove(module.params['id'])
+ ret = storage_connection_module.remove(entity=entity)
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_domain.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_domain.py
new file mode 100644
index 000000000..ff6fed88c
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_domain.py
@@ -0,0 +1,849 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_domain
+short_description: Module to manage storage domains in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Module to manage storage domains in oVirt/RHV"
+options:
+ id:
+ description:
+ - "Id of the storage domain to be imported."
+ type: str
+ name:
+ description:
+ - "Name of the storage domain to manage. (Not required when state is I(imported))"
+ type: str
+ state:
+ description:
+ - "Should the storage domain be present/absent/maintenance/unattached/imported/update_ovf_store"
+ - "I(imported) is supported since version 2.4."
+ - "I(update_ovf_store) is supported since version 2.5, currently if C(wait) is (true), we don't wait for update."
+ choices: ['present', 'absent', 'maintenance', 'unattached', 'imported', 'update_ovf_store']
+ default: present
+ type: str
+ description:
+ description:
+ - "Description of the storage domain."
+ type: str
+ comment:
+ description:
+ - "Comment of the storage domain."
+ type: str
+ data_center:
+ description:
+ - "Data center name where storage domain should be attached."
+ - "This parameter isn't idempotent, it's not possible to change data center of storage domain."
+ type: str
+ domain_function:
+ description:
+ - "Function of the storage domain."
+ - "This parameter isn't idempotent, it's not possible to change domain function of storage domain."
+ choices: ['data', 'iso', 'export']
+ default: 'data'
+ aliases: ['type']
+ type: str
+ host:
+ description:
+ - "Host to be used to mount storage."
+ type: str
+ localfs:
+ description:
+ - "Dictionary with values for localfs storage type:"
+ - "Note that these parameters are not idempotent."
+ suboptions:
+ path:
+ description:
+ - "Path of the mount point. E.g.: /path/to/my/data"
+ type: dict
+ nfs:
+ description:
+ - "Dictionary with values for NFS storage type:"
+ - "Note that these parameters are not idempotent."
+ type: dict
+ suboptions:
+ address:
+ description:
+ - "Address of the NFS server. E.g.: myserver.mydomain.com"
+ path:
+ description:
+ - "Path of the mount point. E.g.: /path/to/my/data"
+ version:
+ description:
+ - "NFS version. One of: I(auto), I(v3), I(v4) or I(v4_1)."
+ timeout:
+ description:
+ - "The time in tenths of a second to wait for a response before retrying NFS requests. Range 0 to 65535."
+ retrans:
+ description:
+ - "The number of times to retry a request before attempting further recovery actions. Range 0 to 65535."
+ mount_options:
+ description:
+ - "Option which will be passed when mounting storage."
+ iscsi:
+ description:
+ - "Dictionary with values for iSCSI storage type:"
+ - "Note that these parameters are not idempotent."
+ type: dict
+ suboptions:
+ address:
+ description:
+ - Address of the iSCSI storage server.
+ port:
+ description:
+ - Port of the iSCSI storage server.
+ target:
+ description:
+ - The target IQN for the storage device.
+ lun_id:
+ description:
+ - LUN id(s).
+ username:
+ description:
+ - A CHAP user name for logging into a target.
+ password:
+ description:
+ - A CHAP password for logging into a target.
+ override_luns:
+ description:
+                    - If I(true), iSCSI storage domain LUNs will be overridden before adding.
+ type: bool
+ target_lun_map:
+ description:
+                    - List of dictionaries containing targets and LUNs.
+ posixfs:
+ description:
+ - "Dictionary with values for PosixFS storage type:"
+ - "Note that these parameters are not idempotent."
+ type: dict
+ suboptions:
+ path:
+ description:
+ - "Path of the mount point. E.g.: /path/to/my/data"
+ vfs_type:
+ description:
+ - Virtual File System type.
+ mount_options:
+ description:
+                    - Options which will be passed when mounting the storage.
+ glusterfs:
+ description:
+ - "Dictionary with values for GlusterFS storage type:"
+ - "Note that these parameters are not idempotent."
+ type: dict
+ suboptions:
+ address:
+ description:
+ - "Address of the Gluster server. E.g.: myserver.mydomain.com"
+ path:
+ description:
+ - "Path of the mount point. E.g.: /path/to/my/data"
+ mount_options:
+ description:
+                    - Options which will be passed when mounting the storage.
+ managed_block_storage:
+ description:
+ - "Dictionary with values for managed block storage type"
+ - "Note: available from ovirt 4.3"
+ type: dict
+ suboptions:
+ driver_options:
+ description:
+ - "The options to be passed when creating a storage domain using a cinder driver."
+ - "List of dictionary containing C(name) and C(value) of driver option"
+ type: list
+ elements: dict
+ driver_sensitive_options:
+ description:
+ - "Parameters containing sensitive information, to be passed when creating a storage domain using a cinder driver."
+ - "List of dictionary containing C(name) and C(value) of driver sensitive option"
+ type: list
+ elements: dict
+ fcp:
+ description:
+ - "Dictionary with values for fibre channel storage type:"
+ - "Note that these parameters are not idempotent."
+ type: dict
+ suboptions:
+ lun_id:
+ description:
+ - LUN id.
+ override_luns:
+ description:
+                    - If I(true), FCP storage domain LUNs will be overridden before adding.
+ type: bool
+ wipe_after_delete:
+ description:
+ - "Boolean flag which indicates whether the storage domain should wipe the data after delete."
+ type: bool
+ backup:
+ description:
+ - "Boolean flag which indicates whether the storage domain is configured as backup or not."
+ type: bool
+ critical_space_action_blocker:
+ description:
+ - "Indicates the minimal free space the storage domain should contain in percentages."
+ type: int
+ warning_low_space:
+ description:
+ - "Indicates the minimum percentage of a free space in a storage domain to present a warning."
+ type: int
+ destroy:
+ description:
+ - "Logical remove of the storage domain. If I(true) retains the storage domain's data for import."
+ - "This parameter is relevant only when C(state) is I(absent)."
+ type: bool
+ format:
+ description:
+ - "If I(True) storage domain will be formatted after removing it from oVirt/RHV."
+ - "This parameter is relevant only when C(state) is I(absent)."
+ type: bool
+ discard_after_delete:
+ description:
+ - "If I(True) storage domain blocks will be discarded upon deletion. Enabled by default."
+ - "This parameter is relevant only for block based storage domains."
+ type: bool
+ storage_format:
+ description:
+ - "One of v1, v2, v3, v4, v5 - sets the storage format of the domain."
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add data NFS storage domain
+- ovirt.ovirt.ovirt_storage_domain:
+ name: data_nfs
+ host: myhost
+ data_center: mydatacenter
+ nfs:
+ address: 10.34.63.199
+ path: /path/data
+
+# Add data NFS storage domain with id for data center
+- ovirt.ovirt.ovirt_storage_domain:
+ name: data_nfs
+ host: myhost
+ data_center: 11111
+ nfs:
+ address: 10.34.63.199
+ path: /path/data
+ mount_options: noexec,nosuid
+
+# Add data NFS storage domain in an older format
+# E.g. the following will work if the data center is at compatibility level 4.2.
+# Without this, you might get an error like:
+# Cannot attach Storage. Storage Domain format V5 is illegal.
+- ovirt.ovirt.ovirt_storage_domain:
+ name: data_nfs
+ host: myhost
+ data_center: mydatacenter
+ nfs:
+ address: 10.34.63.199
+ path: /path/data
+ storage_format: v4
+
+# Add data localfs storage domain
+- ovirt.ovirt.ovirt_storage_domain:
+ name: data_localfs
+ host: myhost
+ data_center: mydatacenter
+ localfs:
+ path: /path/to/data
+
+# Add data iSCSI storage domain:
+- ovirt.ovirt.ovirt_storage_domain:
+ name: data_iscsi
+ host: myhost
+ data_center: mydatacenter
+ iscsi:
+ target: iqn.2016-08-09.domain-01:nickname
+ lun_id:
+ - 1IET_000d0001
+ - 1IET_000d0002
+ address: 10.34.63.204
+ discard_after_delete: True
+ backup: False
+ critical_space_action_blocker: 5
+ warning_low_space: 10
+
+# Since Ansible 2.5 you can specify multiple targets for storage domain,
+# Add data iSCSI storage domain with multiple targets:
+- ovirt.ovirt.ovirt_storage_domain:
+ name: data_iscsi
+ host: myhost
+ data_center: mydatacenter
+ iscsi:
+ target_lun_map:
+ - target: iqn.2016-08-09.domain-01:nickname
+ lun_id: 1IET_000d0001
+ - target: iqn.2016-08-09.domain-02:nickname
+ lun_id: 1IET_000d0002
+ address: 10.34.63.204
+ discard_after_delete: True
+
+# Add data glusterfs storage domain
+- ovirt.ovirt.ovirt_storage_domain:
+ name: glusterfs_1
+ host: myhost
+ data_center: mydatacenter
+ glusterfs:
+ address: 10.10.10.10
+ path: /path/data
+
+# Create export NFS storage domain:
+- ovirt.ovirt.ovirt_storage_domain:
+ name: myexportdomain
+ domain_function: export
+ host: myhost
+ data_center: mydatacenter
+ nfs:
+ address: 10.34.63.199
+ path: /path/export
+ wipe_after_delete: False
+ backup: True
+ critical_space_action_blocker: 2
+ warning_low_space: 5
+
+# Import export NFS storage domain:
+- ovirt.ovirt.ovirt_storage_domain:
+ state: imported
+ domain_function: export
+ host: myhost
+ data_center: mydatacenter
+ nfs:
+ address: 10.34.63.199
+ path: /path/export
+
+# Import FCP storage domain:
+- ovirt.ovirt.ovirt_storage_domain:
+ state: imported
+ name: data_fcp
+ host: myhost
+ data_center: mydatacenter
+ fcp: {}
+
+# Update OVF_STORE:
+- ovirt.ovirt.ovirt_storage_domain:
+ state: update_ovf_store
+ name: domain
+
+# Create ISO NFS storage domain
+- ovirt.ovirt.ovirt_storage_domain:
+ name: myiso
+ domain_function: iso
+ host: myhost
+ data_center: mydatacenter
+ nfs:
+ address: 10.34.63.199
+ path: /path/iso
+
+# Create managed storage domain
+# Available since oVirt 4.3 and Ansible 2.9
+- ovirt.ovirt.ovirt_storage_domain:
+ name: my_managed_domain
+ host: myhost
+ data_center: mydatacenter
+ managed_block_storage:
+ driver_options:
+ - name: rbd_pool
+ value: pool1
+ - name: rbd_user
+ value: admin
+ - name: volume_driver
+ value: cinder.volume.drivers.rbd.RBDDriver
+ - name: rbd_keyring_conf
+ value: /etc/ceph/keyring
+ driver_sensitive_options:
+ - name: secret_password
+ value: password
+
+# Remove storage domain
+- ovirt.ovirt.ovirt_storage_domain:
+ state: absent
+ name: mystorage_domain
+ format: true
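+
+# Move storage domain to maintenance (an illustrative sketch; assumes the
+# domain exists and is attached to a data center):
+- ovirt.ovirt.ovirt_storage_domain:
+ state: maintenance
+ name: mystorage_domain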
+'''
+
+RETURN = '''
+id:
+ description: ID of the storage domain which is managed
+ returned: On success if storage domain is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+storage_domain:
+ description: "Dictionary of all the storage domain attributes. Storage domain attributes can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/storage_domain."
+ returned: On success if storage domain is found.
+ type: dict
+'''
+
+try:
+ import ovirtsdk4.types as otypes
+
+ from ovirtsdk4.types import StorageDomainStatus as sdstate
+ from ovirtsdk4.types import HostStatus as hoststate
+ from ovirtsdk4.types import DataCenterStatus as dcstatus
+except ImportError:
+ pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ get_entity,
+ get_id_by_name,
+ OvirtRetry,
+ ovirt_full_argument_spec,
+ search_by_name,
+ search_by_attributes,
+ wait,
+)
+
+
+class StorageDomainModule(BaseModule):
+
+ def _get_storage_type(self):
+ for sd_type in ['nfs', 'iscsi', 'posixfs', 'glusterfs', 'fcp', 'localfs', 'managed_block_storage']:
+ if self.param(sd_type) is not None:
+ return sd_type
+
+ def _get_storage(self):
+ for sd_type in ['nfs', 'iscsi', 'posixfs', 'glusterfs', 'fcp', 'localfs', 'managed_block_storage']:
+ if self.param(sd_type) is not None:
+ return self.param(sd_type)
+
+ def _get_storage_format(self):
+ if self.param('storage_format') is not None:
+ for sd_format in otypes.StorageFormat:
+ if self.param('storage_format').lower() == str(sd_format):
+ return sd_format
+
+ def _login(self, storage_type, storage):
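+ # For iSCSI storage the host must first log in to the target(s) so
+ # that the LUNs become visible before the domain entity is built: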
+ if storage_type == 'iscsi':
+ hosts_service = self._connection.system_service().hosts_service()
+ host_id = get_id_by_name(hosts_service, self.param('host'))
+ if storage.get('target'):
+ hosts_service.host_service(host_id).iscsi_login(
+ iscsi=otypes.IscsiDetails(
+ username=storage.get('username'),
+ password=storage.get('password'),
+ address=storage.get('address'),
+ target=storage.get('target'),
+ ),
+ )
+ elif storage.get('target_lun_map'):
+ for target in [m['target'] for m in storage.get('target_lun_map')]:
+ hosts_service.host_service(host_id).iscsi_login(
+ iscsi=otypes.IscsiDetails(
+ username=storage.get('username'),
+ password=storage.get('password'),
+ address=storage.get('address'),
+ target=target,
+ ),
+ )
+
+ def __target_lun_map(self, storage):
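+ # Normalize the storage parameters into (lun_id, target) pairs:
+ # a single target with one or more LUNs, an explicit target_lun_map,
+ # or LUNs without any target (e.g. FCP):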
+ if storage.get('target'):
+ lun_ids = storage.get('lun_id') if isinstance(storage.get('lun_id'), list) else [(storage.get('lun_id'))]
+ return [(lun_id, storage.get('target')) for lun_id in lun_ids]
+ elif storage.get('target_lun_map'):
+ return [(target_map.get('lun_id'), target_map.get('target')) for target_map in storage.get('target_lun_map')]
+ else:
+ lun_ids = storage.get('lun_id') if isinstance(storage.get('lun_id'), list) else [(storage.get('lun_id'))]
+ return [(lun_id, None) for lun_id in lun_ids]
+
+ def build_entity(self):
+ storage_type = self._get_storage_type()
+ storage = self._get_storage()
+ self._login(storage_type, storage)
+
+ return otypes.StorageDomain(
+ name=self.param('name'),
+ description=self.param('description'),
+ comment=self.param('comment'),
+ wipe_after_delete=self.param('wipe_after_delete'),
+ backup=self.param('backup'),
+ critical_space_action_blocker=self.param('critical_space_action_blocker'),
+ warning_low_space_indicator=self.param('warning_low_space'),
+ import_=True if self.param('state') == 'imported' else None,
+ id=self.param('id') if self.param('state') == 'imported' else None,
+ type=otypes.StorageDomainType(storage_type if storage_type == 'managed_block_storage' else self.param('domain_function')),
+ host=otypes.Host(name=self.param('host')),
+ discard_after_delete=self.param('discard_after_delete'),
+ storage=otypes.HostStorage(
+ driver_options=[
+ otypes.Property(
+ name=do.get('name'),
+ value=do.get('value')
+ ) for do in storage.get('driver_options')
+ ] if storage.get('driver_options') else None,
+ driver_sensitive_options=[
+ otypes.Property(
+ name=dso.get('name'),
+ value=dso.get('value')
+ ) for dso in storage.get('driver_sensitive_options')
+ ] if storage.get('driver_sensitive_options') else None,
+ type=otypes.StorageType(storage_type),
+ logical_units=[
+ otypes.LogicalUnit(
+ id=lun_id,
+ address=storage.get('address'),
+ port=int(storage.get('port', 3260)),
+ target=target,
+ username=storage.get('username'),
+ password=storage.get('password'),
+ ) for lun_id, target in self.__target_lun_map(storage)
+ ] if storage_type in ['iscsi', 'fcp'] else None,
+ override_luns=storage.get('override_luns'),
+ mount_options=storage.get('mount_options'),
+ vfs_type=(
+ 'glusterfs'
+ if storage_type in ['glusterfs'] else storage.get('vfs_type')
+ ),
+ address=storage.get('address'),
+ path=storage.get('path'),
+ nfs_retrans=storage.get('retrans'),
+ nfs_timeo=storage.get('timeout'),
+ nfs_version=otypes.NfsVersion(
+ storage.get('version')
+ ) if storage.get('version') else None,
+ ) if storage_type is not None else None,
+ storage_format=self._get_storage_format(),
+ )
+
+ def _find_attached_datacenter_name(self, sd_name):
+ """
+ Finds the name of the datacenter that a given
+ storage domain is attached to.
+
+ Args:
+ sd_name (str): Storage Domain name
+
+ Returns:
+ str: Data Center name
+
+ Raises:
+ Exception: In case the storage domain is not attached to
+ an active Datacenter
+ """
+ dcs_service = self._connection.system_service().data_centers_service()
+ dc = search_by_attributes(dcs_service, storage=sd_name)
+ if dc is None:
+ raise Exception(
+ "Can't bring storage to state `%s`, because it seems that"
+ "it is not attached to any datacenter"
+ % self.param('state')
+ )
+ else:
+ if dc.status == dcstatus.UP:
+ return dc.name
+ else:
+ raise Exception(
+ "Can't bring storage to state `%s`, because Datacenter "
+ "%s is not UP" % (self.param('state'), dc.name)
+ )
+
+ def _attached_sds_service(self, dc_name):
+ # Get data center object of the storage domain:
+ dcs_service = self._connection.system_service().data_centers_service()
+
+ # Search for the data center by name; if it is not found, try to search by GUID.
+ dc = search_by_name(dcs_service, dc_name)
+ if dc is None:
+ dc = get_entity(dcs_service.service(dc_name))
+ if dc is None:
+ return None
+
+ dc_service = dcs_service.data_center_service(dc.id)
+ return dc_service.storage_domains_service()
+
+ def _attached_sd_service(self, storage_domain):
+ dc_name = self.param('data_center')
+ if not dc_name:
+ # Find the DC, where the storage resides:
+ dc_name = self._find_attached_datacenter_name(storage_domain.name)
+ attached_sds_service = self._attached_sds_service(dc_name)
+ attached_sd_service = attached_sds_service.storage_domain_service(storage_domain.id)
+ return attached_sd_service
+
+ def _maintenance(self, storage_domain):
+ attached_sd_service = self._attached_sd_service(storage_domain)
+ attached_sd = get_entity(attached_sd_service)
+
+ if attached_sd and attached_sd.status != sdstate.MAINTENANCE:
+ if not self._module.check_mode:
+ attached_sd_service.deactivate()
+ self.changed = True
+
+ wait(
+ service=attached_sd_service,
+ condition=lambda sd: sd.status == sdstate.MAINTENANCE,
+ wait=self.param('wait'),
+ timeout=self.param('timeout'),
+ )
+
+ def _unattach(self, storage_domain):
+ attached_sd_service = self._attached_sd_service(storage_domain)
+ attached_sd = get_entity(attached_sd_service)
+
+ if attached_sd and attached_sd.status == sdstate.MAINTENANCE:
+ if not self._module.check_mode:
+ # Detach the storage domain:
+ attached_sd_service.remove()
+ self.changed = True
+ # Wait until storage domain is detached:
+ wait(
+ service=attached_sd_service,
+ condition=lambda sd: sd is None,
+ wait=self.param('wait'),
+ timeout=self.param('timeout'),
+ )
+
+ def pre_remove(self, entity):
+ # In case the user chose to destroy the storage domain there is no need to
+ # move it to maintenance or detach it; it should simply be removed from the DB.
+ # Also, if the storage domain is already unattached, skip this step.
+ if entity.status == sdstate.UNATTACHED or self.param('destroy'):
+ return
+ # Before removing storage domain we need to put it into maintenance state:
+ self._maintenance(entity)
+
+ # Before removing storage domain we need to detach it from data center:
+ self._unattach(entity)
+
+ def post_create_check(self, sd_id):
+ storage_domain = self._service.service(sd_id).get()
+ dc_name = self.param('data_center')
+ if not dc_name:
+ # Find the DC, where the storage resides:
+ dc_name = self._find_attached_datacenter_name(storage_domain.name)
+ self._service = self._attached_sds_service(dc_name)
+
+ # If storage domain isn't attached, attach it:
+ attached_sd_service = self._service.service(storage_domain.id)
+ if get_entity(attached_sd_service) is None:
+ self._service.add(
+ otypes.StorageDomain(
+ id=storage_domain.id,
+ ),
+ )
+ self.changed = True
+ # Wait until storage domain is active:
+ wait(
+ service=attached_sd_service,
+ condition=lambda sd: sd.status == sdstate.ACTIVE,
+ wait=self.param('wait'),
+ timeout=self.param('timeout'),
+ )
+
+ def unattached_pre_action(self, storage_domain):
+ dc_name = self.param('data_center')
+ if not dc_name:
+ # Find the DC, where the storage resides:
+ dc_name = self._find_attached_datacenter_name(storage_domain.name)
+ self._service = self._attached_sds_service(dc_name)
+ self._maintenance(storage_domain)
+
+ def update_check(self, entity):
+ return (
+ equal(self.param('comment'), entity.comment) and
+ equal(self.param('description'), entity.description) and
+ equal(self.param('backup'), entity.backup) and
+ equal(self.param('critical_space_action_blocker'), entity.critical_space_action_blocker) and
+ equal(self.param('discard_after_delete'), entity.discard_after_delete) and
+ equal(self.param('wipe_after_delete'), entity.wipe_after_delete) and
+ equal(self.param('warning_low_space'), entity.warning_low_space_indicator)
+ )
+
+
+def failed_state(sd):
+ return sd.status in [sdstate.UNKNOWN, sdstate.INACTIVE]
+
+
+def control_state(sd_module):
+ sd = sd_module.search_entity()
+ if sd is None:
+ return
+
+ sd_service = sd_module._service.service(sd.id)
+
+ # In the case of no status returned, it's an attached storage domain.
+ # Redetermine the corresponding service and entity:
+ if sd.status is None:
+ sd_service = sd_module._attached_sd_service(sd)
+ sd = get_entity(sd_service)
+
+ if sd is None:
+ return
+
+ if sd.status == sdstate.LOCKED:
+ wait(
+ service=sd_service,
+ condition=lambda sd: sd.status != sdstate.LOCKED,
+ fail_condition=failed_state,
+ )
+
+ if failed_state(sd):
+ raise Exception("Not possible to manage storage domain '%s'." % sd.name)
+ elif sd.status == sdstate.ACTIVATING:
+ wait(
+ service=sd_service,
+ condition=lambda sd: sd.status == sdstate.ACTIVE,
+ fail_condition=failed_state,
+ )
+ elif sd.status == sdstate.DETACHING:
+ wait(
+ service=sd_service,
+ condition=lambda sd: sd.status == sdstate.UNATTACHED,
+ fail_condition=failed_state,
+ )
+ elif sd.status == sdstate.PREPARING_FOR_MAINTENANCE:
+ wait(
+ service=sd_service,
+ condition=lambda sd: sd.status == sdstate.MAINTENANCE,
+ fail_condition=failed_state,
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent', 'maintenance', 'unattached', 'imported', 'update_ovf_store'],
+ default='present',
+ ),
+ id=dict(default=None),
+ name=dict(default=None),
+ description=dict(default=None),
+ comment=dict(default=None),
+ data_center=dict(default=None),
+ domain_function=dict(choices=['data', 'iso', 'export'], default='data', aliases=['type']),
+ host=dict(default=None),
+ localfs=dict(default=None, type='dict'),
+ nfs=dict(default=None, type='dict'),
+ iscsi=dict(default=None, type='dict'),
+ managed_block_storage=dict(default=None, type='dict', options=dict(
+ driver_options=dict(type='list', elements='dict'),
+ driver_sensitive_options=dict(type='list', no_log=True, elements='dict'))),
+ posixfs=dict(default=None, type='dict'),
+ glusterfs=dict(default=None, type='dict'),
+ fcp=dict(default=None, type='dict'),
+ wipe_after_delete=dict(type='bool', default=None),
+ backup=dict(type='bool', default=None),
+ critical_space_action_blocker=dict(type='int', default=None),
+ warning_low_space=dict(type='int', default=None),
+ destroy=dict(type='bool', default=None),
+ format=dict(type='bool', default=None),
+ discard_after_delete=dict(type='bool', default=None),
+ storage_format=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ storage_domains_service = connection.system_service().storage_domains_service()
+ storage_domains_module = StorageDomainModule(
+ connection=connection,
+ module=module,
+ service=storage_domains_service,
+ )
+
+ state = module.params['state']
+ control_state(storage_domains_module)
+ if state == 'absent':
+ # Pick a random available host when the host parameter is missing
+ host_param = module.params['host']
+ if not host_param:
+ host = search_by_attributes(connection.system_service().hosts_service(), status='up')
+ if host is None:
+ raise Exception(
+ "Not possible to remove storage domain '%s' "
+ "because no host found with status `up`." % module.params['name']
+ )
+ host_param = host.name
+ ret = storage_domains_module.remove(
+ destroy=module.params['destroy'],
+ format=module.params['format'],
+ host=host_param,
+ )
+ elif state == 'present' or state == 'imported':
+ sd_id = storage_domains_module.create()['id']
+ storage_domains_module.post_create_check(sd_id)
+ ret = storage_domains_module.action(
+ action='activate',
+ action_condition=lambda s: s.status == sdstate.MAINTENANCE,
+ wait_condition=lambda s: s.status == sdstate.ACTIVE,
+ fail_condition=failed_state,
+ search_params={'id': sd_id} if state == 'imported' else None
+ )
+ elif state == 'maintenance':
+ sd_id = storage_domains_module.create()['id']
+ storage_domains_module.post_create_check(sd_id)
+
+ ret = OvirtRetry.backoff(tries=5, delay=1, backoff=2)(
+ storage_domains_module.action
+ )(
+ action='deactivate',
+ action_condition=lambda s: s.status == sdstate.ACTIVE,
+ wait_condition=lambda s: s.status == sdstate.MAINTENANCE,
+ fail_condition=failed_state,
+ )
+ elif state == 'unattached':
+ ret = storage_domains_module.create()
+ storage_domains_module.pre_remove(
+ entity=storage_domains_service.service(ret['id']).get()
+ )
+ ret['changed'] = storage_domains_module.changed
+ elif state == 'update_ovf_store':
+ ret = storage_domains_module.action(
+ action='update_ovf_store'
+ )
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_domain_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_domain_info.py
new file mode 100644
index 000000000..3a2b5c328
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_domain_info.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_domain_info
+short_description: Retrieve information about one or more oVirt/RHV storage domains
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV storage domains."
+ - This module was called C(ovirt_storage_domain_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_storage_domain_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_storage_domains), which
+ contains a list of storage domains. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search storage domain X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+ type: str
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url: https://ovirt.github.io/ovirt-engine-api-model/master/#types/storage_domain/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all storage domains which names start with C(data) and
+# belong to data center C(west):
+- ovirt.ovirt.ovirt_storage_domain_info:
+ pattern: name=data* and datacenter=west
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_storage_domains }}"
+'''
+
+RETURN = '''
+ovirt_storage_domains:
+ description: "List of dictionaries describing the storage domains. Storage_domain attributes are mapped to dictionary keys,
+ all storage domains attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/storage_domain."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+ "The 'fetch_nested' and 'nested_attributes' are deprecated please use 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ storage_domains_service = connection.system_service().storage_domains_service()
+ storage_domains = storage_domains_service.list(
+ search=module.params['pattern'],
+ follow=",".join(module.params['follow'])
+ )
+ result = dict(
+ ovirt_storage_domains=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in storage_domains
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_template_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_template_info.py
new file mode 100644
index 000000000..7b422871a
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_template_info.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_template_info
+short_description: Retrieve information about one or more oVirt/RHV templates related to a storage domain.
+version_added: "1.0.0"
+author: "Maor Lipchuk (@machacekondra)"
+description:
+ - "Retrieve information about one or more oVirt/RHV templates relate to a storage domain."
+ - This module was called C(ovirt_storage_template_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_storage_template_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_storage_templates), which
+ contains a list of templates. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ unregistered:
+ description:
+ - "Flag which indicates whether to get unregistered templates which contain one or more
+ disks which reside on a storage domain or diskless templates."
+ type: bool
+ default: false
+ max:
+ description:
+ - "Sets the maximum number of templates to return. If not specified all the templates are returned."
+ type: int
+ storage_domain:
+ description:
+ - "The storage domain name where the templates should be listed."
+ type: str
+ required: true
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url: https://ovirt.github.io/ovirt-engine-api-model/master/#types/template/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all Templates which relate to a storage domain and
+# are unregistered:
+- ovirt.ovirt.ovirt_storage_template_info:
+ unregistered: True
+ storage_domain: storage
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_storage_templates }}"
+'''
+
+RETURN = '''
+ovirt_storage_templates:
+ description: "List of dictionaries describing the Templates. Template attributes are mapped to dictionary keys,
+ all Templates attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ get_id_by_name
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ storage_domain=dict(type='str', required=True),
+ max=dict(default=None, type='int'),
+ unregistered=dict(default=False, type='bool'),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+ "The 'fetch_nested' and 'nested_attributes' are deprecated please use 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ storage_domains_service = connection.system_service().storage_domains_service()
+ sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
+ storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
+ templates_service = storage_domain_service.templates_service()
+
+ # List unregistered templates if requested, otherwise the registered ones:
+ if module.params.get('unregistered'):
+ templates = templates_service.list(
+ unregistered=True,
+ follow=",".join(module.params['follow'])
+ )
+ else:
+ templates = templates_service.list(
+ max=module.params['max'],
+ follow=",".join(module.params['follow'])
+ )
+ result = dict(
+ ovirt_storage_templates=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in templates
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_vm_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_vm_info.py
new file mode 100644
index 000000000..81d164aa7
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_vm_info.py
@@ -0,0 +1,150 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_vm_info
+short_description: Retrieve information about one or more oVirt/RHV virtual machines related to a storage domain.
+version_added: "1.0.0"
+author: "Maor Lipchuk (@machacekondra)"
+description:
+ - "Retrieve information about one or more oVirt/RHV virtual machines relate to a storage domain."
+ - This module was called C(ovirt_storage_vm_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_storage_vm_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_storage_vms), which
+ contains a list of virtual machines. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ unregistered:
+ description:
+ - "Flag which indicates whether to get unregistered virtual machines which contain one or more
+ disks which reside on a storage domain or diskless virtual machines."
+ type: bool
+ default: false
+ max:
+ description:
+ - "Sets the maximum number of virtual machines to return. If not specified all the virtual machines are returned."
+ type: int
+ storage_domain:
+ description:
+ - "The storage domain name where the virtual machines should be listed."
+ type: str
+ required: True
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url: https://ovirt.github.io/ovirt-engine-api-model/master/#types/vm/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all VMs which relate to a storage domain and
+# are unregistered:
+- ovirt.ovirt.ovirt_storage_vm_info:
+ unregistered: True
+ storage_domain: storage
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_storage_vms }}"
+'''
+
+RETURN = '''
+ovirt_storage_vms:
+ description: "List of dictionaries describing the VMs. VM attributes are mapped to dictionary keys,
+ all VMs attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ get_id_by_name
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ storage_domain=dict(type='str', required=True),
+ max=dict(default=None, type='int'),
+ unregistered=dict(default=False, type='bool'),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+ "The 'fetch_nested' and 'nested_attributes' are deprecated please use 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ storage_domains_service = connection.system_service().storage_domains_service()
+ sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
+ storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
+ vms_service = storage_domain_service.vms_service()
+
+ # List unregistered VMs if requested, otherwise the registered ones:
+ if module.params.get('unregistered'):
+ vms = vms_service.list(unregistered=True, follow=",".join(module.params['follow']))
+ else:
+ vms = vms_service.list(follow=",".join(module.params['follow']))
+ result = dict(
+ ovirt_storage_vms=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in vms
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_system_option_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_system_option_info.py
new file mode 100644
index 000000000..53ac0ddaf
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_system_option_info.py
@@ -0,0 +1,135 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_system_option_info
+short_description: Retrieve information about one oVirt/RHV system option.
+version_added: "1.3.0"
+author: "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one oVirt/RHV system options."
+notes:
+ - "This module returns a variable C(ovirt_system_option_info), which
+ contains a dict of system option. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ name:
+ description:
+ - "Name of system option."
+ type: str
+ version:
+ description:
+ - "The version of the option."
+ type: str
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url: https://ovirt.github.io/ovirt-engine-api-model/master/#types/system_option/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication:
+
+- ovirt.ovirt.ovirt_system_option_info:
+ name: "ServerCPUList"
+ version: "4.4"
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_system_option }}"
+'''
+
+RETURN = '''
+ovirt_system_option:
+ description: "Dictionary describing the system option. Option attributes are mapped to dictionary keys,
+ all option attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/system_option."
+ returned: On success.
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ name=dict(default=None),
+ version=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+ "The 'fetch_nested' and 'nested_attributes' are deprecated please use 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ options_service = connection.system_service().options_service()
+ option_service = options_service.option_service(module.params.get('name'))
+
+ try:
+ option = option_service.get(version=module.params.get('version'))
+ except Exception as e:
+ if str(e) == "HTTP response code is 404.":
+ raise ValueError("Could not find the option with name '{0}'".format(module.params.get('name')))
+ raise Exception("Unexpected error: '{0}'".format(e))
+
+ result = dict(
+ ovirt_system_option=get_dict_of_struct(
+ struct=option,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ),
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_tag.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_tag.py
new file mode 100644
index 000000000..bf47f9a84
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_tag.py
@@ -0,0 +1,265 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_tag
+short_description: Module to manage tags in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "This module manage tags in oVirt/RHV. It can also manage assignments
+ of those tags to entities."
+options:
+ id:
+ description:
+ - "ID of the tag to manage."
+ type: str
+ name:
+ description:
+ - "Name of the tag to manage."
+ required: true
+ type: str
+ state:
+ description:
+ - "Should the tag be present/absent/attached/detached."
+ - "C(Note): I(attached) and I(detached) states are supported since version 2.4."
+ choices: ['present', 'absent', 'attached', 'detached']
+ default: present
+ type: str
+ description:
+ description:
+ - "Description of the tag to manage."
+ type: str
+ parent:
+ description:
+ - "Name of the parent tag."
+ type: str
+ vms:
+ description:
+ - "List of the VMs names, which should have assigned this tag."
+ type: list
+ elements: str
+ hosts:
+ description:
+ - "List of the hosts names, which should have assigned this tag."
+ type: list
+ elements: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication:
+
+# Create (if it does not exist) and assign tag to VMs vm1 and vm2:
+- ovirt.ovirt.ovirt_tag:
+ name: mytag
+ vms:
+ - vm1
+ - vm2
+
+# Attach a tag to VM 'vm3', keeping the other tags already attached to the VM:
+- ovirt.ovirt.ovirt_tag:
+ name: mytag
+ state: attached
+ vms:
+ - vm3
+
+# Detach a tag from VM 'vm3', keeping the other tags already attached to the VM:
+- ovirt.ovirt.ovirt_tag:
+ name: mytag
+ state: detached
+ vms:
+ - vm3
+
+# Detach all VMs from the tag:
+- ovirt.ovirt.ovirt_tag:
+ name: mytag
+ vms: []
+
+# Remove tag
+- ovirt.ovirt.ovirt_tag:
+ state: absent
+ name: mytag
+
+# Change Tag Name
+- ovirt.ovirt.ovirt_tag:
+ id: 00000000-0000-0000-0000-000000000000
+ name: "new_tag_name"
+'''
+
+RETURN = '''
+id:
+ description: ID of the tag which is managed
+ returned: On success if tag is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+tag:
+ description: "Dictionary of all the tag attributes. Tag attributes can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/tag."
+ returned: On success if tag is found.
+ type: dict
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ get_id_by_name,
+ ovirt_full_argument_spec,
+)
+
+
+class TagsModule(BaseModule):
+
+ def build_entity(self):
+ return otypes.Tag(
+ id=self._module.params['id'],
+ name=self._module.params['name'],
+ description=self._module.params['description'],
+ parent=otypes.Tag(
+ name=self._module.params['parent'],
+ ) if self._module.params['parent'] else None,
+ )
+
+ def post_create(self, entity):
+ self.update_check(entity)
+
+ def _update_tag_assignments(self, entity, name):
+ if self._module.params[name] is None:
+ return
+
+ state = self.param('state')
+ entities_service = getattr(self._connection.system_service(), '%s_service' % name)()
+ current_entities = [
+ entity.name
+ for entity in entities_service.list(search='tag=%s' % self._module.params['name'])
+ ]
+ # Assign tags:
+ if state in ['present', 'attached', 'detached']:
+ for entity_name in self._module.params[name]:
+ entity_id = get_id_by_name(entities_service, entity_name)
+ tags_service = entities_service.service(entity_id).tags_service()
+ current_tags = [tag.name for tag in tags_service.list()]
+ # Assign the tag:
+ if state in ['attached', 'present']:
+ if self._module.params['name'] not in current_tags:
+ if not self._module.check_mode:
+ tags_service.add(
+ tag=otypes.Tag(
+ name=self._module.params['name'],
+ ),
+ )
+ self.changed = True
+ # Detach the tag:
+ elif state == 'detached':
+ if self._module.params['name'] in current_tags:
+ tag_id = get_id_by_name(tags_service, self.param('name'))
+ if not self._module.check_mode:
+ tags_service.tag_service(tag_id).remove()
+ self.changed = True
+
+ # Unassign tags:
+ if state == 'present':
+ for entity_name in [e for e in current_entities if e not in self._module.params[name]]:
+ if not self._module.check_mode:
+ entity_id = get_id_by_name(entities_service, entity_name)
+ tags_service = entities_service.service(entity_id).tags_service()
+ tag_id = get_id_by_name(tags_service, self.param('name'))
+ tags_service.tag_service(tag_id).remove()
+ self.changed = True
+
+ def _get_parent(self, entity):
+ parent = None
+ if entity.parent:
+ parent = self._connection.follow_link(entity.parent).name
+ return parent
+
+ def update_check(self, entity):
+ self._update_tag_assignments(entity, 'vms')
+ self._update_tag_assignments(entity, 'hosts')
+ return (
+ equal(self._module.params.get('description'), entity.description) and
+ equal(self._module.params.get('name'), entity.name) and
+ equal(self._module.params.get('parent'), self._get_parent(entity))
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent', 'attached', 'detached'],
+ default='present',
+ ),
+ id=dict(default=None),
+ name=dict(required=True),
+ description=dict(default=None),
+ parent=dict(default=None),
+ vms=dict(default=None, type='list', elements='str'),
+ hosts=dict(default=None, type='list', elements='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ tags_service = connection.system_service().tags_service()
+ tags_module = TagsModule(
+ connection=connection,
+ module=module,
+ service=tags_service,
+ )
+
+ state = module.params['state']
+ if state in ['present', 'attached', 'detached']:
+ ret = tags_module.create()
+ elif state == 'absent':
+ ret = tags_module.remove()
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_tag_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_tag_info.py
new file mode 100644
index 000000000..01dd47163
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_tag_info.py
@@ -0,0 +1,182 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_tag_info
+short_description: Retrieve information about one or more oVirt/RHV tags
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV tags."
+ - This module was called C(ovirt_tag_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_tag_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_tags), which
+ contains a list of tags. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ name:
+ description:
+ - "Name of the tag which should be listed."
+ type: str
+ vm:
+ description:
+ - "Name of the VM, which tags should be listed."
+ type: str
+ host:
+ description:
+ - "Name of the host, which tags should be listed."
+ type: str
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url: https://ovirt.github.io/ovirt-engine-api-model/master/#types/tag/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all tags whose names start with C(tag):
+- ovirt.ovirt.ovirt_tag_info:
+ name: tag*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_tags }}"
+
+# Gather information about all tags, which are assigned to VM C(postgres):
+- ovirt.ovirt.ovirt_tag_info:
+ vm: postgres
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_tags }}"
+
+# Gather information about all tags, which are assigned to host C(west):
+- ovirt.ovirt.ovirt_tag_info:
+ host: west
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_tags }}"
+'''
+
+RETURN = '''
+ovirt_tags:
+ description: "List of dictionaries describing the tags. Tags attributes are mapped to dictionary keys,
+ all tags attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/tag."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ name=dict(default=None),
+ host=dict(default=None),
+ vm=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+ "The 'fetch_nested' and 'nested_attributes' are deprecated please use 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ tags_service = connection.system_service().tags_service()
+ tags = []
+ all_tags = tags_service.list(
+ follow=",".join(module.params['follow'])
+ )
+ if module.params['name']:
+ tags.extend([
+ t for t in all_tags
+ if fnmatch.fnmatch(t.name, module.params['name'])
+ ])
+ if module.params['host']:
+ hosts_service = connection.system_service().hosts_service()
+ host = search_by_name(hosts_service, module.params['host'])
+ if host is None:
+ raise Exception("Host '%s' was not found." % module.params['host'])
+ tags.extend(hosts_service.host_service(host.id).tags_service().list(
+ follow=",".join(module.params['follow'])
+ ))
+ if module.params['vm']:
+ vms_service = connection.system_service().vms_service()
+ vm = search_by_name(vms_service, module.params['vm'])
+ if vm is None:
+ raise Exception("Vm '%s' was not found." % module.params['vm'])
+ tags.extend(vms_service.vm_service(vm.id).tags_service().list(
+ follow=",".join(module.params['follow'])
+ ))
+
+ if not (module.params['vm'] or module.params['host'] or module.params['name']):
+ tags = all_tags
+
+ result = dict(
+ ovirt_tags=[
+ get_dict_of_struct(
+ struct=t,
+ connection=connection,
+ fetch_nested=module.params['fetch_nested'],
+ attributes=module.params['nested_attributes'],
+ ) for t in tags
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_template.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_template.py
new file mode 100644
index 000000000..5d8d6ea77
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_template.py
@@ -0,0 +1,1195 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_template
+short_description: Module to manage virtual machine templates in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Module to manage virtual machine templates in oVirt/RHV."
+options:
+ name:
+ description:
+ - "Name of the template to manage."
+ type: str
+ id:
+ description:
+ - "ID of the template to be registered."
+ type: str
+ state:
+ description:
+ - "Should the template be present/absent/exported/imported/registered.
+ When C(state) is I(registered) and the unregistered template's name
+ belongs to an already registered in engine template in the same DC
+ then we fail to register the unregistered template."
+ choices: ['present', 'absent', 'exported', 'imported', 'registered']
+ default: present
+ type: str
+ vm:
+ description:
+ - "Name of the VM, which will be used to create template."
+ type: str
+ description:
+ description:
+ - "Description of the template."
+ type: str
+ cpu_profile:
+ description:
+ - "CPU profile to be set to template."
+ type: str
+ cluster:
+ description:
+ - "Name of the cluster, where template should be created/imported."
+ type: str
+ allow_partial_import:
+ description:
+ - "Boolean indication whether to allow partial registration of a template when C(state) is registered."
+ type: bool
+ vnic_profile_mappings:
+ description:
+ - "Mapper which maps an external virtual NIC profile to one that exists in the engine when C(state) is registered.
+ vnic_profile is described by the following dictionary:"
+ suboptions:
+ source_network_name:
+ description:
+ - The network name of the source network.
+ source_profile_name:
+ description:
+ - The profile name related to the source network.
+ target_profile_id:
+ description:
+ - The id of the target profile to be mapped to in the engine.
+ type: list
+ elements: dict
+ cluster_mappings:
+ description:
+ - "Mapper which maps cluster name between Template's OVF and the destination cluster this Template should be registered to,
+ relevant when C(state) is registered.
+ Cluster mapping is described by the following dictionary:"
+ suboptions:
+ source_name:
+ description:
+ - The name of the source cluster.
+ dest_name:
+ description:
+ - The name of the destination cluster.
+ type: list
+ elements: dict
+ role_mappings:
+ description:
+ - "Mapper which maps role name between Template's OVF and the destination role this Template should be registered to,
+ relevant when C(state) is registered.
+ Role mapping is described by the following dictionary:"
+ suboptions:
+ source_name:
+ description:
+ - The name of the source role.
+ dest_name:
+ description:
+ - The name of the destination role.
+ type: list
+ elements: dict
+ domain_mappings:
+ description:
+ - "Mapper which maps aaa domain name between Template's OVF and the destination aaa domain this Template should be registered to,
+ relevant when C(state) is registered.
+ The aaa domain mapping is described by the following dictionary:"
+ suboptions:
+ source_name:
+ description:
+ - The name of the source aaa domain.
+ dest_name:
+ description:
+ - The name of the destination aaa domain.
+ type: list
+ elements: dict
+ exclusive:
+ description:
+ - "When C(state) is I(exported) this parameter indicates if the existing templates with the
+ same name should be overwritten."
+ type: bool
+ export_domain:
+ description:
+ - "When C(state) is I(exported) or I(imported) this parameter specifies the name of the
+ export storage domain."
+ type: str
+ image_provider:
+ description:
+ - "When C(state) is I(imported) this parameter specifies the name of the image provider to be used."
+ type: str
+ image_disk:
+ description:
+ - "When C(state) is I(imported) and C(image_provider) is used this parameter specifies the name of disk
+ to be imported as template."
+ aliases: ['glance_image_disk_name']
+ type: str
+ io_threads:
+ description:
+ - "Number of IO threads used by virtual machine. I(0) means IO threading disabled."
+ type: int
+ template_image_disk_name:
+ description:
+ - "When C(state) is I(imported) and C(image_provider) is used this parameter specifies the new name for imported disk,
+ if omitted then I(image_disk) name is used by default.
+ This parameter is used only in case of importing disk image from Glance domain."
+ type: str
+ storage_domain:
+ description:
+ - "When C(state) is I(imported) this parameter specifies the name of the destination data storage domain.
+ When C(state) is I(registered) this parameter specifies the name of the data storage domain of the unregistered template."
+ type: str
+ clone_permissions:
+ description:
+ - "If I(True) then the permissions of the VM (only the direct ones, not the inherited ones)
+ will be copied to the created template."
+ - "This parameter is used only when C(state) I(present)."
+ type: bool
+ seal:
+ description:
+ - "'Sealing' is an operation that erases all machine-specific configurations from a filesystem:
+ This includes SSH keys, UDEV rules, MAC addresses, system ID, hostname, etc.
+ If I(true), subsequent virtual machines made from this template will avoid configuration inheritance."
+ - "This parameter is used only when C(state) I(present)."
+ type: bool
+ operating_system:
+ description:
+ - Operating system of the template, for example 'rhel_8x64'.
+ - Default value is set by oVirt/RHV engine.
+ - Use the M(ovirt.ovirt.ovirt_vm_os_info) module to obtain the current list.
+ type: str
+ memory:
+ description:
+ - Amount of memory of the template. Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
+ type: str
+ memory_guaranteed:
+ description:
+ - Amount of minimal guaranteed memory of the template.
+ Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
+ - C(memory_guaranteed) parameter can't be lower than C(memory) parameter.
+ type: str
+ memory_max:
+ description:
+ - Upper bound of template memory up to which memory hot-plug can be performed.
+ Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
+ type: str
+ version:
+ description:
+ - "C(name) - The name of this version."
+ - "C(number) - The index of this version in the versions hierarchy of the template. Used for editing of sub template."
+ type: dict
+ clone_name:
+ description:
+ - Name used when importing the Template from the storage domain.
+ - If not defined, C(name) will be used.
+ type: str
+ usb_support:
+ description:
+ - "I(True) enable USB support, I(False) to disable it. By default is chosen by oVirt/RHV engine."
+ type: bool
+ timezone:
+ description:
+ - Sets time zone offset of the guest hardware clock.
+ - For example C(Etc/GMT).
+ type: str
+ sso:
+ description:
+ - "I(True) enable Single Sign On by Guest Agent, I(False) to disable it. By default is chosen by oVirt/RHV engine."
+ type: bool
+ soundcard_enabled:
+ description:
+ - "If I(true), the sound card is added to the virtual machine."
+ type: bool
+ smartcard_enabled:
+ description:
+ - "If I(true), use smart card authentication."
+ type: bool
+ kvm:
+ description:
+ - Dictionary of values to be used to connect to KVM and import
+ a template to oVirt.
+ type: dict
+ suboptions:
+ url:
+ description:
+ - The URL to be passed to the I(virt-v2v) tool for conversion.
+ - For example I(qemu:///system). This is a required parameter.
+ storage_domain:
+ description:
+ - Specifies the target storage domain for converted disks. This is a required parameter.
+ host:
+ description:
+ - The host name from which the template will be imported.
+ clone:
+ description:
+ - Indicates if the identifiers of the imported template should be regenerated.
+ cloud_init:
+ description:
+ - Dictionary with values for Unix-like Virtual Machine initialization using cloud-init.
+ type: dict
+ suboptions:
+ host_name:
+ description:
+ - Hostname to be set to Virtual Machine when deployed.
+ timezone:
+ description:
+ - Timezone to be set to Virtual Machine when deployed.
+ user_name:
+ description:
+ - Username whose password will be set on the Virtual Machine when deployed.
+ root_password:
+ description:
+ - Password to be set for user specified by C(user_name) parameter.
+ authorized_ssh_keys:
+ description:
+ - SSH keys used to log in to the Virtual Machine.
+ regenerate_ssh_keys:
+ description:
+ - If I(True), SSH keys will be regenerated on the Virtual Machine.
+ type: bool
+ custom_script:
+ description:
+ - Cloud-init script which will be executed on Virtual Machine when deployed.
+ - This is appended to the end of the cloud-init script generated by any other options.
+ - For further information, refer to cloud-init User-Data documentation.
+ dns_servers:
+ description:
+ - DNS servers to be configured on Virtual Machine, maximum of two, space-separated.
+ dns_search:
+ description:
+ - DNS search domains to be configured on Virtual Machine.
+ nic_boot_protocol:
+ description:
+ - Set the boot protocol of the network interface of the Virtual Machine.
+ choices: ['none', 'dhcp', 'static']
+ nic_ip_address:
+ description:
+ - If boot protocol is static, set this IP address on the network interface of the Virtual Machine.
+ nic_netmask:
+ description:
+ - If boot protocol is static, set this netmask on the network interface of the Virtual Machine.
+ nic_gateway:
+ description:
+ - If boot protocol is static, set this gateway on the network interface of the Virtual Machine.
+ nic_name:
+ description:
+ - Set the name of the network interface of the Virtual Machine.
+ cloud_init_nics:
+ description:
+ - List of dictionaries representing network interfaces to be set up by cloud-init.
+ - This option is used when the user needs to set up more network interfaces via cloud-init.
+ - If one network interface is enough, the user should use C(cloud_init) I(nic_*) parameters. C(cloud_init) I(nic_*) parameters
+ are merged with C(cloud_init_nics) parameters.
+ type: list
+ elements: dict
+ suboptions:
+ nic_boot_protocol:
+ description:
+ - Set the boot protocol of the network interface of the Virtual Machine. Can be one of C(none), C(dhcp) or C(static).
+ nic_ip_address:
+ description:
+ - If boot protocol is static, set this IP address on the network interface of the Virtual Machine.
+ nic_netmask:
+ description:
+ - If boot protocol is static, set this netmask on the network interface of the Virtual Machine.
+ nic_gateway:
+ description:
+ - If boot protocol is static, set this gateway on the network interface of the Virtual Machine.
+ nic_name:
+ description:
+ - Set the name of the network interface of the Virtual Machine.
+ ballooning_enabled:
+ description:
+ - "If I(true), use memory ballooning."
+ - "Memory balloon is a guest device, which may be used to re-distribute / reclaim the host memory
+ based on VM needs in a dynamic way. In this way it's possible to create memory over commitment states."
+ type: bool
+ bios_type:
+ description:
+ - "Set bios type, necessary for some operating systems and secure boot."
+ - "If no value is passed, default value is set from cluster."
+ - "NOTE - Supported since oVirt 4.3."
+ choices: [ i440fx_sea_bios, q35_ovmf, q35_sea_bios, q35_secure_boot ]
+ type: str
+ version_added: 2.0.0
+ boot_menu:
+ description:
+ - "I(True) enable menu to select boot device, I(False) to disable it. By default is chosen by oVirt/RHV engine."
+ type: bool
+ version_added: 2.0.0
+ nics:
+ description:
+ - List of NICs which should be attached to the Virtual Machine. Each NIC is described by the following dictionary.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Name of the NIC.
+ profile_name:
+ description:
+ - Name of the profile to which the NIC should be attached.
+ interface:
+ description:
+ - Type of the network interface.
+ choices: ['virtio', 'e1000', 'rtl8139']
+ default: 'virtio'
+ mac_address:
+ description:
+ - Custom MAC address of the network interface; by default it is obtained from the MAC pool.
+ sysprep:
+ description:
+ - Dictionary with values for Windows Virtual Machine initialization using sysprep.
+ type: dict
+ suboptions:
+ host_name:
+ description:
+ - Hostname to be set to Virtual Machine when deployed.
+ active_directory_ou:
+ description:
+ - Active Directory Organizational Unit to be used for user login.
+ org_name:
+ description:
+ - Organization name to be set to Windows Virtual Machine.
+ domain:
+ description:
+ - Domain to be set to Windows Virtual Machine.
+ timezone:
+ description:
+ - Timezone to be set to Windows Virtual Machine.
+ ui_language:
+ description:
+ - UI language of the Windows Virtual Machine.
+ system_locale:
+ description:
+ - System localization of the Windows Virtual Machine.
+ input_locale:
+ description:
+ - Input localization of the Windows Virtual Machine.
+ windows_license_key:
+ description:
+ - License key to be set to Windows Virtual Machine.
+ user_name:
+ description:
+ - Username whose password will be set on the Windows Virtual Machine.
+ root_password:
+ description:
+ - Password to be set for the user on the Windows Virtual Machine.
+ custom_script:
+ description:
+ - A custom Sysprep definition in the format of a complete unattended installation answer file.
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create template from vm
+- ovirt.ovirt.ovirt_template:
+ cluster: Default
+ name: mytemplate
+ vm: rhel7
+ cpu_profile: Default
+ description: Test
+
+# Import template
+- ovirt.ovirt.ovirt_template:
+ state: imported
+ name: mytemplate
+ export_domain: myexport
+ storage_domain: mystorage
+ cluster: mycluster
+
+# Remove template
+- ovirt.ovirt.ovirt_template:
+ state: absent
+ name: mytemplate
+
+# Change Template Name
+- ovirt.ovirt.ovirt_template:
+ id: 00000000-0000-0000-0000-000000000000
+ name: "new_template_name"
+
+# Register template
+- ovirt.ovirt.ovirt_template:
+ state: registered
+ storage_domain: mystorage
+ cluster: mycluster
+ name: mytemplate
+
+# Register template using id
+- ovirt.ovirt.ovirt_template:
+ state: registered
+ storage_domain: mystorage
+ cluster: mycluster
+ id: 1111-1111-1111-1111
+
+# Register template, allowing partial import
+- ovirt.ovirt.ovirt_template:
+ state: registered
+ storage_domain: mystorage
+ allow_partial_import: true
+ cluster: mycluster
+ id: 1111-1111-1111-1111
+
+# Register template with vnic profile mappings
+- ovirt.ovirt.ovirt_template:
+ state: registered
+ storage_domain: mystorage
+ cluster: mycluster
+ id: 1111-1111-1111-1111
+ vnic_profile_mappings:
+ - source_network_name: mynetwork
+ source_profile_name: mynetwork
+ target_profile_id: 3333-3333-3333-3333
+ - source_network_name: mynetwork2
+ source_profile_name: mynetwork2
+ target_profile_id: 4444-4444-4444-4444
+
+# Register template with mapping
+- ovirt.ovirt.ovirt_template:
+ state: registered
+ storage_domain: mystorage
+ cluster: mycluster
+ id: 1111-1111-1111-1111
+ role_mappings:
+ - source_name: Role_A
+ dest_name: Role_B
+ domain_mappings:
+ - source_name: Domain_A
+ dest_name: Domain_B
+ cluster_mappings:
+ - source_name: cluster_A
+ dest_name: cluster_B
+
+# Import image from Glance as a template
+- ovirt.ovirt.ovirt_template:
+ state: imported
+ name: mytemplate
+ image_disk: "centos7"
+ template_image_disk_name: centos7_from_glance
+ image_provider: "glance_domain"
+ storage_domain: mystorage
+ cluster: mycluster
+
+# Edit template subversion
+- ovirt.ovirt.ovirt_template:
+ cluster: mycluster
+ name: mytemplate
+ vm: rhel7
+ version:
+ number: 2
+ name: subversion
+
+# Create new template subversion
+- ovirt.ovirt.ovirt_template:
+ cluster: mycluster
+ name: mytemplate
+ vm: rhel7
+ version:
+ name: subversion
+
+- name: Template with cloud init
+ ovirt.ovirt.ovirt_template:
+ name: mytemplate
+ cluster: Default
+ vm: rhel8
+ memory: 1GiB
+ cloud_init:
+ dns_servers: '8.8.8.8 8.8.4.4'
+ nic_boot_protocol: static
+ nic_ip_address: 10.34.60.86
+ nic_netmask: 255.255.252.0
+ nic_gateway: 10.34.63.254
+ nic_name: eth1
+ host_name: example.com
+ custom_script: |
+ write_files:
+ - content: |
+ Hello, world!
+ path: /tmp/greeting.txt
+ permissions: '0644'
+ user_name: root
+ root_password: super_password
+
+- name: Template with cloud init, with multiple network interfaces
+ ovirt.ovirt.ovirt_template:
+ name: mytemplate
+ cluster: mycluster
+ vm: rhel8
+ cloud_init_nics:
+ - nic_name: eth0
+ nic_boot_protocol: dhcp
+ - nic_name: eth1
+ nic_boot_protocol: static
+ nic_ip_address: 10.34.60.86
+ nic_netmask: 255.255.252.0
+ nic_gateway: 10.34.63.254
+
+- name: Template with timezone and nic
+ ovirt.ovirt.ovirt_template:
+ cluster: MyCluster
+ name: mytemplate
+ vm: rhel8
+ timezone: America/Godthab
+ memory_max: 2GiB
+ nics:
+ - name: nic1
+
+- name: Template with sysprep
+ ovirt.ovirt.ovirt_template:
+ name: windows2012R2_AD
+ cluster: Default
+ vm: windows2012
+ memory: 3GiB
+ sysprep:
+ host_name: windowsad.example.com
+ user_name: Administrator
+ root_password: SuperPassword123
+
+- name: Import external ova template
+ ovirt.ovirt.ovirt_template:
+ cluster: mycluster
+ name: mytemplate
+ state: present
+ timeout: 1800
+ poll_interval: 30
+ kvm:
+ host: myhost
+ url: ova:///tmp/test.ova
+ storage_domain: mystorage
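+
+# A hypothetical export example, assuming an export storage domain named
+# myexport is attached to the data center; exclusive overwrites an existing
+# template of the same name on the export domain:
+- ovirt.ovirt.ovirt_template:
+ state: exported
+ name: mytemplate
+ export_domain: myexport
+ exclusive: true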
+'''
+
+RETURN = '''
+id:
+ description: ID of the template which is managed
+ returned: On success if template is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+template:
+ description: "Dictionary of all the template attributes. Template attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template."
+ returned: On success if template is found.
+ type: dict
+'''
+
+import time
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ convert_to_bytes,
+ create_connection,
+ equal,
+ get_dict_of_struct,
+ get_link_name,
+ get_id_by_name,
+ ovirt_full_argument_spec,
+ search_by_attributes,
+ search_by_name,
+ wait,
+)
+
+
+class TemplatesModule(BaseModule):
+
+ def __init__(self, *args, **kwargs):
+ super(TemplatesModule, self).__init__(*args, **kwargs)
+ self._initialization = None
+
+ def build_entity(self):
+ return otypes.Template(
+ id=self._module.params['id'],
+ name=self._module.params['name'],
+ cluster=otypes.Cluster(
+ name=self._module.params['cluster']
+ ) if self._module.params['cluster'] else None,
+ vm=otypes.Vm(
+ name=self._module.params['vm']
+ ) if self._module.params['vm'] else None,
+ bios=(
+ otypes.Bios(
+ boot_menu=otypes.BootMenu(enabled=self.param('boot_menu')) if self.param('boot_menu') is not None else None,
+ type=otypes.BiosType[self.param('bios_type').upper()] if self.param('bios_type') is not None else None
+ )
+ ) if self.param('boot_menu') is not None or self.param('bios_type') is not None else None,
+ description=self._module.params['description'],
+ cpu_profile=otypes.CpuProfile(
+ id=search_by_name(
+ self._connection.system_service().cpu_profiles_service(),
+ self._module.params['cpu_profile'],
+ ).id
+ ) if self._module.params['cpu_profile'] else None,
+ display=otypes.Display(
+ smartcard_enabled=self.param('smartcard_enabled')
+ ) if self.param('smartcard_enabled') is not None else None,
+ os=otypes.OperatingSystem(
+ type=self.param('operating_system'),
+ ) if self.param('operating_system') else None,
+ memory=convert_to_bytes(
+ self.param('memory')
+ ) if self.param('memory') else None,
+ soundcard_enabled=self.param('soundcard_enabled'),
+ usb=(
+ otypes.Usb(enabled=self.param('usb_support'))
+ ) if self.param('usb_support') is not None else None,
+ sso=(
+ otypes.Sso(
+ methods=[otypes.Method(id=otypes.SsoMethod.GUEST_AGENT)] if self.param('sso') else []
+ )
+ ) if self.param('sso') is not None else None,
+ time_zone=otypes.TimeZone(
+ name=self.param('timezone'),
+ ) if self.param('timezone') else None,
+ version=otypes.TemplateVersion(
+ base_template=self._get_base_template(),
+ version_name=self.param('version').get('name'),
+ ) if self.param('version') else None,
+ memory_policy=otypes.MemoryPolicy(
+ guaranteed=convert_to_bytes(self.param('memory_guaranteed')),
+ ballooning=self.param('ballooning_enabled'),
+ max=convert_to_bytes(self.param('memory_max')),
+ ) if any((
+ self.param('memory_guaranteed'),
+ self.param('ballooning_enabled'),
+ self.param('memory_max')
+ )) else None,
+ io=otypes.Io(
+ threads=self.param('io_threads'),
+ ) if self.param('io_threads') is not None else None,
+ initialization=self.get_initialization(),
+ )
+
+ def _get_base_template(self):
+ # The base template is the template with the lowest version_number.
+ # Not necessarily version 1
+ templates = self._connection.system_service().templates_service().list()
+ if not templates:
+ return None
+ template_name = self.param('name')
+ named_templates = [t for t in templates if t.name == template_name]
+ if not named_templates:
+ return None
+ base_template = min(named_templates, key=lambda x: x.version.version_number)
+ return otypes.Template(
+ id=base_template.id
+ )
+
+ def post_update(self, entity):
+ self.post_present(entity.id)
+
+ def post_present(self, entity_id):
+ # After creation of the template, attach NICs:
+ entity = self._service.service(entity_id).get()
+ self.__attach_nics(entity)
+
+ def __get_vnic_profile_id(self, nic):
+ """
+ Return the VNIC profile ID looked up by its name. Because several VNIC
+ profiles can share the same name, the cluster is used as an additional filter.
+ """
+ vnics_service = self._connection.system_service().vnic_profiles_service()
+ clusters_service = self._connection.system_service().clusters_service()
+ cluster = search_by_name(clusters_service, self.param('cluster'))
+ profiles = [
+ profile for profile in vnics_service.list()
+ if profile.name == nic.get('profile_name')
+ ]
+ cluster_networks = [
+ net.id for net in self._connection.follow_link(cluster.networks)
+ ]
+ try:
+ return next(
+ profile.id for profile in profiles
+ if profile.network.id in cluster_networks
+ )
+ except StopIteration:
+ raise Exception(
+ "Profile '%s' was not found in cluster '%s'" % (
+ nic.get('profile_name'),
+ self.param('cluster')
+ )
+ )
+
+ def __attach_nics(self, entity):
+ # Attach NICs to the template, if specified:
+ nics_service = self._service.service(entity.id).nics_service()
+ for nic in self.param('nics'):
+ if search_by_name(nics_service, nic.get('name')) is None:
+ if not self._module.check_mode:
+ nics_service.add(
+ otypes.Nic(
+ name=nic.get('name'),
+ interface=otypes.NicInterface(
+ nic.get('interface', 'virtio')
+ ),
+ vnic_profile=otypes.VnicProfile(
+ id=self.__get_vnic_profile_id(nic),
+ ) if nic.get('profile_name') else None,
+ mac=otypes.Mac(
+ address=nic.get('mac_address')
+ ) if nic.get('mac_address') else None,
+ )
+ )
+ self.changed = True
+
+ def get_initialization(self):
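+ # Build the otypes.Initialization object lazily and cache it. Note that
+ # cloud_init is appended to cloud_init_nics, so the nic_* keys of both are
+ # converted to NicConfiguration objects and the remaining cloud_init keys
+ # are passed through to Initialization as keyword arguments.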
+ if self._initialization is not None:
+ return self._initialization
+
+ sysprep = self.param('sysprep')
+ cloud_init = self.param('cloud_init')
+ cloud_init_nics = self.param('cloud_init_nics') or []
+ if cloud_init is not None:
+ cloud_init_nics.append(cloud_init)
+
+ if cloud_init or cloud_init_nics:
+ self._initialization = otypes.Initialization(
+ nic_configurations=[
+ otypes.NicConfiguration(
+ boot_protocol=otypes.BootProtocol(
+ nic.pop('nic_boot_protocol').lower()
+ ) if nic.get('nic_boot_protocol') else None,
+ name=nic.pop('nic_name', None),
+ on_boot=True,
+ ip=otypes.Ip(
+ address=nic.pop('nic_ip_address', None),
+ netmask=nic.pop('nic_netmask', None),
+ gateway=nic.pop('nic_gateway', None),
+ ) if (
+ nic.get('nic_gateway') is not None or
+ nic.get('nic_netmask') is not None or
+ nic.get('nic_ip_address') is not None
+ ) else None,
+ )
+ for nic in cloud_init_nics
+ if (
+ nic.get('nic_gateway') is not None or
+ nic.get('nic_netmask') is not None or
+ nic.get('nic_ip_address') is not None or
+ nic.get('nic_boot_protocol') is not None
+ )
+ ] if cloud_init_nics else None,
+ **cloud_init
+ )
+ elif sysprep:
+ self._initialization = otypes.Initialization(
+ **sysprep
+ )
+ return self._initialization
+
+ def update_check(self, entity):
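+ # Compare the module parameters with the current template attributes;
+ # True means the template is already up to date and no update is needed.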
+ template_display = entity.display
+ return (
+ equal(self._module.params.get('cluster'), get_link_name(self._connection, entity.cluster)) and
+ equal(self._module.params.get('description'), entity.description) and
+ equal(self.param('operating_system'), str(entity.os.type)) and
+ equal(self.param('name'), str(entity.name)) and
+ equal(self.param('smartcard_enabled'), getattr(template_display, 'smartcard_enabled', False)) and
+ equal(self.param('soundcard_enabled'), entity.soundcard_enabled) and
+ equal(self.param('ballooning_enabled'), entity.memory_policy.ballooning) and
+ equal(self.param('boot_menu'), entity.bios.boot_menu.enabled) and
+ equal(self.param('bios_type'), entity.bios.type.value) and
+ equal(self.param('sso'), True if entity.sso.methods else False) and
+ equal(self.param('timezone'), getattr(entity.time_zone, 'name', None)) and
+ equal(self.param('usb_support'), entity.usb.enabled) and
+ equal(convert_to_bytes(self.param('memory_guaranteed')), entity.memory_policy.guaranteed) and
+ equal(convert_to_bytes(self.param('memory_max')), entity.memory_policy.max) and
+ equal(convert_to_bytes(self.param('memory')), entity.memory) and
+ equal(self._module.params.get('cpu_profile'), get_link_name(self._connection, entity.cpu_profile)) and
+ equal(self.param('io_threads'), entity.io.threads)
+ )
+
+ def _get_export_domain_service(self):
+ provider_name = self._module.params['export_domain'] or self._module.params['image_provider']
+ export_sds_service = self._connection.system_service().storage_domains_service()
+ export_sd = search_by_name(export_sds_service, provider_name)
+ if export_sd is None:
+ raise ValueError(
+ "Export storage domain/Image Provider '%s' wasn't found." % provider_name
+ )
+
+ return export_sds_service.service(export_sd.id)
+
+ def post_export_action(self, entity):
+ self._service = self._get_export_domain_service().templates_service()
+
+ def post_import_action(self, entity):
+ self._service = self._connection.system_service().templates_service()
+
+
+def _get_role_mappings(module):
+ roleMappings = list()
+
+ for roleMapping in module.params['role_mappings']:
+ roleMappings.append(
+ otypes.RegistrationRoleMapping(
+ from_=otypes.Role(
+ name=roleMapping['source_name'],
+ ) if roleMapping['source_name'] else None,
+ to=otypes.Role(
+ name=roleMapping['dest_name'],
+ ) if roleMapping['dest_name'] else None,
+ )
+ )
+ return roleMappings
+
+
+def _get_domain_mappings(module):
+ domainMappings = list()
+
+ for domainMapping in module.params['domain_mappings']:
+ domainMappings.append(
+ otypes.RegistrationDomainMapping(
+ from_=otypes.Domain(
+ name=domainMapping['source_name'],
+ ) if domainMapping['source_name'] else None,
+ to=otypes.Domain(
+ name=domainMapping['dest_name'],
+ ) if domainMapping['dest_name'] else None,
+ )
+ )
+ return domainMappings
+
+
+def _get_cluster_mappings(module):
+ clusterMappings = list()
+
+ for clusterMapping in module.params['cluster_mappings']:
+ clusterMappings.append(
+ otypes.RegistrationClusterMapping(
+ from_=otypes.Cluster(
+ name=clusterMapping['source_name'],
+ ),
+ to=otypes.Cluster(
+ name=clusterMapping['dest_name'],
+ ),
+ )
+ )
+ return clusterMappings
+
+
+def _get_vnic_profile_mappings(module):
+ vnicProfileMappings = list()
+
+ for vnicProfileMapping in module.params['vnic_profile_mappings']:
+ vnicProfileMappings.append(
+ otypes.VnicProfileMapping(
+ source_network_name=vnicProfileMapping['source_network_name'],
+ source_network_profile_name=vnicProfileMapping['source_profile_name'],
+ target_vnic_profile=otypes.VnicProfile(
+ id=vnicProfileMapping['target_profile_id'],
+ ) if vnicProfileMapping['target_profile_id'] else None,
+ )
+ )
+
+ return vnicProfileMappings
+
+
+def import_template(module, connection):
+ templates_service = connection.system_service().templates_service()
+ if search_by_name(templates_service, module.params['name']) is not None:
+ return False
+
+ events_service = connection.system_service().events_service()
+ last_event = events_service.list(max=1)[0]
+
+ external_template = module.params['kvm']
+ imports_service = connection.system_service().external_template_imports_service()
+ imported_template = imports_service.add(
+ otypes.ExternalTemplateImport(
+ template=otypes.Template(
+ name=module.params['name']
+ ),
+ url=external_template.get('url'),
+ cluster=otypes.Cluster(
+ name=module.params['cluster'],
+ ) if module.params['cluster'] else None,
+ storage_domain=otypes.StorageDomain(
+ name=external_template.get('storage_domain'),
+ ) if external_template.get('storage_domain') else None,
+ host=otypes.Host(
+ name=external_template.get('host'),
+ ) if external_template.get('host') else None,
+ clone=external_template.get('clone', None),
+ )
+ )
+
+ # Wait until event with code 1158 for our template:
+ templates_service = connection.system_service().templates_service()
+ wait(
+ service=templates_service.template_service(imported_template.template.id),
+ condition=lambda tmp: len(events_service.list(
+ from_=int(last_event.id),
+ search='type=1158 and message=*%s*' % tmp.name,
+ )
+ ) > 0 if tmp is not None else False,
+ fail_condition=lambda tmp: tmp is None,
+ timeout=module.params['timeout'],
+ poll_interval=module.params['poll_interval'],
+ )
+ return True
+
+
+def find_subversion_template(module, templates_service):
+ version = module.params.get('version')
+ templates = templates_service.list()
+ for template in templates:
+ if version.get('number') == template.version.version_number and module.params.get('name') == template.name:
+ return template
+
+ # Raised when the user specifies a version number which does not exist:
+ raise ValueError(
+ "Template with name '%s' and version '%s' in cluster '%s' was not found'" % (
+ module.params['name'],
+ module.params['version']['number'],
+ module.params['cluster'],
+ )
+ )
+
+
+def searchable_attributes(module):
+ """
+ Return all searchable template attributes passed to module.
+ """
+ attributes = {
+ 'name': module.params.get('name'),
+ 'cluster': module.params.get('cluster'),
+ }
+ return dict((k, v) for k, v in attributes.items() if v is not None)
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent', 'exported', 'imported', 'registered'],
+ default='present',
+ ),
+ id=dict(default=None),
+ name=dict(default=None),
+ vm=dict(default=None),
+ timezone=dict(type='str'),
+ description=dict(default=None),
+ sso=dict(type='bool'),
+ ballooning_enabled=dict(type='bool', default=None),
+ cluster=dict(default=None),
+ usb_support=dict(type='bool'),
+ allow_partial_import=dict(default=None, type='bool'),
+ cpu_profile=dict(default=None),
+ clone_permissions=dict(type='bool'),
+ export_domain=dict(default=None),
+ storage_domain=dict(default=None),
+ exclusive=dict(type='bool'),
+ kvm=dict(type='dict'),
+ bios_type=dict(type='str', choices=['i440fx_sea_bios', 'q35_ovmf', 'q35_sea_bios', 'q35_secure_boot']),
+ boot_menu=dict(type='bool'),
+ clone_name=dict(default=None),
+ image_provider=dict(default=None),
+ soundcard_enabled=dict(type='bool', default=None),
+ smartcard_enabled=dict(type='bool', default=None),
+ image_disk=dict(default=None, aliases=['glance_image_disk_name']),
+ io_threads=dict(type='int', default=None),
+ template_image_disk_name=dict(default=None),
+ version=dict(default=None, type='dict'),
+ seal=dict(type='bool'),
+ vnic_profile_mappings=dict(default=[], type='list', elements='dict'),
+ cluster_mappings=dict(default=[], type='list', elements='dict'),
+ role_mappings=dict(default=[], type='list', elements='dict'),
+ domain_mappings=dict(default=[], type='list', elements='dict'),
+ operating_system=dict(type='str'),
+ memory=dict(type='str'),
+ memory_guaranteed=dict(type='str'),
+ memory_max=dict(type='str'),
+ nics=dict(type='list', default=[], elements='dict'),
+ cloud_init=dict(type='dict'),
+ cloud_init_nics=dict(type='list', default=[], elements='dict'),
+ sysprep=dict(type='dict'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[['id', 'name']],
+ )
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ templates_service = connection.system_service().templates_service()
+ templates_module = TemplatesModule(
+ connection=connection,
+ module=module,
+ service=templates_service,
+ )
+
+ entity = None
+ if module.params['version'] is not None and module.params['version'].get('number') is not None:
+ entity = find_subversion_template(module, templates_service)
+
+ state = module.params['state']
+ if state == 'present':
+ force_create = False
+ if entity is None and module.params['version'] is not None:
+ force_create = True
+
+ if module.params['kvm']:
+ templates_module.changed = import_template(module, connection)
+
+ ret = templates_module.create(
+ entity=entity,
+ # When the user wants to create a new template subversion, we must make sure
+ # the template is force created: it already exists, but a new version should be created.
+ force_create=force_create,
+ result_state=otypes.TemplateStatus.OK,
+ search_params=searchable_attributes(module),
+ clone_permissions=module.params['clone_permissions'],
+ seal=module.params['seal'],
+ )
+ elif state == 'absent':
+ ret = templates_module.remove(entity=entity)
+ elif state == 'exported':
+ template = templates_module.search_entity()
+ if entity is not None:
+ template = entity
+ export_service = templates_module._get_export_domain_service()
+ export_template = search_by_attributes(export_service.templates_service(), id=template.id)
+
+ ret = templates_module.action(
+ entity=template,
+ action='export',
+ action_condition=lambda t: export_template is None or module.params['exclusive'],
+ wait_condition=lambda t: t is not None,
+ post_action=templates_module.post_export_action,
+ storage_domain=otypes.StorageDomain(id=export_service.get().id),
+ exclusive=module.params['exclusive'],
+ )
+ elif state == 'imported':
+ template = templates_module.search_entity()
+ if entity is not None:
+ template = entity
+ if template and module.params['clone_name'] is None:
+ ret = templates_module.create(
+ result_state=otypes.TemplateStatus.OK,
+ )
+ else:
+ kwargs = {}
+ if module.params['image_provider']:
+ kwargs.update(
+ disk=otypes.Disk(
+ name=module.params['template_image_disk_name'] or module.params['image_disk']
+ ),
+ template=otypes.Template(
+ name=module.params['name'] if module.params['clone_name'] is None else module.params['clone_name'],
+ ),
+ clone=True if module.params['clone_name'] is not None else False,
+ import_as_template=True,
+ )
+
+ if module.params['image_disk']:
+ # We need to refresh storage domain to get list of images:
+ templates_module._get_export_domain_service().images_service().list()
+
+ glance_service = connection.system_service().openstack_image_providers_service()
+ image_provider = search_by_name(glance_service, module.params['image_provider'])
+ images_service = glance_service.service(image_provider.id).images_service()
+ else:
+ images_service = templates_module._get_export_domain_service().templates_service()
+ template_name = module.params['image_disk'] or module.params['name']
+ entity = search_by_name(images_service, template_name)
+ if entity is None:
+ raise Exception("Image/template '%s' was not found." % template_name)
+
+ images_service.service(entity.id).import_(
+ storage_domain=otypes.StorageDomain(
+ name=module.params['storage_domain']
+ ) if module.params['storage_domain'] else None,
+ cluster=otypes.Cluster(
+ name=module.params['cluster']
+ ) if module.params['cluster'] else None,
+ **kwargs
+ )
+ # Wait for template to appear in system:
+ template = templates_module.wait_for_import(
+ condition=lambda t: t.status == otypes.TemplateStatus.OK
+ )
+ if template is None:
+ raise TimeoutError("Image/template '%s' could not be imported. Try again with larger timeout." % template_name)
+ ret = templates_module.create(result_state=otypes.TemplateStatus.OK)
+ elif state == 'registered':
+ storage_domains_service = connection.system_service().storage_domains_service()
+ # Find the storage domain with unregistered template:
+ sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
+ storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
+ templates_service = storage_domain_service.templates_service()
+
+ # Find the unregistered Template we want to register:
+ templates = templates_service.list(unregistered=True)
+ template = next(
+ (t for t in templates if (t.id == module.params['id'] or t.name == module.params['name'])),
+ None
+ )
+ changed = False
+ if template is None:
+ template = templates_module.search_entity()
+ if template is None:
+ raise ValueError(
+ "Template '%s(%s)' wasn't found." % (module.params['name'], module.params['id'])
+ )
+ else:
+ # Register the template into the system:
+ changed = True
+ template_service = templates_service.template_service(template.id)
+ template_service.register(
+ allow_partial_import=module.params['allow_partial_import'],
+ cluster=otypes.Cluster(
+ name=module.params['cluster']
+ ) if module.params['cluster'] else None,
+ vnic_profile_mappings=_get_vnic_profile_mappings(module)
+ if module.params['vnic_profile_mappings'] else None,
+ registration_configuration=otypes.RegistrationConfiguration(
+ cluster_mappings=_get_cluster_mappings(module),
+ role_mappings=_get_role_mappings(module),
+ domain_mappings=_get_domain_mappings(module),
+ ) if (module.params['cluster_mappings']
+ or module.params['role_mappings']
+ or module.params['domain_mappings']) else None
+ )
+
+ if module.params['wait']:
+ template = templates_module.wait_for_import()
+ else:
+ # Fetch template to initialize return.
+ template = template_service.get()
+ ret = templates_module.create(result_state=otypes.TemplateStatus.OK)
+ ret = {
+ 'changed': changed,
+ 'id': template.id,
+ 'template': get_dict_of_struct(template)
+ }
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_template_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_template_info.py
new file mode 100644
index 000000000..8ae011382
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_template_info.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_template_info
+short_description: Retrieve information about one or more oVirt/RHV templates
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV templates."
+ - This module was called C(ovirt_template_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_template_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_templates), which
+ contains a list of templates. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search template X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+ type: str
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url: https://ovirt.github.io/ovirt-engine-api-model/master/#types/template/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all templates whose names start with C(centos) and
+# belong to data center C(west):
+- ovirt.ovirt.ovirt_template_info:
+ pattern: name=centos* and datacenter=west
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_templates }}"
+'''
+
+RETURN = '''
+ovirt_templates:
+ description: "List of dictionaries describing the templates. Template attributes are mapped to dictionary keys,
+ all templates attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+ "The 'fetch_nested' and 'nested_attributes' are deprecated please use 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ templates_service = connection.system_service().templates_service()
+ templates = templates_service.list(
+ search=module.params['pattern'],
+ follow=",".join(module.params['follow'])
+ )
+ result = dict(
+ ovirt_templates=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in templates
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_user.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_user.py
new file mode 100644
index 000000000..c0a8f0508
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_user.py
@@ -0,0 +1,204 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_user
+short_description: Module to manage users in oVirt/RHV
+version_added: "1.0.0"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage users in oVirt/RHV."
+options:
+ name:
+ description:
+ - "Name of the user to manage. In most LDAPs it's I(uid) of the user, but in Active Directory you must specify I(UPN) of the user."
+ required: true
+ type: str
+ state:
+ description:
+ - "Should the user be present/absent."
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ authz_name:
+ description:
+ - "Authorization provider of the user. In previous versions of oVirt/RHV known as domain."
+ required: true
+ aliases: ['domain']
+ type: str
+ namespace:
+ description:
+ - "Namespace where the user resides. When using the authorization provider that stores users in the LDAP server,
+ this attribute equals the naming context of the LDAP server."
+ type: str
+ ssh_public_key:
+ description:
+ - "The user public key."
+ type: str
+ version_added: 1.4.0
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add user user1 from authorization provider example.com-authz
+- ovirt.ovirt.ovirt_user:
+ name: user1
+ domain: example.com-authz
+
+# Add user user1 from authorization provider example.com-authz
+# In case of Active Directory specify UPN:
+- ovirt.ovirt.ovirt_user:
+ name: user1@ad2.example.com
+ domain: example.com-authz
+
+# Remove user user1 with authorization provider example.com-authz
+- ovirt.ovirt.ovirt_user:
+ state: absent
+ name: user1
+ authz_name: example.com-authz
+
+# Remove ssh_public_key
+- ovirt.ovirt.ovirt_user:
+ name: user1
+ authz_name: example.com-authz
+ ssh_public_key: ""
+'''
+
+RETURN = '''
+id:
+ description: ID of the user which is managed
+ returned: On success if user is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+user:
+ description: "Dictionary of all the user attributes. User attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/user."
+ returned: On success if user is found.
+ type: dict
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ check_params,
+ create_connection,
+ ovirt_full_argument_spec,
+)
+
+
+def username(module):
+ return '{0}@{1}'.format(module.params['name'], module.params['authz_name'])
+
+
+class UsersModule(BaseModule):
+
+ def build_entity(self):
+ return otypes.User(
+ domain=otypes.Domain(
+ name=self._module.params['authz_name']
+ ),
+ user_name=username(self._module),
+ principal=self._module.params['name'],
+ namespace=self._module.params['namespace'],
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ name=dict(required=True),
+ authz_name=dict(required=True, aliases=['domain']),
+ namespace=dict(default=None),
+ ssh_public_key=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+ check_params(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ users_service = connection.system_service().users_service()
+ users_module = UsersModule(
+ connection=connection,
+ module=module,
+ service=users_service,
+ )
+
+ state = module.params['state']
+ if state == 'present':
+ ret = users_module.create(
+ search_params={
+ 'usrname': username(module),
+ }
+ )
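+ # Reconcile the user's SSH public key: an empty string removes an
+ # existing key, a different value updates it, and a new value is added.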
+ if module.params['ssh_public_key'] is not None:
+ ssh_public_keys_service = users_service.user_service(ret['id']).ssh_public_keys_service()
+ ssh_public_keys = ssh_public_keys_service.list()
+ if ssh_public_keys:
+ if not module.params['ssh_public_key']:
+ ssh_public_keys_service.service(ssh_public_keys[0].id).remove()
+ ret['changed'] = True
+ elif module.params['ssh_public_key'] != ssh_public_keys[0].content:
+ ssh_public_keys_service.service(ssh_public_keys[0].id).update(otypes.SshPublicKey(content=module.params['ssh_public_key']))
+ ret['changed'] = True
+ elif module.params['ssh_public_key']:
+ ssh_public_keys_service.add(otypes.SshPublicKey(content=module.params['ssh_public_key']))
+ ret['changed'] = True
+
+ elif state == 'absent':
+ ret = users_module.remove(
+ search_params={
+ 'usrname': username(module),
+ }
+ )
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_user_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_user_info.py
new file mode 100644
index 000000000..5f76c008f
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_user_info.py
@@ -0,0 +1,130 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_user_info
+short_description: Retrieve information about one or more oVirt/RHV users
+version_added: "1.0.0"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Retrieve information about one or more oVirt/RHV users."
+ - This module was called C(ovirt_user_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_user_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_users), which
+ contains a list of users. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search user X use following pattern: name=X"
+ type: str
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url: https://ovirt.github.io/ovirt-engine-api-model/master/#types/user/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all users whose first names start with C(john):
+- ovirt.ovirt.ovirt_user_info:
+ pattern: name=john*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_users }}"
+'''
+
+RETURN = '''
+ovirt_users:
+ description: "List of dictionaries describing the users. User attributes are mapped to dictionary keys,
+ all users attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/user."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+ "The 'fetch_nested' and 'nested_attributes' are deprecated please use 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ users_service = connection.system_service().users_service()
+ users = users_service.list(
+ search=module.params['pattern'],
+ follow=",".join(module.params['follow'])
+ )
+ result = dict(
+ ovirt_users=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in users
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vm.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vm.py
new file mode 100644
index 000000000..e1374fcc2
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vm.py
@@ -0,0 +1,2917 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_vm
+short_description: Module to manage Virtual Machines in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - This module manages the whole lifecycle of a Virtual Machine (VM) in oVirt/RHV.
+ - Since a VM can hold many states in oVirt/RHV, see the notes section for how the states of the VM are handled.
+options:
+ name:
+ description:
+ - Name of the Virtual Machine to manage.
+ - If the VM doesn't exist, C(name) is required. Otherwise C(id) or C(name) can be used.
+ type: str
+ id:
+ description:
+ - ID of the Virtual Machine to manage.
+ type: str
+ state:
+ description:
+ - Should the Virtual Machine be running/stopped/present/absent/suspended/next_run/registered/exported/reboot.
+ When C(state) is I(registered) and the unregistered VM's name
+ belongs to a VM already registered in the engine in the same DC,
+ registration of the unregistered VM fails.
+ - I(present) state will create/update the VM and won't change its state if it already exists.
+ - I(running) state will create/update the VM and start it.
+ - I(next_run) state updates the VM and if the VM has a next run configuration it will be rebooted.
+ - Please check I(notes) for a more detailed description of states.
+ - I(exported) state will export the VM to an export domain or as an OVA.
+ - I(registered) is supported since 2.4.
+ - I(reboot) is supported since 2.10; the virtual machine is rebooted only if it is in the up state.
+ - I(reset) sends a reset request to a virtual machine.
+ choices: [ absent, next_run, present, registered, running, stopped, suspended, exported, reboot, reset ]
+ default: present
+ type: str
+ cluster:
+ description:
+ - Name of the cluster, where Virtual Machine should be created.
+ - Required if creating VM.
+ type: str
+ allow_partial_import:
+ description:
+ - Boolean indicating whether to allow partial registration of the Virtual Machine when C(state) is registered.
+ type: bool
+ vnic_profile_mappings:
+ description:
+ - "Mapper which maps an external virtual NIC profile to one that exists in the engine when C(state) is registered.
+ vnic_profile is described by the following dictionary:"
+ type: list
+ elements: dict
+ suboptions:
+ source_network_name:
+ description:
+ - The network name of the source network.
+ source_profile_name:
+ description:
+ - The profile name related to the source network.
+ target_profile_id:
+ description:
+ - The id of the target profile to be mapped to in the engine.
+ cluster_mappings:
+ description:
+ - "Mapper which maps cluster name between VM's OVF and the destination cluster this VM should be registered to,
+ relevant when C(state) is registered.
+ Cluster mapping is described by the following dictionary:"
+ type: list
+ elements: dict
+ suboptions:
+ source_name:
+ description:
+ - The name of the source cluster.
+ dest_name:
+ description:
+ - The name of the destination cluster.
+ role_mappings:
+ description:
+ - "Mapper which maps role name between VM's OVF and the destination role this VM should be registered to,
+ relevant when C(state) is registered.
+ Role mapping is described by the following dictionary:"
+ type: list
+ elements: dict
+ suboptions:
+ source_name:
+ description:
+ - The name of the source role.
+ dest_name:
+ description:
+ - The name of the destination role.
+ domain_mappings:
+ description:
+ - "Mapper which maps aaa domain name between VM's OVF and the destination aaa domain this VM should be registered to,
+ relevant when C(state) is registered.
+ The aaa domain mapping is described by the following dictionary:"
+ type: list
+ elements: dict
+ suboptions:
+ source_name:
+ description:
+ - The name of the source aaa domain.
+ dest_name:
+ description:
+ - The name of the destination aaa domain.
+ affinity_group_mappings:
+ type: list
+ description:
+ - "Mapper which maps affinity name between VM's OVF and the destination affinity this VM should be registered to,
+ relevant when C(state) is registered."
+ elements: dict
+ affinity_label_mappings:
+ type: list
+ description:
+ - "Mapper which maps affinity label name between VM's OVF and the destination label this VM should be registered to,
+ relevant when C(state) is registered."
+ elements: dict
+ lun_mappings:
+ description:
+ - "Mapper which maps lun between VM's OVF and the destination lun this VM should contain, relevant when C(state) is registered.
+ lun_mappings is described by the following dictionary:"
+ type: list
+ elements: dict
+ suboptions:
+ logical_unit_id:
+ description:
+ - The logical unit number to identify a logical unit.
+ logical_unit_port:
+ description:
+ - The port being used to connect with the LUN disk.
+ logical_unit_portal:
+ description:
+ - The portal being used to connect with the LUN disk.
+ logical_unit_address:
+ description:
+ - The address of the block storage host.
+ logical_unit_target:
+ description:
+ - The iSCSI specification located on an iSCSI server.
+ logical_unit_username:
+ description:
+ - Username to be used to connect to the block storage host.
+ logical_unit_password:
+ description:
+ - Password to be used to connect to the block storage host.
+ storage_type:
+ description:
+ - The storage type which the LUN resides on (iscsi or fcp).
+ reassign_bad_macs:
+ description:
+ - "Boolean indication whether to reassign bad macs when C(state) is registered."
+ type: bool
+ template:
+ description:
+ - Name of the template, which should be used to create Virtual Machine.
+ - Required if creating VM.
+ - If template is not specified and VM doesn't exist, VM will be created from I(Blank) template.
+ type: str
+ template_version:
+ description:
+ - Version number of the template to be used for VM.
+ - By default the latest available version of the template is used.
+ type: int
+ use_latest_template_version:
+ description:
+ - Specify if the latest template version should be used when running a stateless VM.
+ - If this parameter is set to I(yes) stateless VM is created.
+ type: bool
+ storage_domain:
+ description:
+ - Name of the storage domain where all template disks should be created.
+ - This parameter is considered only when C(template) is provided.
+ - IMPORTANT - This parameter is not idempotent; if the VM exists and you specify a different storage domain,
+ the disk won't move.
+ type: str
+ disk_format:
+ description:
+ - Specify format of the disk.
+ - If C(cow) format is used, the disk will be created as sparse, so space will be allocated for the volume as needed, also known as I(thin provision).
+ - If C(raw) format is used, disk storage will be allocated right away, also known as I(preallocated).
+ - Note that this option isn't idempotent as it's not currently possible to change format of the disk via API.
+ - This parameter is considered only when C(template) and C(storage_domain) are provided.
+ choices: [ cow, raw ]
+ default: cow
+ type: str
+ memory:
+ description:
+ - Amount of memory of the Virtual Machine. Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
+ - Default value is set by engine.
+ type: str
+ memory_guaranteed:
+ description:
+ - Amount of minimal guaranteed memory of the Virtual Machine.
+ Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
+ - C(memory_guaranteed) parameter can't be lower than C(memory) parameter.
+ - Default value is set by engine.
+ type: str
+ memory_max:
+ description:
+ - Upper bound of virtual machine memory up to which memory hot-plug can be performed.
+ Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
+ - Default value is set by engine.
+ type: str
+ cpu_shares:
+ description:
+ - Set CPU shares for this Virtual Machine.
+ - Default value is set by oVirt/RHV engine.
+ type: int
+ cpu_cores:
+ description:
+ - Number of virtual CPU cores of the Virtual Machine.
+ - Default value is set by oVirt/RHV engine.
+ type: int
+ cpu_sockets:
+ description:
+ - Number of virtual CPU sockets of the Virtual Machine.
+ - Default value is set by oVirt/RHV engine.
+ type: int
+ cpu_threads:
+ description:
+ - Number of threads per core of the Virtual Machine.
+ - Default value is set by oVirt/RHV engine.
+ type: int
+ type:
+ description:
+ - Type of the Virtual Machine.
+ - Default value is set by oVirt/RHV engine.
+ - I(high_performance) is supported since Ansible 2.5 and oVirt/RHV 4.2.
+ choices: [ desktop, server, high_performance ]
+ type: str
+ quota_id:
+ description:
+ - "Virtual Machine quota ID to be used for disk. By default quota is chosen by oVirt/RHV engine."
+ type: str
+ operating_system:
+ description:
+ - Operating system of the Virtual Machine, for example 'rhel_8x64'.
+ - Default value is set by oVirt/RHV engine.
+ - Use the M(ovirt.ovirt.ovirt_vm_os_info) module to obtain the current list.
+ type: str
+ boot_devices:
+ description:
+ - List of boot devices which should be used to boot. For example C([ cdrom, hd ]).
+ - Default value is set by oVirt/RHV engine.
+ choices: [ cdrom, hd, network ]
+ elements: str
+ type: list
+ boot_menu:
+ description:
+ - "I(True) enable menu to select boot device, I(False) to disable it. By default is chosen by oVirt/RHV engine."
+ type: bool
+ bios_type:
+ description:
+ - "Set bios type, necessary for some operating systems and secure boot."
+ - "If no value is passed, default value is set from cluster."
+ - "NOTE - Supported since oVirt 4.3."
+ choices: [ i440fx_sea_bios, q35_ovmf, q35_sea_bios, q35_secure_boot ]
+ type: str
+ usb_support:
+ description:
+ - "I(True) enable USB support, I(False) to disable it. By default is chosen by oVirt/RHV engine."
+ type: bool
+ serial_console:
+ description:
+ - "I(True) enable VirtIO serial console, I(False) to disable it. By default is chosen by oVirt/RHV engine."
+ type: bool
+ sso:
+ description:
+ - "I(True) enable Single Sign On by Guest Agent, I(False) to disable it. By default is chosen by oVirt/RHV engine."
+ type: bool
+ host:
+ description:
+ - Specify the host where the Virtual Machine should be running. By default the host is chosen by the engine scheduler.
+ - This parameter is used only when C(state) is I(running) or I(present).
+ type: str
+ high_availability:
+ description:
+ - If I(yes), the Virtual Machine will be set as highly available.
+ - If I(no), the Virtual Machine won't be set as highly available.
+ - If no value is passed, default value is set by oVirt/RHV engine.
+ type: bool
+ high_availability_priority:
+ description:
+ - Indicates the priority of the virtual machine inside the run and migration queues.
+ Virtual machines with higher priorities will be started and migrated before virtual machines with lower
+ priorities. The value is an integer between 0 and 100. The higher the value, the higher the priority.
+ - If no value is passed, default value is set by oVirt/RHV engine.
+ type: int
+ lease:
+ description:
+ - Name of the storage domain this virtual machine lease resides on. Pass an empty string to remove the lease.
+ - NOTE - Supported since oVirt 4.1.
+ type: str
+ custom_compatibility_version:
+ description:
+ - "Enables a virtual machine to be customized to its own compatibility version. If
+ 'C(custom_compatibility_version)' is set, it overrides the cluster's compatibility version
+ for this particular virtual machine."
+ type: str
+ host_devices:
+ description:
+ - Single Root I/O Virtualization - technology that allows a single device to expose multiple endpoints that can be passed to VMs.
+ - C(host_devices) is a list of dictionaries, each with the name and state of a device.
+ type: list
+ elements: dict
+ delete_protected:
+ description:
+ - If I(yes), the Virtual Machine will be set as delete protected.
+ - If I(no), the Virtual Machine won't be set as delete protected.
+ - If no value is passed, default value is set by oVirt/RHV engine.
+ type: bool
+ stateless:
+ description:
+ - If I(yes), the Virtual Machine will be set as stateless.
+ - If I(no), the Virtual Machine will be unset as stateless.
+ - If no value is passed, default value is set by oVirt/RHV engine.
+ type: bool
+ clone:
+ description:
+ - If I(yes) then the disks of the created virtual machine will be cloned and independent of the template.
+ - This parameter is used only when C(state) is I(running) or I(present) and VM didn't exist before.
+ type: bool
+ default: 'no'
+ clone_permissions:
+ description:
+ - If I(yes) then the permissions of the template (only the direct ones, not the inherited ones)
+ will be copied to the created virtual machine.
+ - This parameter is used only when C(state) is I(running) or I(present) and VM didn't exist before.
+ type: bool
+ default: 'no'
+ cd_iso:
+ description:
+ - ISO file from ISO storage domain which should be attached to Virtual Machine.
+ - If you have multiple ISO disks with the same name use disk ID to specify which should be used or use C(storage_domain) to filter disks.
+ - If you pass empty string the CD will be ejected from VM.
+ - If used with C(state) I(running) or I(present) and VM is running the CD will be attached to VM.
+ - If used with C(state) I(running) or I(present) and VM is down the CD will be attached to VM persistently.
+ type: str
+ force:
+ description:
+ - Please refer to the I(Synopsis) for a more detailed description of the C(force) parameter; it can behave differently
+ in different situations.
+ type: bool
+ default: 'no'
+ nics:
+ description:
+ - List of NICs, which should be attached to the Virtual Machine. Each NIC is described by the following dictionary.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Name of the NIC.
+ profile_name:
+ description:
+ - Profile name where NIC should be attached.
+ interface:
+ description:
+ - Type of the network interface.
+ choices: ['virtio', 'e1000', 'rtl8139']
+ default: 'virtio'
+ mac_address:
+ description:
+ - Custom MAC address of the network interface, by default it's obtained from MAC pool.
+ - "NOTE - This parameter is used only when C(state) is I(running) or I(present) and is able to only create NICs.
+ To manage NICs of the VM in more depth please use M(ovirt.ovirt.ovirt_nic) module instead."
+ disks:
+ description:
+ - List of disks, which should be attached to the Virtual Machine. Each disk is described by the following dictionary.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Name of the disk. Either C(name) or C(id) is required.
+ id:
+ description:
+ - ID of the disk. Either C(name) or C(id) is required.
+ interface:
+ description:
+ - Interface of the disk.
+ choices: ['virtio', 'ide']
+ default: 'virtio'
+ bootable:
+ description:
+ - I(True) if the disk should be bootable. The default is non-bootable.
+ type: bool
+ activate:
+ description:
+ - I(True) if the disk should be activated. The default is activated.
+ - "NOTE - This parameter is used only when C(state) is I(running) or I(present) and is able to only attach disks.
+ To manage disks of the VM in more depth please use M(ovirt.ovirt.ovirt_disk) module instead."
+ type: bool
+ sysprep:
+ description:
+ - Dictionary with values for Windows Virtual Machine initialization using sysprep.
+ type: dict
+ suboptions:
+ host_name:
+ description:
+ - Hostname to be set to Virtual Machine when deployed.
+ active_directory_ou:
+ description:
+ - Active Directory Organizational Unit, to be used for login of user.
+ org_name:
+ description:
+ - Organization name to be set to Windows Virtual Machine.
+ domain:
+ description:
+ - Domain to be set to Windows Virtual Machine.
+ timezone:
+ description:
+ - Timezone to be set to Windows Virtual Machine.
+ ui_language:
+ description:
+ - UI language of the Windows Virtual Machine.
+ system_locale:
+ description:
+ - System localization of the Windows Virtual Machine.
+ input_locale:
+ description:
+ - Input localization of the Windows Virtual Machine.
+ windows_license_key:
+ description:
+ - License key to be set to Windows Virtual Machine.
+ user_name:
+ description:
+ - Username to be used to set the password on the Windows Virtual Machine.
+ root_password:
+ description:
+ - Password to be set for the username on the Windows Virtual Machine.
+ custom_script:
+ description:
+ - A custom Sysprep definition in the format of a complete unattended installation answer file.
+ cloud_init:
+ description:
+ - Dictionary with values for Unix-like Virtual Machine initialization using cloud init.
+ type: dict
+ suboptions:
+ host_name:
+ description:
+ - Hostname to be set to Virtual Machine when deployed.
+ timezone:
+ description:
+ - Timezone to be set to Virtual Machine when deployed.
+ user_name:
+ description:
+ - Username to be used to set the password on the Virtual Machine when deployed.
+ root_password:
+ description:
+ - Password to be set for the user specified by the C(user_name) parameter.
+ authorized_ssh_keys:
+ description:
+ - SSH keys to be used to log in to the Virtual Machine.
+ regenerate_ssh_keys:
+ description:
+ - If I(True) SSH keys will be regenerated on Virtual Machine.
+ type: bool
+ custom_script:
+ description:
+ - Cloud-init script which will be executed on Virtual Machine when deployed.
+ - This is appended to the end of the cloud-init script generated by any other options.
+ - For further information, refer to cloud-init User-Data documentation.
+ dns_servers:
+ description:
+ - DNS servers to be configured on Virtual Machine, maximum of two, space-separated.
+ dns_search:
+ description:
+ - DNS search domains to be configured on Virtual Machine.
+ nic_boot_protocol:
+ description:
+ - Set boot protocol of the network interface of Virtual Machine.
+ choices: ['none', 'dhcp', 'static']
+ nic_ip_address:
+ description:
+ - If boot protocol is static, set this IP address to network interface of Virtual Machine.
+ nic_netmask:
+ description:
+ - If boot protocol is static, set this netmask to network interface of Virtual Machine.
+ nic_gateway:
+ description:
+ - If boot protocol is static, set this gateway to network interface of Virtual Machine.
+ nic_boot_protocol_v6:
+ description:
+ - Set boot protocol of the network interface of Virtual Machine.
+ choices: ['none', 'dhcp', 'static']
+ nic_ip_address_v6:
+ description:
+ - If boot protocol is static, set this IP address to network interface of Virtual Machine.
+ nic_netmask_v6:
+ description:
+ - If boot protocol is static, set this netmask to network interface of Virtual Machine.
+ nic_gateway_v6:
+ description:
+ - If boot protocol is static, set this gateway to network interface of Virtual Machine.
+ - For IPv6 addresses the value is an integer in the range of 0-128, which represents the subnet prefix.
+ nic_name:
+ description:
+ - Set name to network interface of Virtual Machine.
+ cloud_init_nics:
+ description:
+ - List of dictionaries representing network interfaces to be set up by cloud init.
+ - This option is used when the user needs to set up more network interfaces via cloud init.
+ - If one network interface is enough, the user should use C(cloud_init) I(nic_*) parameters. C(cloud_init) I(nic_*) parameters
+ are merged with C(cloud_init_nics) parameters.
+ type: list
+ elements: dict
+ suboptions:
+ nic_boot_protocol:
+ description:
+ - Set boot protocol of the network interface of Virtual Machine. Can be one of C(none), C(dhcp) or C(static).
+ nic_ip_address:
+ description:
+ - If boot protocol is static, set this IP address to network interface of Virtual Machine.
+ nic_netmask:
+ description:
+ - If boot protocol is static, set this netmask to network interface of Virtual Machine.
+ nic_gateway:
+ description:
+ - If boot protocol is static, set this gateway to network interface of Virtual Machine.
+ nic_boot_protocol_v6:
+ description:
+ - Set boot protocol of the network interface of Virtual Machine. Can be one of C(none), C(dhcp) or C(static).
+ nic_ip_address_v6:
+ description:
+ - If boot protocol is static, set this IP address to network interface of Virtual Machine.
+ nic_netmask_v6:
+ description:
+ - If boot protocol is static, set this netmask to network interface of Virtual Machine.
+ nic_gateway_v6:
+ description:
+ - If boot protocol is static, set this gateway to network interface of Virtual Machine.
+ - For IPv6 addresses the value is an integer in the range of 0-128, which represents the subnet prefix.
+ nic_name:
+ description:
+ - Set name to network interface of Virtual Machine.
+ cloud_init_persist:
+ description:
+ - "If I(yes) the C(cloud_init) or C(sysprep) parameters will be saved for the virtual machine
+ and the virtual machine won't be started as run-once."
+ type: bool
+ aliases: [ 'sysprep_persist' ]
+ default: 'no'
+ kernel_params_persist:
+ description:
+ - "If I(true) C(kernel_params), C(initrd_path) and C(kernel_path) will persist in virtual machine configuration,
+ if I(False) it will be used for run once."
+ type: bool
+ default: false
+ kernel_path:
+ description:
+ - Path to a kernel image used to boot the virtual machine.
+ - Kernel image must be stored on either the ISO domain or on the host's storage.
+ type: str
+ initrd_path:
+ description:
+ - Path to an initial ramdisk to be used with the kernel specified by C(kernel_path) option.
+ - Ramdisk image must be stored on either the ISO domain or on the host's storage.
+ type: str
+ kernel_params:
+ description:
+ - Kernel command line parameters (formatted as string) to be used with the kernel specified by C(kernel_path) option.
+ type: str
+ instance_type:
+ description:
+ - Name of virtual machine's hardware configuration.
+ - By default no instance type is used.
+ type: str
+ description:
+ description:
+ - Description of the Virtual Machine.
+ type: str
+ comment:
+ description:
+ - Comment of the Virtual Machine.
+ type: str
+ timezone:
+ description:
+ - Sets time zone offset of the guest hardware clock.
+ - For example C(Etc/GMT)
+ type: str
+ serial_policy:
+ description:
+ - Specify a serial number policy for the Virtual Machine.
+ - Following options are supported.
+ - C(vm) - Sets the Virtual Machine's UUID as its serial number.
+ - C(host) - Sets the host's UUID as the Virtual Machine's serial number.
+ - C(custom) - Allows you to specify a custom serial number in C(serial_policy_value).
+ choices: ['vm', 'host', 'custom']
+ type: str
+ serial_policy_value:
+ description:
+ - Allows you to specify a custom serial number.
+ - This parameter is used only when C(serial_policy) is I(custom).
+ type: str
+ vmware:
+ description:
+ - Dictionary of values to be used to connect to VMware and import
+ a virtual machine to oVirt.
+ type: dict
+ suboptions:
+ username:
+ description:
+ - The username to authenticate against the VMware.
+ password:
+ description:
+ - The password to authenticate against the VMware.
+ url:
+ description:
+ - The URL to be passed to the I(virt-v2v) tool for conversion.
+ - For example I(vpx://vmware_user@vcenter-host/DataCenter/Cluster/esxi-host?no_verify=1)
+ drivers_iso:
+ description:
+ - The name of the ISO containing drivers that can be used during the I(virt-v2v) conversion process.
+ sparse:
+ description:
+ - Specifies the disk allocation policy of the resulting virtual machine. I(true) for sparse, I(false) for preallocated.
+ type: bool
+ default: true
+ storage_domain:
+ description:
+ - Specifies the target storage domain for converted disks. This is a required parameter.
+ xen:
+ description:
+ - Dictionary of values to be used to connect to XEN and import
+ a virtual machine to oVirt.
+ type: dict
+ suboptions:
+ url:
+ description:
+ - The URL to be passed to the I(virt-v2v) tool for conversion.
+ - For example I(xen+ssh://root@zen.server). This is a required parameter.
+ drivers_iso:
+ description:
+ - The name of the ISO containing drivers that can be used during the I(virt-v2v) conversion process.
+ sparse:
+ description:
+ - Specifies the disk allocation policy of the resulting virtual machine. I(true) for sparse, I(false) for preallocated.
+ type: bool
+ default: true
+ storage_domain:
+ description:
+ - Specifies the target storage domain for converted disks. This is a required parameter.
+ kvm:
+ description:
+ - Dictionary of values to be used to connect to kvm and import
+ a virtual machine to oVirt.
+ type: dict
+ suboptions:
+ name:
+ description:
+ - The name of the KVM virtual machine.
+ username:
+ description:
+ - The username to authenticate against the KVM.
+ password:
+ description:
+ - The password to authenticate against the KVM.
+ url:
+ description:
+ - The URL to be passed to the I(virt-v2v) tool for conversion.
+ - For example I(qemu:///system). This is a required parameter.
+ drivers_iso:
+ description:
+ - The name of the ISO containing drivers that can be used during the I(virt-v2v) conversion process.
+ sparse:
+ description:
+ - Specifies the disk allocation policy of the resulting virtual machine. I(true) for sparse, I(false) for preallocated.
+ type: bool
+ default: true
+ storage_domain:
+ description:
+ - Specifies the target storage domain for converted disks. This is a required parameter.
+ cpu_mode:
+ description:
+ - "CPU mode of the virtual machine. It can be some of the following: I(host_passthrough), I(host_model) or I(custom)."
+ - "For I(host_passthrough) CPU type you need to set C(placement_policy) to I(pinned)."
+ - "If no value is passed, default value is set by oVirt/RHV engine."
+ type: str
+ placement_policy:
+ description:
+ - "The configuration of the virtual machine's placement policy."
+ - "If no value is passed, default value is set by oVirt/RHV engine."
+ - "Placement policy can be one of the following values:"
+ type: str
+ suboptions:
+ migratable:
+ description:
+ - "Allow manual and automatic migration."
+ pinned:
+ description:
+ - "Do not allow migration."
+ user_migratable:
+ description:
+ - "Allow manual migration only."
+ placement_policy_hosts:
+ description:
+ - "List of host names."
+ type: list
+ elements: str
+ ticket:
+ description:
+ - "If I(true), in addition return I(remote_vv_file) inside I(vm) dictionary, which contains compatible
+ content for remote-viewer application. Works only C(state) is I(running)."
+ type: bool
+ cpu_pinning:
+ description:
+ - "CPU Pinning topology to map virtual machine CPU to host CPU."
+ - "CPU Pinning topology is a list of dictionary which can have following values:"
+ type: list
+ elements: dict
+ suboptions:
+ cpu:
+ description:
+ - "Number of the host CPU."
+ vcpu:
+ description:
+ - "Number of the virtual machine CPU."
+ soundcard_enabled:
+ description:
+ - "If I(true), the sound card is added to the virtual machine."
+ type: bool
+ smartcard_enabled:
+ description:
+ - "If I(true), use smart card authentication."
+ type: bool
+ io_threads:
+ description:
+ - "Number of IO threads used by virtual machine. I(0) means IO threading disabled."
+ type: int
+ ballooning_enabled:
+ description:
+ - "If I(true), use memory ballooning."
+ - "Memory balloon is a guest device, which may be used to re-distribute / reclaim the host memory
+ based on VM needs in a dynamic way. In this way it's possible to create memory over commitment states."
+ type: bool
+ numa_tune_mode:
+ description:
+ - "Set how the memory allocation for NUMA nodes of this VM is applied (relevant if NUMA nodes are set for this VM)."
+ - "It can be one of the following: I(interleave), I(preferred) or I(strict)."
+ - "If no value is passed, default value is set by oVirt/RHV engine."
+ choices: ['interleave', 'preferred', 'strict']
+ type: str
+ numa_nodes:
+ description:
+ - "List of vNUMA Nodes to set for this VM and pin them to assigned host's physical NUMA node."
+ - "Each vNUMA node is described by following dictionary:"
+ type: list
+ elements: dict
+ suboptions:
+ index:
+ description:
+ - "The index of this NUMA node."
+ required: True
+ memory:
+ description:
+ - "Memory size of the NUMA node in MiB."
+ required: True
+ cores:
+ description:
+ - "List of VM CPU cores indexes to be included in this NUMA node."
+ type: list
+ elements: int
+ required: True
+ numa_node_pins:
+ description:
+ - "List of physical NUMA node indexes to pin this virtual NUMA node to."
+ type: list
+ elements: int
+ rng_device:
+ description:
+ - "Random number generator (RNG). You can choose of one the following devices I(urandom), I(random) or I(hwrng)."
+ - "In order to select I(hwrng), you must have it enabled on cluster first."
+ - "/dev/urandom is used for cluster version >= 4.1, and /dev/random for cluster version <= 4.0"
+ type: str
+ custom_properties:
+ description:
+ - "Properties sent to VDSM to configure various hooks."
+ - "Custom properties is a list of dictionary which can have following values:"
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - "Name of the custom property. For example: I(hugepages), I(vhost), I(sap_agent), etc."
+ regexp:
+ description:
+ - "Regular expression to set for custom property."
+ value:
+ description:
+ - "Value to set for custom property."
+ watchdog:
+ description:
+ - "Assign watchdog device for the virtual machine."
+ - "Watchdogs is a dictionary which can have following values:"
+ type: dict
+ suboptions:
+ model:
+ description:
+ - "Model of the watchdog device. For example: I(i6300esb), I(diag288) or I(null)."
+ action:
+ description:
+ - "Watchdog action to be performed when watchdog is triggered. For example: I(none), I(reset), I(poweroff), I(pause) or I(dump)."
+ graphical_console:
+ description:
+ - "Assign graphical console to the virtual machine."
+ type: dict
+ suboptions:
+ headless_mode:
+ description:
+ - If I(true) disable the graphics console for this virtual machine.
+ type: bool
+ protocol:
+ description:
+ - Graphical protocol, a list of I(spice), I(vnc), or both.
+ type: list
+ elements: str
+ disconnect_action:
+ description:
+ - "Returns the action that will take place when the graphic console(SPICE only) is disconnected. The options are:"
+ - I(none) No action is taken.
+ - I(lock_screen) Locks the currently active user session.
+ - I(logout) Logs out the currently active user session.
+ - I(reboot) Initiates a graceful virtual machine reboot.
+ - I(shutdown) Initiates a graceful virtual machine shutdown.
+ type: str
+ keyboard_layout:
+ description:
+ - The keyboard layout to use with this graphic console.
+ - This option is only available for the VNC console type.
+ - If no keyboard is enabled then it won't be reported.
+ type: str
+ monitors:
+ description:
+ - The number of monitors opened for this graphic console.
+ - This option is only available for the SPICE protocol.
+ - Possible values are 1, 2 or 4.
+ type: int
+ copy_paste_enabled:
+ description:
+ - Indicates whether a user is able to copy and paste content from an external host into the graphic console.
+ - This option is only available for the SPICE console type.
+ type: bool
+ file_transfer_enabled:
+ description:
+ - Indicates if a user is able to drag and drop files from an external host into the graphic console.
+ - This option is only available for the SPICE console type.
+ type: bool
+ exclusive:
+ description:
+ - "When C(state) is I(exported) this parameter indicates if the existing VM with the
+ same name should be overwritten."
+ type: bool
+ export_domain:
+ description:
+ - "When C(state) is I(exported)this parameter specifies the name of the export storage domain."
+ type: str
+ export_ova:
+ description:
+ - Dictionary of values to be used to export VM as OVA.
+ type: dict
+ suboptions:
+ host:
+ description:
+ - The name of the destination host where the OVA has to be exported.
+ directory:
+ description:
+ - The name of the directory where the OVA has to be exported.
+ filename:
+ description:
+ - The name of the exported OVA file.
+ force_migrate:
+ description:
+ - If I(true), the VM will migrate when I(placement_policy=user_migratable) but not when I(placement_policy=pinned).
+ type: bool
+ migrate:
+ description:
+ - "If I(true), the VM will migrate to any available host."
+ type: bool
+ next_run:
+ description:
+ - "If I(true), the update will not be applied to the VM immediately and will be only applied when virtual machine is restarted."
+ - NOTE - If there are multiple next run configuration changes on the VM, the first change may get reverted if this option is not passed.
+ type: bool
+ snapshot_name:
+ description:
+ - "Snapshot to clone VM from."
+ - "Snapshot with description specified should exist."
+ - "You have to specify C(snapshot_vm) parameter with virtual machine name of this snapshot."
+ type: str
+ snapshot_vm:
+ description:
+ - "Source VM to clone VM from."
+ - "VM should have snapshot specified by C(snapshot)."
+ - "If C(snapshot_name) specified C(snapshot_vm) is required."
+ type: str
+ custom_emulated_machine:
+ description:
+ - "Sets the value of the custom_emulated_machine attribute."
+ type: str
+ virtio_scsi_enabled:
+ description:
+ - "Enable Virtio SCSI support."
+ type: bool
+ version_added: 1.7.0
+ multi_queues_enabled:
+ description:
+ - "If `true`, each virtual interface will get the optimal number of queues, depending on the available virtual Cpus."
+ type: bool
+ version_added: 1.7.0
+ virtio_scsi_multi_queues:
+ description:
+ - "Number of queues for a Virtio-SCSI controller, possible values:
+ -1 - Indicates that the queues will be automatically set.
+ 0 - Indicates that the Virtio SCSI multi-queue will be disabled.
+ >0 - Number of Virtio SCSI queues to use by virtual machine."
+ type: int
+ version_added: 1.7.0
+ wait_after_lease:
+ description:
+ - "Number of seconds which should the module wait after the lease is changed."
+ type: int
+ default: 5
+ version_added: 2.1.0
+ volatile:
+ description:
+ - "Indicates that this run configuration will be discarded even in the case of guest-initiated reboot."
+ type: bool
+ version_added: 2.2.0
+notes:
+ - If the VM is in the I(UNASSIGNED) or I(UNKNOWN) state before any operation, the module will fail.
+ If the VM is in the I(IMAGE_LOCKED) state before any operation, we try to wait for the VM to be I(DOWN).
+ If the VM is in the I(SAVING_STATE) state before any operation, we try to wait for the VM to be I(SUSPENDED).
+ If the VM is in the I(POWERING_DOWN) state before any operation, we try to wait for the VM to be I(UP) or I(DOWN). The VM can
+ get into the I(UP) state from the I(POWERING_DOWN) state when there is no ACPI or guest agent running inside the VM, or
+ if the shutdown operation fails.
+ When the user specifies the I(UP) C(state), we always wait for the VM to be in the I(UP) state in case the VM is I(MIGRATING),
+ I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE) or I(WAIT_FOR_LAUNCH). In other states we run the start operation on the VM.
+ When the user specifies the I(stopped) C(state) and passes the C(force) parameter set to I(true), we forcibly stop the VM in
+ any state. If the user doesn't pass the C(force) parameter, we always wait for the VM to be in the UP state in case the VM is
+ I(MIGRATING), I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE) or I(WAIT_FOR_LAUNCH). If the VM is in the I(PAUSED) or
+ I(SUSPENDED) state, we start the VM. Then we gracefully shut down the VM.
+ When the user specifies the I(suspended) C(state), we always wait for the VM to be in the UP state in case the VM is I(MIGRATING),
+ I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE) or I(WAIT_FOR_LAUNCH). If the VM is in the I(PAUSED) or I(DOWN) state,
+ we start the VM. Then we suspend the VM.
+ When the user specifies the I(absent) C(state), we forcibly stop the VM in any state and remove it.
+ - "If you update a VM parameter that requires a reboot, the oVirt engine always creates a new snapshot for the VM,
+ and an Ansible playbook will report this as changed."
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication:
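+#
+# A minimal sketch of that pattern (the engine URL and credentials below
+# are illustrative placeholders):
+#
+# - name: Obtain SSO token
+#   ovirt.ovirt.ovirt_auth:
+#     url: https://engine.example.com/ovirt-engine/api
+#     username: admin@internal
+#     password: "{{ engine_password }}"
+#
+# - name: Reuse the token in any ovirt_vm task
+#   ovirt.ovirt.ovirt_vm:
+#     auth: "{{ ovirt_auth }}"
+#     name: myvm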
+
+- name: Creates a new Virtual Machine from template named 'rhel7_template'
+ ovirt.ovirt.ovirt_vm:
+ state: present
+ name: myvm
+ template: rhel7_template
+ cluster: mycluster
+
+- name: Register VM
+ ovirt.ovirt.ovirt_vm:
+ state: registered
+ storage_domain: mystorage
+ cluster: mycluster
+ name: myvm
+
+- name: Register VM using id
+ ovirt.ovirt.ovirt_vm:
+ state: registered
+ storage_domain: mystorage
+ cluster: mycluster
+ id: 1111-1111-1111-1111
+
+- name: Register VM, allowing partial import
+ ovirt.ovirt.ovirt_vm:
+ state: registered
+ storage_domain: mystorage
+ allow_partial_import: "True"
+ cluster: mycluster
+ id: 1111-1111-1111-1111
+
+- name: Register VM with vnic profile mappings and reassign bad macs
+ ovirt.ovirt.ovirt_vm:
+ state: registered
+ storage_domain: mystorage
+ cluster: mycluster
+ id: 1111-1111-1111-1111
+ vnic_profile_mappings:
+ - source_network_name: mynetwork
+ source_profile_name: mynetwork
+ target_profile_id: 3333-3333-3333-3333
+ - source_network_name: mynetwork2
+ source_profile_name: mynetwork2
+ target_profile_id: 4444-4444-4444-4444
+ reassign_bad_macs: "True"
+
+- name: Register VM with mappings
+ ovirt.ovirt.ovirt_vm:
+ state: registered
+ storage_domain: mystorage
+ cluster: mycluster
+ id: 1111-1111-1111-1111
+ role_mappings:
+ - source_name: Role_A
+ dest_name: Role_B
+ domain_mappings:
+ - source_name: Domain_A
+ dest_name: Domain_B
+ lun_mappings:
+ - source_storage_type: iscsi
+ source_logical_unit_id: 1IET_000d0001
+ source_logical_unit_port: 3260
+ source_logical_unit_portal: 1
+ source_logical_unit_address: 10.34.63.203
+ source_logical_unit_target: iqn.2016-08-09.brq.str-01:omachace
+ dest_storage_type: iscsi
+ dest_logical_unit_id: 1IET_000d0002
+ dest_logical_unit_port: 3260
+ dest_logical_unit_portal: 1
+ dest_logical_unit_address: 10.34.63.204
+ dest_logical_unit_target: iqn.2016-08-09.brq.str-02:omachace
+ affinity_group_mappings:
+ - source_name: Affinity_A
+ dest_name: Affinity_B
+ affinity_label_mappings:
+ - source_name: Label_A
+ dest_name: Label_B
+ cluster_mappings:
+ - source_name: cluster_A
+ dest_name: cluster_B
+
+- name: Creates a stateless VM which will always use latest template version
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ template: rhel7
+ cluster: mycluster
+ use_latest_template_version: true
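+
+# The template_version option described above pins a specific template
+# version instead of the latest one; the version number is illustrative:
+- name: Create a VM from the second version of the template
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ template: rhel7
+ template_version: 2
+ cluster: mycluster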
+
+# Creates a new server rhel7 Virtual Machine from the Blank template
+# on the brq01 cluster with 2GiB memory and 2 vcpu cores/sockets,
+# attaches a bootable disk named rhel7_disk and a virtio NIC
+- ovirt.ovirt.ovirt_vm:
+ state: present
+ cluster: brq01
+ name: myvm
+ memory: 2GiB
+ cpu_cores: 2
+ cpu_sockets: 2
+ cpu_shares: 1024
+ type: server
+ operating_system: rhel_7x64
+ disks:
+ - name: rhel7_disk
+ bootable: True
+ nics:
+ - name: nic1
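+
+# A sketch of the memory tuning options described above; the sizes are
+# illustrative, and memory_max bounds memory hot-plug:
+- name: Create VM with guaranteed, maximum and ballooned memory
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ cluster: mycluster
+ memory: 2GiB
+ memory_guaranteed: 1GiB
+ memory_max: 4GiB
+ ballooning_enabled: true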
+
+# Change VM Name
+- ovirt.ovirt.ovirt_vm:
+ id: 00000000-0000-0000-0000-000000000000
+ name: "new_vm_name"
+
+- name: Run VM with cloud init
+ ovirt.ovirt.ovirt_vm:
+ name: rhel7
+ template: rhel7
+ cluster: Default
+ memory: 1GiB
+ high_availability: true
+ high_availability_priority: 50 # Available from Ansible 2.5
+ cloud_init:
+ dns_servers: '8.8.8.8 8.8.4.4'
+ nic_boot_protocol: static
+ nic_ip_address: 10.34.60.86
+ nic_netmask: 255.255.252.0
+ nic_gateway: 10.34.63.254
+ nic_name: eth1
+ host_name: example.com
+ custom_script: |
+ write_files:
+ - content: |
+ Hello, world!
+ path: /tmp/greeting.txt
+ permissions: '0644'
+ user_name: root
+ root_password: super_password
+
+- name: Run VM with cloud init, with multiple network interfaces
+ ovirt.ovirt.ovirt_vm:
+ name: rhel7_4
+ template: rhel7
+ cluster: mycluster
+ cloud_init_nics:
+ - nic_name: eth0
+ nic_boot_protocol: dhcp
+ - nic_name: eth1
+ nic_boot_protocol: static
+ nic_ip_address: 10.34.60.86
+ nic_netmask: 255.255.252.0
+ nic_gateway: 10.34.63.254
+ # IP version 6 parameters are supported since ansible 2.9
+ - nic_name: eth2
+ nic_boot_protocol_v6: static
+ nic_ip_address_v6: '2620:52:0:2282:b898:1f69:6512:36c5'
+ nic_gateway_v6: '2620:52:0:2282:b898:1f69:6512:36c9'
+ nic_netmask_v6: '120'
+ - nic_name: eth3
+ nic_boot_protocol_v6: dhcp
+
+- name: Run VM with sysprep
+ ovirt.ovirt.ovirt_vm:
+ name: windows2012R2_AD
+ template: windows2012R2
+ cluster: Default
+ memory: 3GiB
+ high_availability: true
+ sysprep:
+ host_name: windowsad.example.com
+ user_name: Administrator
+ root_password: SuperPassword123
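+
+# By default sysprep/cloud_init run as run-once; the cloud_init_persist
+# flag (alias sysprep_persist) saves them to the VM configuration instead.
+# A minimal sketch reusing the task above:
+- name: Run VM with persisted sysprep configuration
+ ovirt.ovirt.ovirt_vm:
+ name: windows2012R2_AD
+ template: windows2012R2
+ cluster: Default
+ cloud_init_persist: true
+ sysprep:
+ host_name: windowsad.example.com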
+
+- name: Migrate/Run VM to/on host named 'host1'
+ ovirt.ovirt.ovirt_vm:
+ state: running
+ name: myvm
+ host: host1
+
+- name: Migrate/Run VM to/on host named 'host1' on cluster 'cluster1'
+ ovirt.ovirt.ovirt_vm:
+ state: running
+ name: myvm
+ host: host1
+ cluster: cluster1
+
+- name: Migrate VM to any available host
+ ovirt.ovirt.ovirt_vm:
+ state: running
+ name: myvm
+ migrate: true
+
+- name: Change VMs CD
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ cd_iso: drivers.iso
+
+- name: Eject VMs CD
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ cd_iso: ''
+
+- name: Boot VM from CD
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ cd_iso: centos7_x64.iso
+ boot_devices:
+ - cdrom
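+
+# Booting with an explicit kernel and initrd; the paths below are
+# placeholders and must point to images on the ISO domain or the host's storage:
+- name: Run VM once with a custom kernel
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ state: running
+ kernel_path: /boot/vmlinuz
+ initrd_path: /boot/initrd.img
+ kernel_params: 'ro root=/dev/vda1'
+ kernel_params_persist: false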
+
+- name: Stop vm
+ ovirt.ovirt.ovirt_vm:
+ state: stopped
+ name: myvm
+
+- name: Upgrade memory to already created VM
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ memory: 4GiB
+
+- name: Hot plug memory to already created and running VM (VM won't be restarted)
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ memory: 4GiB
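+
+# A sketch of the serial number policy options; the custom value is
+# illustrative:
+- name: Set a custom serial number on the VM
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ serial_policy: custom
+ serial_policy_value: 'CUSTOM-SERIAL-0001'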
+
+# Create/update a VM to run with two vNUMA nodes and pin them to physical NUMA nodes as follows:
+# vnuma index 0-> numa index 0, vnuma index 1-> numa index 1
+- name: Create a VM to run with two vNUMA nodes
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ cluster: mycluster
+ numa_tune_mode: "interleave"
+ numa_nodes:
+ - index: 0
+ cores: [0]
+ memory: 20
+ numa_node_pins: [0]
+ - index: 1
+ cores: [1]
+ memory: 30
+ numa_node_pins: [1]
+
+- name: Update an existing VM to run without previously created vNUMA nodes (i.e. remove all vNUMA nodes+NUMA pinning setting)
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ cluster: mycluster
+ state: "present"
+ numa_tune_mode: "interleave"
+ numa_nodes:
+ - index: -1
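+
+# Attaching a watchdog device, using model/action values from the option
+# description above:
+- name: Add watchdog to VM
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ cluster: mycluster
+ watchdog:
+ model: i6300esb
+ action: reset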
+
+# When a change to the VM requires a restart, use the next_run state.
+# The VM will be updated and rebooted if there are any changes.
+# If the present state were used, the VM wouldn't be restarted.
+- ovirt.ovirt.ovirt_vm:
+ state: next_run
+ name: myvm
+ boot_devices:
+ - network
+
+- name: Import virtual machine from VMware
+ ovirt.ovirt.ovirt_vm:
+ state: stopped
+ cluster: mycluster
+ name: vmware_win10
+ timeout: 1800
+ poll_interval: 30
+ vmware:
+ url: vpx://user@1.2.3.4/Folder1/Cluster1/2.3.4.5?no_verify=1
+ name: windows10
+ storage_domain: mynfs
+ username: user
+ password: password
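+
+# Importing from Xen works analogously to the VMware import above; the
+# URL and storage domain are placeholders:
+- name: Import virtual machine from Xen
+ ovirt.ovirt.ovirt_vm:
+ state: stopped
+ cluster: mycluster
+ name: xen_vm
+ xen:
+ url: xen+ssh://root@zen.server
+ storage_domain: mynfs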
+
+- name: Create vm from template and create all disks on specific storage domain
+ ovirt.ovirt.ovirt_vm:
+ name: vm_test
+ cluster: mycluster
+ template: mytemplate
+ storage_domain: mynfs
+ nics:
+ - name: nic1
+
+- name: Remove VM, if VM is running it will be stopped
+ ovirt.ovirt.ovirt_vm:
+ state: absent
+ name: myvm
+
+# Defining a specific quota for a VM:
+# Since Ansible 2.5
+- ovirt.ovirt.ovirt_quotas_info:
+ data_center: Default
+ name: myquota
+ register: ovirt_quotas
+- ovirt.ovirt.ovirt_vm:
+ name: myvm
+ sso: False
+ boot_menu: True
+ bios_type: q35_ovmf
+ usb_support: True
+ serial_console: True
+ quota_id: "{{ ovirt_quotas[0]['id'] }}"
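+
+# Configuring VDSM hooks through custom properties; the property name and
+# value below are illustrative:
+- name: Set custom properties on the VM
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ custom_properties:
+ - name: hugepages
+ value: '2048'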
+
+- name: Create a VM that has the console configured for both Spice and VNC
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ template: mytemplate
+ cluster: mycluster
+ graphical_console:
+ protocol:
+ - spice
+ - vnc
+
+# Execute remote viewer to VM
+- block:
+ - name: Create a ticket for console for a running VM
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ ticket: true
+ state: running
+ register: myvm
+
+ - name: Save ticket to file
+ ansible.builtin.copy:
+ content: "{{ myvm.vm.remote_vv_file }}"
+ dest: ~/vvfile.vv
+
+ - name: Run remote viewer with file
+ ansible.builtin.command: remote-viewer ~/vvfile.vv
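+
+# Disabling the graphics console entirely via the headless_mode suboption:
+- name: Create a headless VM
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ cluster: mycluster
+ graphical_console:
+ headless_mode: true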
+
+# Default value of host_device state is present
+- name: Attach host devices to virtual machine
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ host: myhost
+ placement_policy: pinned
+ host_devices:
+ - name: pci_0000_00_06_0
+ - name: pci_0000_00_07_0
+ state: absent
+ - name: pci_0000_00_08_0
+ state: present
+
+- name: Add placement policy with multiple hosts
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ placement_policy: migratable
+ placement_policy_hosts:
+ - host1
+ - host2
+
+- name: Export the VM as OVA
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ state: exported
+ cluster: mycluster
+ export_ova:
+ host: myhost
+ filename: myvm.ova
+ directory: /tmp/
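+
+# Exporting to a classic export storage domain instead of an OVA file;
+# the domain name is a placeholder:
+- name: Export the VM to an export domain, overwriting an existing copy
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ state: exported
+ cluster: mycluster
+ export_domain: myexport
+ exclusive: true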
+
+- name: Clone VM from snapshot
+ ovirt.ovirt.ovirt_vm:
+ snapshot_vm: myvm
+ snapshot_name: myvm_snap
+ name: myvm_clone
+ state: present
+
+- name: Import external ova VM
+ ovirt.ovirt.ovirt_vm:
+ cluster: mycluster
+ name: myvm
+ host: myhost
+ timeout: 1800
+ poll_interval: 30
+ kvm:
+ name: myvm
+ url: ova:///path/myvm.ova
+ storage_domain: mystorage
+
+- name: Cpu pinning of 0#12_1#13_2#14_3#15
+ ovirt.ovirt.ovirt_vm:
+ state: present
+ cluster: mycluster
+ name: myvm
+ cpu_pinning:
+ - cpu: 12
+ vcpu: 0
+ - cpu: 13
+ vcpu: 1
+ - cpu: 14
+ vcpu: 2
+ - cpu: 15
+ vcpu: 3
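+
+# Adding a VM lease on a storage domain; after changing the lease the
+# module waits wait_after_lease seconds (the domain name is a placeholder):
+- name: Add VM lease
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ high_availability: true
+ lease: mystorage
+ wait_after_lease: 10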
+'''
+
+
+RETURN = '''
+id:
+ description: ID of the VM which is managed
+ returned: On success if VM is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+vm:
+ description: "Dictionary of all the VM attributes. VM attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm.
+ Additionally when user sent ticket=true, this module will return also remote_vv_file
+ parameter in vm dictionary, which contains remote-viewer compatible file to open virtual
+ machine console. Please note that this file contains sensible information."
+ returned: On success if VM is found.
+ type: dict
+'''
+import traceback
+import time
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_params,
+ check_sdk,
+ convert_to_bytes,
+ create_connection,
+ equal,
+ get_dict_of_struct,
+ get_entity,
+ get_link_name,
+ get_id_by_name,
+ ovirt_full_argument_spec,
+ search_by_attributes,
+ search_by_name,
+ wait,
+)
+
+
+class VmsModule(BaseModule):
+
+ def __init__(self, *args, **kwargs):
+ super(VmsModule, self).__init__(*args, **kwargs)
+ self._initialization = None
+ self._is_new = False
+
+ def __get_template_with_version(self):
+ """
+ oVirt/RHV in version 4.1 doesn't support searching by template+version_number,
+ so we need to list all templates with a specific name and then iterate
+ through their versions until we find the one we are looking for.
+ """
+ template = None
+ templates_service = self._connection.system_service().templates_service()
+ if self._is_new:
+ if self.param('template'):
+ clusters_service = self._connection.system_service().clusters_service()
+ cluster = search_by_name(clusters_service, self.param('cluster'))
+ data_center = self._connection.follow_link(cluster.data_center)
+ templates = templates_service.list(
+ search='name=%s and datacenter=%s' % (self.param('template'), data_center.name)
+ )
+ if self.param('template_version'):
+ templates = [
+ t for t in templates
+ if t.version.version_number == self.param('template_version')
+ ]
+ if not templates:
+ raise ValueError(
+ "Template with name '%s' and version '%s' in data center '%s' was not found" % (
+ self.param('template'),
+ self.param('template_version'),
+ data_center.name
+ )
+ )
+ template = sorted(templates, key=lambda t: t.version.version_number, reverse=True)[0]
+ else:
+ # If template isn't specified and VM is about to be created specify default template:
+ template = templates_service.template_service('00000000-0000-0000-0000-000000000000').get()
+ else:
+ templates = templates_service.list(
+ search='vm.name=%s' % self.param('name')
+ )
+ if templates:
+ template = templates[0]
+ if self.param('template') is not None and self.param('template') != template.name:
+ raise ValueError("You can not change template of the Virtual Machine.")
+
+ return template
+
+ def __get_storage_domain_and_all_template_disks(self, template):
+
+ if self.param('template') is None:
+ return None
+
+ if self.param('storage_domain') is None:
+ return None
+
+ disks = list()
+
+ for att in self._connection.follow_link(template.disk_attachments):
+ disks.append(
+ otypes.DiskAttachment(
+ disk=otypes.Disk(
+ id=att.disk.id,
+ format=otypes.DiskFormat(self.param('disk_format')),
+ sparse=self.param('disk_format') != 'raw',
+ storage_domains=[
+ otypes.StorageDomain(
+ id=get_id_by_name(
+ self._connection.system_service().storage_domains_service(),
+ self.param('storage_domain')
+ )
+ )
+ ]
+ )
+ )
+ )
+
+ return disks
+
+ def __get_snapshot(self):
+
+ if self.param('snapshot_vm') is None:
+ return None
+
+ if self.param('snapshot_name') is None:
+ return None
+
+ vms_service = self._connection.system_service().vms_service()
+ vm_id = get_id_by_name(vms_service, self.param('snapshot_vm'))
+ vm_service = vms_service.vm_service(vm_id)
+
+ snaps_service = vm_service.snapshots_service()
+ snaps = snaps_service.list()
+ snap = next(
+ (s for s in snaps if s.description == self.param('snapshot_name')),
+ None
+ )
+ if not snap:
+ raise ValueError('Snapshot with the name "{0}" was not found.'.format(self.param('snapshot_name')))
+ return snap
+
+ def __get_placement_policy(self):
+ hosts = None
+ if self.param('placement_policy_hosts'):
+ hosts = [otypes.Host(name=host) for host in self.param('placement_policy_hosts')]
+ elif self.param('host'):
+ hosts = [otypes.Host(name=self.param('host'))]
+ if self.param('placement_policy'):
+ return otypes.VmPlacementPolicy(
+ affinity=otypes.VmAffinity(self.param('placement_policy')),
+ hosts=hosts
+ )
+ return None
+
+ def __get_cluster(self):
+ if self.param('cluster') is not None:
+ return self.param('cluster')
+ elif self.param('snapshot_name') is not None and self.param('snapshot_vm') is not None:
+ vms_service = self._connection.system_service().vms_service()
+ vm = search_by_name(vms_service, self.param('snapshot_vm'))
+ return self._connection.system_service().clusters_service().cluster_service(vm.cluster.id).get().name
+
+ def build_entity(self):
+ template = self.__get_template_with_version()
+ cluster = self.__get_cluster()
+ snapshot = self.__get_snapshot()
+ placement_policy = self.__get_placement_policy()
+ display = self.param('graphical_console') or dict()
+
+ disk_attachments = self.__get_storage_domain_and_all_template_disks(template)
+
+ return otypes.Vm(
+ id=self.param('id'),
+ name=self.param('name'),
+ cluster=otypes.Cluster(
+ name=cluster
+ ) if cluster else None,
+ disk_attachments=disk_attachments,
+ template=otypes.Template(
+ id=template.id,
+ ) if template else None,
+ use_latest_template_version=self.param('use_latest_template_version'),
+ stateless=self.param('stateless') or self.param('use_latest_template_version'),
+ delete_protected=self.param('delete_protected'),
+ custom_emulated_machine=self.param('custom_emulated_machine'),
+ bios=(
+ otypes.Bios(
+ boot_menu=otypes.BootMenu(enabled=self.param('boot_menu')) if self.param('boot_menu') is not None else None,
+ type=otypes.BiosType[self.param('bios_type').upper()] if self.param('bios_type') is not None else None
+ )
+ ) if self.param('boot_menu') is not None or self.param('bios_type') is not None else None,
+ console=(
+ otypes.Console(enabled=self.param('serial_console'))
+ ) if self.param('serial_console') is not None else None,
+ usb=(
+ otypes.Usb(enabled=self.param('usb_support'))
+ ) if self.param('usb_support') is not None else None,
+ sso=(
+ otypes.Sso(
+ methods=[otypes.Method(id=otypes.SsoMethod.GUEST_AGENT)] if self.param('sso') else []
+ )
+ ) if self.param('sso') is not None else None,
+ quota=otypes.Quota(id=self._module.params.get('quota_id')) if self.param('quota_id') is not None else None,
+ high_availability=otypes.HighAvailability(
+ enabled=self.param('high_availability'),
+ priority=self.param('high_availability_priority'),
+ ) if self.param('high_availability') is not None or self.param('high_availability_priority') else None,
+ lease=otypes.StorageDomainLease(
+ storage_domain=otypes.StorageDomain(
+ id=get_id_by_name(
+ service=self._connection.system_service().storage_domains_service(),
+ name=self.param('lease')
+ ) if self.param('lease') else None
+ )
+ ) if self.param('lease') is not None else None,
+ cpu=otypes.Cpu(
+ topology=otypes.CpuTopology(
+ cores=self.param('cpu_cores'),
+ sockets=self.param('cpu_sockets'),
+ threads=self.param('cpu_threads'),
+ ) if any((
+ self.param('cpu_cores'),
+ self.param('cpu_sockets'),
+ self.param('cpu_threads')
+ )) else None,
+ cpu_tune=otypes.CpuTune(
+ vcpu_pins=[
+ otypes.VcpuPin(vcpu=int(pin['vcpu']), cpu_set=str(pin['cpu'])) for pin in self.param('cpu_pinning')
+ ],
+ ) if self.param('cpu_pinning') else None,
+ mode=otypes.CpuMode(self.param('cpu_mode')) if self.param('cpu_mode') else None,
+ ) if any((
+ self.param('cpu_cores'),
+ self.param('cpu_sockets'),
+ self.param('cpu_threads'),
+ self.param('cpu_mode'),
+ self.param('cpu_pinning')
+ )) else None,
+ cpu_shares=self.param('cpu_shares'),
+ virtio_scsi=otypes.VirtioScsi(
+ enabled=self.param('virtio_scsi_enabled'),
+ ) if self.param('virtio_scsi_enabled') is not None else None,
+ multi_queues_enabled=self.param('multi_queues_enabled'),
+ virtio_scsi_multi_queues=self.param('virtio_scsi_multi_queues'),
+ os=otypes.OperatingSystem(
+ type=self.param('operating_system'),
+ boot=otypes.Boot(
+ devices=[
+ otypes.BootDevice(dev) for dev in self.param('boot_devices')
+ ],
+ ) if self.param('boot_devices') else None,
+ cmdline=self.param('kernel_params') if self.param('kernel_params_persist') else None,
+ initrd=self.param('initrd_path') if self.param('kernel_params_persist') else None,
+ kernel=self.param('kernel_path') if self.param('kernel_params_persist') else None,
+ ) if (
+ self.param('operating_system') or self.param('boot_devices') or self.param('kernel_params_persist')
+ ) else None,
+ type=otypes.VmType(
+ self.param('type')
+ ) if self.param('type') else None,
+ memory=convert_to_bytes(
+ self.param('memory')
+ ) if self.param('memory') else None,
+ memory_policy=otypes.MemoryPolicy(
+ guaranteed=convert_to_bytes(self.param('memory_guaranteed')),
+ ballooning=self.param('ballooning_enabled'),
+ max=convert_to_bytes(self.param('memory_max')),
+ ) if any((
+ self.param('memory_guaranteed'),
+ self.param('ballooning_enabled') is not None,
+ self.param('memory_max')
+ )) else None,
+ instance_type=otypes.InstanceType(
+ id=get_id_by_name(
+ self._connection.system_service().instance_types_service(),
+ self.param('instance_type'),
+ ),
+ ) if self.param('instance_type') else None,
+ custom_compatibility_version=otypes.Version(
+ major=self._get_major(self.param('custom_compatibility_version')),
+ minor=self._get_minor(self.param('custom_compatibility_version')),
+ ) if self.param('custom_compatibility_version') is not None else None,
+ description=self.param('description'),
+ comment=self.param('comment'),
+ time_zone=otypes.TimeZone(
+ name=self.param('timezone'),
+ ) if self.param('timezone') else None,
+ serial_number=otypes.SerialNumber(
+ policy=otypes.SerialNumberPolicy(self.param('serial_policy')),
+ value=self.param('serial_policy_value'),
+ ) if (
+ self.param('serial_policy') is not None or
+ self.param('serial_policy_value') is not None
+ ) else None,
+ placement_policy=placement_policy,
+ soundcard_enabled=self.param('soundcard_enabled'),
+ display=otypes.Display(
+ smartcard_enabled=self.param('smartcard_enabled'),
+ disconnect_action=display.get('disconnect_action'),
+ keyboard_layout=display.get('keyboard_layout'),
+ monitors=display.get('monitors'),
+ copy_paste_enabled=display.get('copy_paste_enabled'),
+ file_transfer_enabled=display.get('file_transfer_enabled'),
+ ) if (
+ self.param('smartcard_enabled') is not None or
+ display.get('copy_paste_enabled') is not None or
+ display.get('file_transfer_enabled') is not None or
+ display.get('disconnect_action') is not None or
+ display.get('keyboard_layout') is not None or
+ display.get('monitors') is not None
+ ) else None,
+ io=otypes.Io(
+ threads=self.param('io_threads'),
+ ) if self.param('io_threads') is not None else None,
+ numa_tune_mode=otypes.NumaTuneMode(
+ self.param('numa_tune_mode')
+ ) if self.param('numa_tune_mode') else None,
+ rng_device=otypes.RngDevice(
+ source=otypes.RngSource(self.param('rng_device')),
+ ) if self.param('rng_device') else None,
+ custom_properties=[
+ otypes.CustomProperty(
+ name=cp.get('name'),
+ regexp=cp.get('regexp'),
+ value=str(cp.get('value')),
+ ) for cp in self.param('custom_properties') if cp
+ ] if self.param('custom_properties') is not None else None,
+ initialization=self.get_initialization() if self.param('cloud_init_persist') else None,
+ snapshots=[otypes.Snapshot(id=snapshot.id)] if snapshot is not None else None,
+ )
+
+ def _get_export_domain_service(self):
+ provider_name = self._module.params['export_domain']
+ export_sds_service = self._connection.system_service().storage_domains_service()
+ export_sd_id = get_id_by_name(export_sds_service, provider_name)
+ return export_sds_service.service(export_sd_id)
+
+ def post_export_action(self, entity):
+ self._service = self._get_export_domain_service().vms_service()
+
+ def update_check(self, entity):
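+ # Check both the current configuration and, when one exists, the
+ # next-run configuration; report "no update needed" only if both
+ # already match the requested parameters.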
+ res = self._update_check(entity)
+ if entity.next_run_configuration_exists:
+ res = res and self._update_check(self._service.service(entity.id).get(next_run=True))
+
+ return res
+
+ def _update_check(self, entity):
+ def check_cpu_pinning():
+ if self.param('cpu_pinning'):
+ current = []
+ if entity.cpu.cpu_tune:
+ current = [(str(pin.cpu_set), int(pin.vcpu)) for pin in entity.cpu.cpu_tune.vcpu_pins]
+ passed = [(str(pin['cpu']), int(pin['vcpu'])) for pin in self.param('cpu_pinning')]
+ return sorted(current) == sorted(passed)
+ return True
+
+ def check_custom_properties():
+ if self.param('custom_properties'):
+ current = []
+ if entity.custom_properties:
+ current = [(cp.name, cp.regexp, str(cp.value)) for cp in entity.custom_properties]
+ passed = [(cp.get('name'), cp.get('regexp'), str(cp.get('value'))) for cp in self.param('custom_properties') if cp]
+ return sorted(current) == sorted(passed)
+ return True
+
+ def check_placement_policy():
+ if self.param('placement_policy'):
+ hosts = sorted(
+ map(lambda host: self._connection.follow_link(host).name,
+ entity.placement_policy.hosts if entity.placement_policy.hosts else [])
+ )
+ if self.param('placement_policy_hosts'):
+ return (
+ equal(self.param('placement_policy'), str(entity.placement_policy.affinity) if entity.placement_policy else None) and
+ equal(sorted(self.param('placement_policy_hosts')), hosts)
+ )
+ return (
+ equal(self.param('placement_policy'), str(entity.placement_policy.affinity) if entity.placement_policy else None) and
+ equal([self.param('host')], hosts)
+ )
+ return True
+
+ def check_host():
+ if self.param('host') is not None:
+ return self.param('host') in [self._connection.follow_link(host).name for host in getattr(entity.placement_policy, 'hosts', None) or []]
+ return True
+
+ def check_custom_compatibility_version():
+ if self.param('custom_compatibility_version') is not None:
+ return (self._get_minor(self.param('custom_compatibility_version')) == self._get_minor(entity.custom_compatibility_version) and
+ self._get_major(self.param('custom_compatibility_version')) == self._get_major(entity.custom_compatibility_version))
+ return True
+
+ cpu_mode = getattr(entity.cpu, 'mode')
+ vm_display = entity.display
+ provided_vm_display = self.param('graphical_console') or dict()
+ return (
+ check_cpu_pinning() and
+ check_custom_properties() and
+ check_host() and
+ check_placement_policy() and
+ check_custom_compatibility_version() and
+ not self.param('cloud_init_persist') and
+ not self.param('kernel_params_persist') and
+ equal(self.param('cluster'), get_link_name(self._connection, entity.cluster)) and equal(convert_to_bytes(self.param('memory')), entity.memory) and
+ equal(convert_to_bytes(self.param('memory_guaranteed')), getattr(entity.memory_policy, 'guaranteed', None)) and
+ equal(convert_to_bytes(self.param('memory_max')), getattr(entity.memory_policy, 'max', None)) and
+ equal(self.param('cpu_cores'), getattr(getattr(entity.cpu, 'topology', None), 'cores', None)) and
+ equal(self.param('cpu_sockets'), getattr(getattr(entity.cpu, 'topology', None), 'sockets', None)) and
+ equal(self.param('cpu_threads'), getattr(getattr(entity.cpu, 'topology', None), 'threads', None)) and
+ equal(self.param('cpu_mode'), str(cpu_mode) if cpu_mode else None) and
+ equal(self.param('type'), str(entity.type)) and
+ equal(self.param('name'), str(entity.name)) and
+ equal(self.param('operating_system'), str(getattr(entity.os, 'type', None))) and
+ equal(self.param('boot_menu'), getattr(getattr(entity.bios, 'boot_menu', None), 'enabled', None)) and
+ equal(self.param('bios_type'), getattr(getattr(entity.bios, 'type', None), 'value', None)) and
+ equal(self.param('soundcard_enabled'), entity.soundcard_enabled) and
+ equal(self.param('smartcard_enabled'), getattr(vm_display, 'smartcard_enabled', False)) and
+ equal(self.param('io_threads'), getattr(entity.io, 'threads', None)) and
+ equal(self.param('ballooning_enabled'), getattr(entity.memory_policy, 'ballooning', None)) and
+ equal(self.param('serial_console'), getattr(entity.console, 'enabled', None)) and
+ equal(self.param('usb_support'), getattr(entity.usb, 'enabled', None)) and
+ equal(self.param('sso'), True if getattr(entity.sso, 'methods', False) else False) and
+ equal(self.param('quota_id'), getattr(entity.quota, 'id', None)) and
+ equal(self.param('high_availability'), getattr(entity.high_availability, 'enabled', None)) and
+ equal(self.param('high_availability_priority'), getattr(entity.high_availability, 'priority', None)) and
+ equal(self.param('lease'), get_link_name(self._connection, getattr(entity.lease, 'storage_domain', None))) and
+ equal(self.param('stateless'), entity.stateless) and
+ equal(self.param('cpu_shares'), entity.cpu_shares) and
+ equal(self.param('delete_protected'), entity.delete_protected) and
+ equal(self.param('custom_emulated_machine'), entity.custom_emulated_machine) and
+ equal(self.param('use_latest_template_version'), entity.use_latest_template_version) and
+ equal(self.param('boot_devices'), [str(dev) for dev in getattr(getattr(entity.os, 'boot', None), 'devices', [])]) and
+ equal(self.param('instance_type'), get_link_name(self._connection, entity.instance_type), ignore_case=True) and
+ equal(self.param('description'), entity.description) and
+ equal(self.param('comment'), entity.comment) and
+ equal(self.param('timezone'), getattr(entity.time_zone, 'name', None)) and
+ equal(self.param('serial_policy'), str(getattr(entity.serial_number, 'policy', None))) and
+ equal(self.param('serial_policy_value'), getattr(entity.serial_number, 'value', None)) and
+ equal(self.param('numa_tune_mode'), str(entity.numa_tune_mode)) and
+ equal(self.param('virtio_scsi_enabled'), getattr(entity.virtio_scsi, 'enabled', None)) and
+ equal(self.param('multi_queues_enabled'), entity.multi_queues_enabled) and
+ equal(self.param('virtio_scsi_multi_queues'), entity.virtio_scsi_multi_queues) and
+ equal(self.param('rng_device'), str(entity.rng_device.source) if entity.rng_device else None) and
+ equal(provided_vm_display.get('monitors'), getattr(vm_display, 'monitors', None)) and
+ equal(provided_vm_display.get('copy_paste_enabled'), getattr(vm_display, 'copy_paste_enabled', None)) and
+ equal(provided_vm_display.get('file_transfer_enabled'), getattr(vm_display, 'file_transfer_enabled', None)) and
+ equal(provided_vm_display.get('keyboard_layout'), getattr(vm_display, 'keyboard_layout', None)) and
+ equal(provided_vm_display.get('disconnect_action'), getattr(vm_display, 'disconnect_action', None), ignore_case=True)
+ )
+
+ def pre_create(self, entity):
+ # Mark if entity exists before touching it:
+ if entity is None:
+ self._is_new = True
+
+ def post_update(self, entity):
+ self.post_present(entity.id)
+
+ def post_present(self, entity_id):
+ # After creation of the VM, attach disks and NICs:
+ entity = self._service.service(entity_id).get()
+ self.__attach_disks(entity)
+ self.__attach_nics(entity)
+ self._attach_cd(entity)
+ self.changed = self.__attach_numa_nodes(entity)
+ self.changed = self.__attach_watchdog(entity)
+ self.changed = self.__attach_graphical_console(entity)
+ self.changed = self.__attach_host_devices(entity)
+ self._wait_after_lease()
+
+ def pre_remove(self, entity):
+ # Forcibly stop the VM, if it's not in DOWN state:
+ if entity.status != otypes.VmStatus.DOWN:
+ if not self._module.check_mode:
+ self.changed = self.action(
+ action='stop',
+ action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
+ wait_condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
+ )['changed']
+
+ def _wait_after_lease(self):
+ if self.param('lease') and self.param('wait_after_lease') != 0:
+ time.sleep(self.param('wait_after_lease'))
+
+ def __suspend_shutdown_common(self, vm_service):
+ if vm_service.get().status in [
+ otypes.VmStatus.MIGRATING,
+ otypes.VmStatus.POWERING_UP,
+ otypes.VmStatus.REBOOT_IN_PROGRESS,
+ otypes.VmStatus.WAIT_FOR_LAUNCH,
+ otypes.VmStatus.UP,
+ otypes.VmStatus.RESTORING_STATE,
+ ]:
+ self._wait_for_UP(vm_service)
+
+ def _pre_shutdown_action(self, entity):
+ vm_service = self._service.vm_service(entity.id)
+ self.__suspend_shutdown_common(vm_service)
+ if entity.status in [otypes.VmStatus.SUSPENDED, otypes.VmStatus.PAUSED]:
+ vm_service.start()
+ self._wait_for_UP(vm_service)
+ return vm_service.get()
+
+ def _pre_suspend_action(self, entity):
+ vm_service = self._service.vm_service(entity.id)
+ self.__suspend_shutdown_common(vm_service)
+ if entity.status in [otypes.VmStatus.PAUSED, otypes.VmStatus.DOWN]:
+ vm_service.start()
+ self._wait_for_UP(vm_service)
+ return vm_service.get()
+
+ def _post_start_action(self, entity):
+ vm_service = self._service.service(entity.id)
+ self._wait_for_UP(vm_service)
+ self._attach_cd(vm_service.get())
+
+ def __get_cds_from_sds(self, sds):
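+ # ISO storage domains expose images via 'files', data domains via
+ # 'disks'; keep only entries matching the requested 'cd_iso' name or
+ # ID that actually contain ISO content.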
+ for sd in sds:
+ if sd.type == otypes.StorageDomainType.ISO:
+ disks = sd.files
+ elif sd.type == otypes.StorageDomainType.DATA:
+ disks = sd.disks
+ else:
+ continue
+ disks = list(filter(lambda x: (x.name == self.param('cd_iso') or x.id == self.param('cd_iso')) and
+ (sd.type == otypes.StorageDomainType.ISO or x.content_type == otypes.DiskContentType.ISO),
+ self._connection.follow_link(disks)))
+ if disks:
+ return disks
+
+ def __get_cd_id(self):
+ sds_service = self._connection.system_service().storage_domains_service()
+ sds = sds_service.list(search='name="{0}"'.format(self.param('storage_domain') if self.param('storage_domain') else "*"))
+ disks = self.__get_cds_from_sds(sds)
+        if not disks:
+            raise ValueError('Unable to find a disk with name or ID "{0}".'.format(self.param('cd_iso')))
+        if len(disks) > 1:
+            raise ValueError('Found multiple disks with the same name "{0}", please use a '
+                             'disk ID in "cd_iso" to specify which disk should be used.'.format(self.param('cd_iso')))
+ return disks[0].id
+
+ def _attach_cd(self, entity):
+ cd_iso_id = self.param('cd_iso')
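+        # None means the CD is left untouched; an empty string ejects the
+        # currently attached CD; any other value is resolved below to the ID
+        # of an ISO disk: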
+ if cd_iso_id is not None:
+ if cd_iso_id:
+ cd_iso_id = self.__get_cd_id()
+ vm_service = self._service.service(entity.id)
+ current = vm_service.get().status == otypes.VmStatus.UP and self.param('state') == 'running'
+ cdroms_service = vm_service.cdroms_service()
+ cdrom_device = cdroms_service.list()[0]
+ cdrom_service = cdroms_service.cdrom_service(cdrom_device.id)
+ cdrom = cdrom_service.get(current=current)
+ if getattr(cdrom.file, 'id', '') != cd_iso_id:
+ if not self._module.check_mode:
+ cdrom_service.update(
+ cdrom=otypes.Cdrom(
+ file=otypes.File(id=cd_iso_id)
+ ),
+ current=current,
+ )
+ self.changed = True
+
+ return entity
+
+ def _migrate_vm(self, entity):
+ vm_host = self.param('host')
+ vm_service = self._service.vm_service(entity.id)
+        # Only a VM that is already UP can be migrated:
+ if entity.status == otypes.VmStatus.UP:
+ if vm_host is not None:
+ hosts_service = self._connection.system_service().hosts_service()
+ clusters_service = self._connection.system_service().clusters_service()
+ current_vm_host = hosts_service.host_service(entity.host.id).get().name
+ if vm_host != current_vm_host:
+ if not self._module.check_mode:
+ vm_service.migrate(
+ cluster=search_by_name(clusters_service, self.param('cluster')),
+ host=otypes.Host(name=vm_host),
+ force=self.param('force_migrate')
+ )
+ self._wait_for_UP(vm_service)
+ self.changed = True
+ elif self.param('migrate'):
+ if not self._module.check_mode:
+ vm_service.migrate(force=self.param('force_migrate'))
+ self._wait_for_UP(vm_service)
+ self.changed = True
+ return entity
+
+ def _wait_for_UP(self, vm_service):
+ wait(
+ service=vm_service,
+ condition=lambda vm: vm.status == otypes.VmStatus.UP,
+ wait=self.param('wait'),
+ timeout=self.param('timeout'),
+ )
+
+ def _wait_for_vm_disks(self, vm_service):
+ disks_service = self._connection.system_service().disks_service()
+ for da in vm_service.disk_attachments_service().list():
+ disk_service = disks_service.disk_service(da.disk.id)
+ wait(
+ service=disk_service,
+ condition=lambda disk: disk.status == otypes.DiskStatus.OK if disk.storage_type == otypes.DiskStorageType.IMAGE else True,
+ wait=self.param('wait'),
+ timeout=self.param('timeout'),
+ )
+
+ def wait_for_down(self, vm):
+ """
+        Wait for the VM to reach the DOWN status. For stateless VMs, then
+        find the active snapshot and wait until its state is OK and the
+        stateless snapshot has been removed.
+ """
+ vm_service = self._service.vm_service(vm.id)
+ wait(
+ service=vm_service,
+ condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
+ wait=self.param('wait'),
+ timeout=self.param('timeout'),
+ )
+ if vm.stateless:
+ snapshots_service = vm_service.snapshots_service()
+ snapshots = snapshots_service.list()
+ snap_active = [
+ snap for snap in snapshots
+ if snap.snapshot_type == otypes.SnapshotType.ACTIVE
+ ][0]
+ snap_stateless = [
+ snap for snap in snapshots
+ if snap.snapshot_type == otypes.SnapshotType.STATELESS
+ ]
+            # Stateless snapshot may already be removed:
+            if snap_stateless:
+                """
+                Wait for the active snapshot to be removed, as it is the
+                current stateless snapshot. Then wait for the stateless
+                snapshot to reach the OK state, because it will become the
+                new active snapshot.
+                """
+ wait(
+ service=snapshots_service.snapshot_service(snap_active.id),
+ condition=lambda snap: snap is None,
+ wait=self.param('wait'),
+ timeout=self.param('timeout'),
+ )
+ wait(
+ service=snapshots_service.snapshot_service(snap_stateless[0].id),
+ condition=lambda snap: snap.snapshot_status == otypes.SnapshotStatus.OK,
+ wait=self.param('wait'),
+ timeout=self.param('timeout'),
+ )
+ return True
+
+ def __attach_graphical_console(self, entity):
+ graphical_console = self.param('graphical_console')
+ if not graphical_console:
+ return False
+
+ vm_service = self._service.service(entity.id)
+ gcs_service = vm_service.graphics_consoles_service()
+ graphical_consoles = gcs_service.list()
+
+        # Headless mode requested: remove all existing graphical consoles:
+ if bool(graphical_console.get('headless_mode')):
+ if not self._module.check_mode:
+ for gc in graphical_consoles:
+ gcs_service.console_service(gc.id).remove()
+ return len(graphical_consoles) > 0
+
+        # If there are no graphical consoles yet, add the requested ones:
+ protocol = graphical_console.get('protocol')
+ current_protocols = [str(gc.protocol) for gc in graphical_consoles]
+ if not current_protocols:
+ if not self._module.check_mode:
+ for p in protocol:
+ gcs_service.add(
+ otypes.GraphicsConsole(
+ protocol=otypes.GraphicsType(p),
+ )
+ )
+ return True
+
+ # Update consoles:
+ if protocol is not None and sorted(protocol) != sorted(current_protocols):
+ if not self._module.check_mode:
+ for gc in graphical_consoles:
+ gcs_service.console_service(gc.id).remove()
+ for p in protocol:
+ gcs_service.add(
+ otypes.GraphicsConsole(
+ protocol=otypes.GraphicsType(p),
+ )
+ )
+ return True
+
+ def __attach_disks(self, entity):
+ if not self.param('disks'):
+ return
+
+ vm_service = self._service.service(entity.id)
+ disks_service = self._connection.system_service().disks_service()
+ disk_attachments_service = vm_service.disk_attachments_service()
+
+ self._wait_for_vm_disks(vm_service)
+ for disk in self.param('disks'):
+ # If disk ID is not specified, find disk by name:
+ disk_id = disk.get('id')
+ if disk_id is None:
+ disk_id = getattr(
+ search_by_name(
+ service=disks_service,
+ name=disk.get('name')
+ ),
+ 'id',
+ None
+ )
+
+ # Attach disk to VM:
+ disk_attachment = disk_attachments_service.attachment_service(disk_id)
+ if get_entity(disk_attachment) is None:
+ if not self._module.check_mode:
+ disk_attachments_service.add(
+ otypes.DiskAttachment(
+ disk=otypes.Disk(
+ id=disk_id,
+ ),
+ active=disk.get('activate', True),
+ interface=otypes.DiskInterface(
+ disk.get('interface', 'virtio')
+ ),
+ bootable=disk.get('bootable', False),
+ )
+ )
+ self.changed = True
+
+ def __get_vnic_profile_id(self, nic):
+ """
+        Return the VNIC profile ID looked up by name. Because multiple VNIC
+        profiles can share the same name, the cluster is used as an
+        additional filter criterion.
+ """
+ vnics_service = self._connection.system_service().vnic_profiles_service()
+ clusters_service = self._connection.system_service().clusters_service()
+ cluster = search_by_name(clusters_service, self.param('cluster'))
+ profiles = [
+ profile for profile in vnics_service.list()
+ if profile.name == nic.get('profile_name')
+ ]
+ cluster_networks = [
+ net.id for net in self._connection.follow_link(cluster.networks)
+ ]
+ try:
+ return next(
+ profile.id for profile in profiles
+ if profile.network.id in cluster_networks
+ )
+ except StopIteration:
+ raise Exception(
+ "Profile '%s' was not found in cluster '%s'" % (
+ nic.get('profile_name'),
+ self.param('cluster')
+ )
+ )
+
+ def __get_numa_serialized(self, numa):
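+        # Serialize NUMA nodes into sorted, comparable tuples of
+        # (index, core indexes, memory, node pins):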
+ return sorted([(x.index,
+ [y.index for y in x.cpu.cores] if x.cpu else [],
+ x.memory,
+ [y.index for y in x.numa_node_pins] if x.numa_node_pins else []
+ ) for x in numa], key=lambda x: x[0])
+
+ def __attach_numa_nodes(self, entity):
+ numa_nodes_service = self._service.service(entity.id).numa_nodes_service()
+ existed_numa_nodes = numa_nodes_service.list()
+ if len(self.param('numa_nodes')) > 0:
+ # Remove all existing virtual numa nodes before adding new ones
+ for current_numa_node in sorted(existed_numa_nodes, reverse=True, key=lambda x: x.index):
+ numa_nodes_service.node_service(current_numa_node.id).remove()
+
+ for numa_node in self.param('numa_nodes'):
+ if numa_node is None or numa_node.get('index') is None or numa_node.get('cores') is None or numa_node.get('memory') is None:
+ continue
+
+ numa_nodes_service.add(
+ otypes.VirtualNumaNode(
+ index=numa_node.get('index'),
+ memory=numa_node.get('memory'),
+ cpu=otypes.Cpu(
+ cores=[
+ otypes.Core(
+ index=core
+ ) for core in numa_node.get('cores')
+ ],
+ ),
+ numa_node_pins=[
+ otypes.NumaNodePin(
+ index=pin
+ ) for pin in numa_node.get('numa_node_pins')
+ ] if numa_node.get('numa_node_pins') is not None else None,
+ )
+ )
+ return self.__get_numa_serialized(numa_nodes_service.list()) != self.__get_numa_serialized(existed_numa_nodes)
+
+ def __attach_watchdog(self, entity):
+ watchdogs_service = self._service.service(entity.id).watchdogs_service()
+ watchdog = self.param('watchdog')
+ if watchdog is not None:
+ current_watchdog = next(iter(watchdogs_service.list()), None)
+ if watchdog.get('model') is None and current_watchdog:
+ watchdogs_service.watchdog_service(current_watchdog.id).remove()
+ return True
+ elif watchdog.get('model') is not None and current_watchdog is None:
+ watchdogs_service.add(
+ otypes.Watchdog(
+ model=otypes.WatchdogModel(watchdog.get('model').lower()),
+ action=otypes.WatchdogAction(watchdog.get('action')),
+ )
+ )
+ return True
+ elif current_watchdog is not None:
+ if (
+ str(current_watchdog.model).lower() != watchdog.get('model').lower() or
+ str(current_watchdog.action).lower() != watchdog.get('action').lower()
+ ):
+ watchdogs_service.watchdog_service(current_watchdog.id).update(
+ otypes.Watchdog(
+ model=otypes.WatchdogModel(watchdog.get('model')),
+ action=otypes.WatchdogAction(watchdog.get('action')),
+ )
+ )
+ return True
+ return False
+
+ def __attach_nics(self, entity):
+ # Attach NICs to VM, if specified:
+ nics_service = self._service.service(entity.id).nics_service()
+ for nic in self.param('nics'):
+ if search_by_name(nics_service, nic.get('name')) is None:
+ if not self._module.check_mode:
+ nics_service.add(
+ otypes.Nic(
+ name=nic.get('name'),
+ interface=otypes.NicInterface(
+ nic.get('interface', 'virtio')
+ ),
+ vnic_profile=otypes.VnicProfile(
+ id=self.__get_vnic_profile_id(nic),
+ ) if nic.get('profile_name') else None,
+ mac=otypes.Mac(
+ address=nic.get('mac_address')
+ ) if nic.get('mac_address') else None,
+ )
+ )
+ self.changed = True
+
+ def get_initialization(self):
+ if self._initialization is not None:
+ return self._initialization
+
+ sysprep = self.param('sysprep')
+ cloud_init = self.param('cloud_init')
+ cloud_init_nics = self.param('cloud_init_nics') or []
+ if cloud_init is not None:
+ cloud_init_nics.append(cloud_init)
+
+ if cloud_init or cloud_init_nics:
+ self._initialization = otypes.Initialization(
+ nic_configurations=[
+ otypes.NicConfiguration(
+ boot_protocol=otypes.BootProtocol(
+ nic.pop('nic_boot_protocol').lower()
+ ) if nic.get('nic_boot_protocol') else None,
+ ipv6_boot_protocol=otypes.BootProtocol(
+ nic.pop('nic_boot_protocol_v6').lower()
+ ) if nic.get('nic_boot_protocol_v6') else None,
+ name=nic.pop('nic_name', None),
+ on_boot=True,
+ ip=otypes.Ip(
+ address=nic.pop('nic_ip_address', None),
+ netmask=nic.pop('nic_netmask', None),
+ gateway=nic.pop('nic_gateway', None),
+ version=otypes.IpVersion('v4')
+ ) if (
+ nic.get('nic_gateway') is not None or
+ nic.get('nic_netmask') is not None or
+ nic.get('nic_ip_address') is not None
+ ) else None,
+ ipv6=otypes.Ip(
+ address=nic.pop('nic_ip_address_v6', None),
+ netmask=nic.pop('nic_netmask_v6', None),
+ gateway=nic.pop('nic_gateway_v6', None),
+ version=otypes.IpVersion('v6')
+ ) if (
+ nic.get('nic_gateway_v6') is not None or
+ nic.get('nic_netmask_v6') is not None or
+ nic.get('nic_ip_address_v6') is not None
+ ) else None,
+ )
+ for nic in cloud_init_nics
+ if (
+ nic.get('nic_boot_protocol_v6') is not None or
+ nic.get('nic_ip_address_v6') is not None or
+ nic.get('nic_gateway_v6') is not None or
+ nic.get('nic_netmask_v6') is not None or
+ nic.get('nic_gateway') is not None or
+ nic.get('nic_netmask') is not None or
+ nic.get('nic_ip_address') is not None or
+ nic.get('nic_boot_protocol') is not None
+ )
+ ] if cloud_init_nics else None,
+                **(cloud_init or dict())
+ )
+ elif sysprep:
+ self._initialization = otypes.Initialization(
+ **sysprep
+ )
+ return self._initialization
+
+ def __attach_host_devices(self, entity):
+ vm_service = self._service.service(entity.id)
+ host_devices_service = vm_service.host_devices_service()
+ host_devices = self.param('host_devices')
+ updated = False
+ if host_devices:
+ device_names = [dev.name for dev in host_devices_service.list()]
+ for device in host_devices:
+ device_name = device.get('name')
+ state = device.get('state', 'present')
+ if state == 'absent' and device_name in device_names:
+ updated = True
+ if not self._module.check_mode:
+ device_id = get_id_by_name(host_devices_service, device.get('name'))
+ host_devices_service.device_service(device_id).remove()
+
+ elif state == 'present' and device_name not in device_names:
+ updated = True
+ if not self._module.check_mode:
+ host_devices_service.add(
+ otypes.HostDevice(
+ name=device.get('name'),
+ )
+ )
+
+ return updated
+
+
+def _get_role_mappings(module):
+ roleMappings = list()
+ for roleMapping in module.params['role_mappings']:
+ roleMappings.append(
+ otypes.RegistrationRoleMapping(
+ from_=otypes.Role(
+ name=roleMapping['source_name'],
+ ) if roleMapping['source_name'] else None,
+ to=otypes.Role(
+ name=roleMapping['dest_name'],
+ ) if roleMapping['dest_name'] else None,
+ )
+ )
+ return roleMappings
+
+
+def _get_affinity_group_mappings(module):
+ affinityGroupMappings = list()
+
+ for affinityGroupMapping in module.params['affinity_group_mappings']:
+ affinityGroupMappings.append(
+ otypes.RegistrationAffinityGroupMapping(
+ from_=otypes.AffinityGroup(
+ name=affinityGroupMapping['source_name'],
+ ) if affinityGroupMapping['source_name'] else None,
+ to=otypes.AffinityGroup(
+ name=affinityGroupMapping['dest_name'],
+ ) if affinityGroupMapping['dest_name'] else None,
+ )
+ )
+ return affinityGroupMappings
+
+
+def _get_affinity_label_mappings(module):
+ affinityLabelMappings = list()
+
+ for affinityLabelMapping in module.params['affinity_label_mappings']:
+ affinityLabelMappings.append(
+ otypes.RegistrationAffinityLabelMapping(
+ from_=otypes.AffinityLabel(
+ name=affinityLabelMapping['source_name'],
+ ) if affinityLabelMapping['source_name'] else None,
+ to=otypes.AffinityLabel(
+ name=affinityLabelMapping['dest_name'],
+ ) if affinityLabelMapping['dest_name'] else None,
+ )
+ )
+ return affinityLabelMappings
+
+
+def _get_domain_mappings(module):
+ domainMappings = list()
+
+ for domainMapping in module.params['domain_mappings']:
+ domainMappings.append(
+ otypes.RegistrationDomainMapping(
+ from_=otypes.Domain(
+ name=domainMapping['source_name'],
+ ) if domainMapping['source_name'] else None,
+ to=otypes.Domain(
+ name=domainMapping['dest_name'],
+ ) if domainMapping['dest_name'] else None,
+ )
+ )
+ return domainMappings
+
+
+def _get_lun_mappings(module):
+ lunMappings = list()
+ for lunMapping in module.params['lun_mappings']:
+ lunMappings.append(
+ otypes.RegistrationLunMapping(
+ from_=otypes.Disk(
+ lun_storage=otypes.HostStorage(
+ type=otypes.StorageType(lunMapping['source_storage_type'])
+ if (lunMapping['source_storage_type'] in
+ ['iscsi', 'fcp']) else None,
+ logical_units=[
+ otypes.LogicalUnit(
+ id=lunMapping['source_logical_unit_id'],
+ )
+ ],
+ ),
+ ) if lunMapping['source_logical_unit_id'] else None,
+ to=otypes.Disk(
+ lun_storage=otypes.HostStorage(
+ type=otypes.StorageType(lunMapping['dest_storage_type'])
+ if (lunMapping['dest_storage_type'] in
+ ['iscsi', 'fcp']) else None,
+ logical_units=[
+ otypes.LogicalUnit(
+ id=lunMapping.get('dest_logical_unit_id'),
+ port=lunMapping.get('dest_logical_unit_port'),
+ portal=lunMapping.get('dest_logical_unit_portal'),
+ address=lunMapping.get('dest_logical_unit_address'),
+ target=lunMapping.get('dest_logical_unit_target'),
+ password=lunMapping.get('dest_logical_unit_password'),
+ username=lunMapping.get('dest_logical_unit_username'),
+ )
+ ],
+ ),
+ ) if lunMapping['dest_logical_unit_id'] else None,
+ ),
+        )
+ return lunMappings
+
+
+def _get_cluster_mappings(module):
+ clusterMappings = list()
+
+ for clusterMapping in module.params['cluster_mappings']:
+ clusterMappings.append(
+ otypes.RegistrationClusterMapping(
+ from_=otypes.Cluster(
+ name=clusterMapping['source_name'],
+ ),
+ to=otypes.Cluster(
+ name=clusterMapping['dest_name'],
+ ) if clusterMapping['dest_name'] else None,
+ )
+ )
+ return clusterMappings
+
+
+def _get_vnic_profile_mappings(module):
+ vnicProfileMappings = list()
+
+ for vnicProfileMapping in module.params['vnic_profile_mappings']:
+ vnicProfileMappings.append(
+ otypes.VnicProfileMapping(
+ source_network_name=vnicProfileMapping['source_network_name'],
+ source_network_profile_name=vnicProfileMapping['source_profile_name'],
+ target_vnic_profile=otypes.VnicProfile(
+ id=vnicProfileMapping['target_profile_id'],
+ ) if vnicProfileMapping['target_profile_id'] else None,
+ )
+ )
+
+ return vnicProfileMappings
+
+
+def import_vm(module, connection):
+ vms_service = connection.system_service().vms_service()
+ if search_by_name(vms_service, module.params['name']) is not None:
+ return False
+
+ events_service = connection.system_service().events_service()
+ last_event = events_service.list(max=1)[0]
+
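+    # Exactly one of the kvm/xen/vmware parameters is expected to be set;
+    # it selects the external provider type: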
+ external_type = [
+ tmp for tmp in ['kvm', 'xen', 'vmware']
+ if module.params[tmp] is not None
+ ][0]
+
+ external_vm = module.params[external_type]
+ imports_service = connection.system_service().external_vm_imports_service()
+ imported_vm = imports_service.add(
+ otypes.ExternalVmImport(
+ vm=otypes.Vm(
+ name=module.params['name']
+ ),
+ name=external_vm.get('name'),
+ username=external_vm.get('username', 'test'),
+ password=external_vm.get('password', 'test'),
+ provider=otypes.ExternalVmProviderType(external_type),
+ url=external_vm.get('url'),
+ cluster=otypes.Cluster(
+ name=module.params['cluster'],
+ ) if module.params['cluster'] else None,
+ storage_domain=otypes.StorageDomain(
+ name=external_vm.get('storage_domain'),
+ ) if external_vm.get('storage_domain') else None,
+ sparse=external_vm.get('sparse', True),
+ host=otypes.Host(
+ name=module.params['host'],
+ ) if module.params['host'] else None,
+ )
+ )
+
+    # Wait until an event with code 1152 appears for our VM:
+ vms_service = connection.system_service().vms_service()
+ wait(
+ service=vms_service.vm_service(imported_vm.vm.id),
+ condition=lambda vm: len(events_service.list(
+ from_=int(last_event.id),
+ search='type=1152 and vm.id=%s' % vm.id,
+ )
+ ) > 0 if vm is not None else False,
+ fail_condition=lambda vm: vm is None,
+ timeout=module.params['timeout'],
+ poll_interval=module.params['poll_interval'],
+ )
+ return True
+
+
+def control_state(vm, vms_service, module):
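+    # Bring the VM into a state from which the requested state change can be
+    # performed, or fail early for uncontrollable states: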
+ if vm is None:
+ return
+
+ force = module.params['force']
+ state = module.params['state']
+
+ vm_service = vms_service.vm_service(vm.id)
+ if vm.status == otypes.VmStatus.IMAGE_LOCKED:
+ wait(
+ service=vm_service,
+ condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
+ )
+ elif vm.status == otypes.VmStatus.SAVING_STATE:
+ # Result state is SUSPENDED, we should wait to be suspended:
+ wait(
+ service=vm_service,
+ condition=lambda vm: vm.status == otypes.VmStatus.SUSPENDED,
+ )
+ elif (
+ vm.status == otypes.VmStatus.UNASSIGNED or
+ vm.status == otypes.VmStatus.UNKNOWN
+ ):
+ # Invalid states:
+        module.fail_json(msg="Cannot control the VM while it is in the '{0}' status".format(vm.status))
+ elif vm.status == otypes.VmStatus.POWERING_DOWN:
+ if (force and state == 'stopped') or state == 'absent':
+ vm_service.stop()
+ wait(
+ service=vm_service,
+ condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
+ )
+ else:
+            # If the VM is powering down, wait until it is DOWN or UP.
+            # The VM can end up in the UP state if there is no guest agent
+            # or ACPI on the VM, or if the shutdown operation crashed:
+ wait(
+ service=vm_service,
+ condition=lambda vm: vm.status in [otypes.VmStatus.DOWN, otypes.VmStatus.UP],
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(type='str', default='present', choices=[
+ 'absent', 'next_run', 'present', 'registered', 'running', 'stopped', 'suspended', 'exported', 'reboot', 'reset'
+ ]),
+ name=dict(type='str'),
+ id=dict(type='str'),
+ cluster=dict(type='str'),
+ allow_partial_import=dict(type='bool'),
+ template=dict(type='str'),
+ template_version=dict(type='int'),
+ use_latest_template_version=dict(type='bool'),
+ storage_domain=dict(type='str'),
+ disk_format=dict(type='str', default='cow', choices=['cow', 'raw']),
+ disks=dict(type='list', default=[], elements='dict'),
+ memory=dict(type='str'),
+ memory_guaranteed=dict(type='str'),
+ memory_max=dict(type='str'),
+ cpu_sockets=dict(type='int'),
+ cpu_cores=dict(type='int'),
+ cpu_shares=dict(type='int'),
+ cpu_threads=dict(type='int'),
+ type=dict(type='str', choices=['server', 'desktop', 'high_performance']),
+ operating_system=dict(type='str'),
+ cd_iso=dict(type='str'),
+ boot_devices=dict(type='list', choices=['cdrom', 'hd', 'network'], elements='str'),
+ vnic_profile_mappings=dict(default=[], type='list', elements='dict'),
+ cluster_mappings=dict(default=[], type='list', elements='dict'),
+ role_mappings=dict(default=[], type='list', elements='dict'),
+ affinity_group_mappings=dict(default=[], type='list', elements='dict'),
+ affinity_label_mappings=dict(default=[], type='list', elements='dict'),
+ lun_mappings=dict(default=[], type='list', elements='dict'),
+ domain_mappings=dict(default=[], type='list', elements='dict'),
+ reassign_bad_macs=dict(default=None, type='bool'),
+ boot_menu=dict(type='bool'),
+ bios_type=dict(type='str', choices=['i440fx_sea_bios', 'q35_ovmf', 'q35_sea_bios', 'q35_secure_boot']),
+ serial_console=dict(type='bool'),
+ usb_support=dict(type='bool'),
+ sso=dict(type='bool'),
+ quota_id=dict(type='str'),
+ high_availability=dict(type='bool'),
+ high_availability_priority=dict(type='int'),
+ lease=dict(type='str'),
+ wait_after_lease=dict(type='int', default=5),
+ stateless=dict(type='bool'),
+ delete_protected=dict(type='bool'),
+ custom_emulated_machine=dict(type='str'),
+ force=dict(type='bool', default=False),
+ nics=dict(type='list', default=[], elements='dict'),
+ cloud_init=dict(type='dict'),
+ cloud_init_nics=dict(type='list', default=[], elements='dict'),
+ cloud_init_persist=dict(type='bool', default=False, aliases=['sysprep_persist']),
+ kernel_params_persist=dict(type='bool', default=False),
+ sysprep=dict(type='dict'),
+ host=dict(type='str'),
+ clone=dict(type='bool', default=False),
+ clone_permissions=dict(type='bool', default=False),
+ kernel_path=dict(type='str'),
+ initrd_path=dict(type='str'),
+ kernel_params=dict(type='str'),
+ instance_type=dict(type='str'),
+ description=dict(type='str'),
+ comment=dict(type='str'),
+ timezone=dict(type='str'),
+ serial_policy=dict(type='str', choices=['vm', 'host', 'custom']),
+ serial_policy_value=dict(type='str'),
+ vmware=dict(type='dict'),
+ xen=dict(type='dict'),
+ kvm=dict(type='dict'),
+ cpu_mode=dict(type='str'),
+ placement_policy=dict(type='str'),
+ placement_policy_hosts=dict(type='list', elements='str'),
+ custom_compatibility_version=dict(type='str'),
+ ticket=dict(type='bool', default=None),
+ cpu_pinning=dict(type='list', elements='dict'),
+ soundcard_enabled=dict(type='bool', default=None),
+ smartcard_enabled=dict(type='bool', default=None),
+ io_threads=dict(type='int', default=None),
+ ballooning_enabled=dict(type='bool', default=None),
+ rng_device=dict(type='str'),
+ numa_tune_mode=dict(type='str', choices=['interleave', 'preferred', 'strict']),
+ numa_nodes=dict(type='list', default=[], elements='dict'),
+ custom_properties=dict(type='list', elements='dict'),
+ watchdog=dict(type='dict'),
+ host_devices=dict(type='list', elements='dict'),
+ graphical_console=dict(
+ type='dict',
+ options=dict(
+ headless_mode=dict(type='bool'),
+ protocol=dict(type='list', elements='str'),
+ disconnect_action=dict(type='str'),
+ keyboard_layout=dict(type='str'),
+ monitors=dict(type='int'),
+ file_transfer_enabled=dict(type='bool'),
+ copy_paste_enabled=dict(type='bool'),
+ )
+ ),
+ exclusive=dict(type='bool'),
+ volatile=dict(type='bool'),
+ export_domain=dict(default=None),
+ export_ova=dict(type='dict'),
+ force_migrate=dict(type='bool'),
+ migrate=dict(type='bool', default=None),
+ next_run=dict(type='bool'),
+ virtio_scsi_enabled=dict(type='bool'),
+ multi_queues_enabled=dict(type='bool'),
+ virtio_scsi_multi_queues=dict(type='int'),
+ snapshot_name=dict(type='str'),
+ snapshot_vm=dict(type='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[['id', 'name']],
+ required_if=[
+ ('state', 'registered', ['storage_domain']),
+ ],
+ required_together=[['snapshot_name', 'snapshot_vm']]
+ )
+
+ check_sdk(module)
+ check_params(module)
+
+ try:
+ state = module.params['state']
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vms_service = connection.system_service().vms_service()
+ vms_module = VmsModule(
+ connection=connection,
+ module=module,
+ service=vms_service,
+ )
+ vm = vms_module.search_entity(list_params={'all_content': True})
+
+ control_state(vm, vms_service, module)
+ if state in ('present', 'running', 'next_run'):
+ if module.params['xen'] or module.params['kvm'] or module.params['vmware']:
+ vms_module.changed = import_vm(module, connection)
+
+            # Allow migrating the VM when state is present;
+            # migrate before updating:
+ if vm:
+ vms_module._migrate_vm(vm)
+
+            # In case of wait=false and state=running, wait for the VM to be created.
+            # If the VM does not exist, wait for the DOWN state;
+            # otherwise do not wait for any state, just update the VM:
+ ret = vms_module.create(
+ entity=vm,
+ result_state=otypes.VmStatus.DOWN if vm is None else None,
+ update_params={'next_run': module.params['next_run']} if module.params['next_run'] is not None else None,
+ clone=module.params['clone'],
+ clone_permissions=module.params['clone_permissions'],
+ _wait=True if not module.params['wait'] and state == 'running' else module.params['wait'],
+ )
+ # If VM is going to be created and check_mode is on, return now:
+ if module.check_mode and ret.get('id') is None:
+ module.exit_json(**ret)
+
+ vms_module.post_present(ret['id'])
+            # Start the VM when state is running and it is not already up:
+ if state == 'running':
+                def kernel_persist_check():
+                    return ((module.params.get('kernel_params') or
+                             module.params.get('initrd_path') or
+                             module.params.get('kernel_path'))
+                            and not module.params.get('cloud_init_persist'))
+ initialization = vms_module.get_initialization()
+ ret = vms_module.action(
+ action='start',
+ post_action=vms_module._post_start_action,
+ action_condition=lambda vm: (
+ vm.status not in [
+ otypes.VmStatus.MIGRATING,
+ otypes.VmStatus.POWERING_UP,
+ otypes.VmStatus.REBOOT_IN_PROGRESS,
+ otypes.VmStatus.WAIT_FOR_LAUNCH,
+ otypes.VmStatus.UP,
+ otypes.VmStatus.RESTORING_STATE,
+ ]
+ ),
+ wait_condition=lambda vm: vm.status == otypes.VmStatus.UP,
+ # Start action kwargs:
+ use_cloud_init=True if not module.params.get('cloud_init_persist') and module.params.get('cloud_init') else None,
+ use_sysprep=True if not module.params.get('cloud_init_persist') and module.params.get('sysprep') else None,
+ volatile=module.params.get('volatile'),
+ vm=otypes.Vm(
+ placement_policy=otypes.VmPlacementPolicy(
+ hosts=[otypes.Host(name=module.params['host'])]
+ ) if module.params['host'] else None,
+ initialization=initialization,
+ os=otypes.OperatingSystem(
+ cmdline=module.params.get('kernel_params'),
+ initrd=module.params.get('initrd_path'),
+ kernel=module.params.get('kernel_path'),
+ ) if (kernel_persist_check()) else None,
+            ) if (
+                kernel_persist_check() or
+                module.params.get('host') or
+                (initialization is not None
+                 and not module.params.get('cloud_init_persist'))
+            ) else None,
+ )
+
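+        # When a console ticket is requested, return the remote viewer
+        # connection file of the first graphics console: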
+ if module.params['ticket']:
+ vm_service = vms_service.vm_service(ret['id'])
+ graphics_consoles_service = vm_service.graphics_consoles_service()
+ graphics_console = graphics_consoles_service.list()[0]
+ console_service = graphics_consoles_service.console_service(graphics_console.id)
+ ticket = console_service.remote_viewer_connection_file()
+ if ticket:
+ ret['vm']['remote_vv_file'] = ticket
+
+ if state == 'next_run':
+ # Apply next run configuration, if needed:
+ vm = vms_service.vm_service(ret['id']).get()
+ if vm.next_run_configuration_exists:
+ ret = vms_module.action(
+ action='reboot',
+ entity=vm,
+ action_condition=lambda vm: vm.status == otypes.VmStatus.UP,
+ wait_condition=lambda vm: vm.status == otypes.VmStatus.UP,
+ )
+ ret['changed'] = vms_module.changed
+ elif state == 'stopped':
+ if module.params['xen'] or module.params['kvm'] or module.params['vmware']:
+ vms_module.changed = import_vm(module, connection)
+
+ ret = vms_module.create(
+ entity=vm,
+ result_state=otypes.VmStatus.DOWN if vm is None else None,
+ clone=module.params['clone'],
+ clone_permissions=module.params['clone_permissions'],
+ )
+ if module.params['force']:
+ ret = vms_module.action(
+ action='stop',
+ action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
+ wait_condition=vms_module.wait_for_down,
+ )
+ else:
+ ret = vms_module.action(
+ action='shutdown',
+ pre_action=vms_module._pre_shutdown_action,
+ action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
+ wait_condition=vms_module.wait_for_down,
+ )
+ vms_module.post_present(ret['id'])
+ elif state == 'suspended':
+ ret = vms_module.create(
+ entity=vm,
+ result_state=otypes.VmStatus.DOWN if vm is None else None,
+ clone=module.params['clone'],
+ clone_permissions=module.params['clone_permissions'],
+ )
+ vms_module.post_present(ret['id'])
+ ret = vms_module.action(
+ action='suspend',
+ pre_action=vms_module._pre_suspend_action,
+ action_condition=lambda vm: vm.status != otypes.VmStatus.SUSPENDED,
+ wait_condition=lambda vm: vm.status == otypes.VmStatus.SUSPENDED,
+ )
+ elif state == 'absent':
+ ret = vms_module.remove()
+ elif state == 'registered':
+ storage_domains_service = connection.system_service().storage_domains_service()
+
+        # Find the storage domain containing the unregistered VM:
+ sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
+ storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
+ vms_service = storage_domain_service.vms_service()
+
+ # Find the unregistered VM we want to register:
+ vms = vms_service.list(unregistered=True)
+ vm = next(
+ (vm for vm in vms if (vm.id == module.params['id'] or vm.name == module.params['name'])),
+ None
+ )
+ changed = False
+ if vm is None:
+ vm = vms_module.search_entity()
+ if vm is None:
+ raise ValueError(
+ "VM '%s(%s)' wasn't found." % (module.params['name'], module.params['id'])
+ )
+ else:
+ # Register the vm into the system:
+ changed = True
+ vm_service = vms_service.vm_service(vm.id)
+ vm_service.register(
+ allow_partial_import=module.params['allow_partial_import'],
+ cluster=otypes.Cluster(
+ name=module.params['cluster']
+ ) if module.params['cluster'] else None,
+ vnic_profile_mappings=_get_vnic_profile_mappings(module)
+ if module.params['vnic_profile_mappings'] else None,
+ reassign_bad_macs=module.params['reassign_bad_macs']
+ if module.params['reassign_bad_macs'] is not None else None,
+ registration_configuration=otypes.RegistrationConfiguration(
+ cluster_mappings=_get_cluster_mappings(module),
+ role_mappings=_get_role_mappings(module),
+ domain_mappings=_get_domain_mappings(module),
+ lun_mappings=_get_lun_mappings(module),
+ affinity_group_mappings=_get_affinity_group_mappings(module),
+ affinity_label_mappings=_get_affinity_label_mappings(module),
+ ) if (module.params['cluster_mappings']
+ or module.params['role_mappings']
+ or module.params['domain_mappings']
+ or module.params['lun_mappings']
+ or module.params['affinity_group_mappings']
+ or module.params['affinity_label_mappings']) else None
+ )
+
+ if module.params['wait']:
+ vm = vms_module.wait_for_import()
+ else:
+            # Fetch the VM to initialize the return value.
+ vm = vm_service.get()
+ ret = {
+ 'changed': changed,
+ 'id': vm.id,
+ 'vm': get_dict_of_struct(vm)
+ }
+ elif state == 'exported':
+ if module.params['export_domain']:
+ export_service = vms_module._get_export_domain_service()
+ export_vm = search_by_attributes(export_service.vms_service(), id=vm.id)
+
+ ret = vms_module.action(
+ entity=vm,
+ action='export',
+ action_condition=lambda t: export_vm is None or module.params['exclusive'],
+ wait_condition=lambda t: t is not None,
+ post_action=vms_module.post_export_action,
+ storage_domain=otypes.StorageDomain(id=export_service.get().id),
+ exclusive=module.params['exclusive'],
+ )
+ elif module.params['export_ova']:
+ export_vm = module.params['export_ova']
+ ret = vms_module.action(
+ entity=vm,
+ action='export_to_path_on_host',
+ host=otypes.Host(name=export_vm.get('host')),
+ directory=export_vm.get('directory'),
+ filename=export_vm.get('filename'),
+ )
+ elif state == 'reboot':
+ ret = vms_module.action(
+ action='reboot',
+ entity=vm,
+ action_condition=lambda vm: vm.status == otypes.VmStatus.UP,
+ wait_condition=lambda vm: vm.status == otypes.VmStatus.UP,
+ )
+
+ elif state == 'reset':
+ ret = vms_module.action(
+ action='reset',
+ entity=vm,
+ action_condition=lambda vm: vm.status == otypes.VmStatus.UP,
+ wait_condition=lambda vm: vm.status == otypes.VmStatus.UP,
+ )
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vm_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vm_info.py
new file mode 100644
index 000000000..037634585
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vm_info.py
@@ -0,0 +1,201 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_vm_info
+short_description: Retrieve information about one or more oVirt/RHV virtual machines
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV virtual machines."
+ - This module was called C(ovirt_vm_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_vm_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_vms), which
+ contains a list of virtual machines. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+            - "For example, to search for VM X in cluster Y, use the following pattern:
+ name=X and cluster=Y"
+ type: str
+ all_content:
+ description:
+ - "If I(true) all the attributes of the virtual machines should be
+ included in the response."
+ type: bool
+ default: false
+ case_sensitive:
+ description:
+ - "If I(true) performed search will take case into account."
+ type: bool
+ default: true
+ max:
+ description:
+ - "The maximum number of results to return."
+ type: int
+ next_run:
+ description:
+            - "Indicates whether the returned result describes the virtual machine as it is currently running, or
+               describes the virtual machine with the modifications that have already been performed but that will only
+               come into effect when the virtual machine is restarted. By default the value is set by engine."
+ type: bool
+ current_cd:
+ description:
+            - "If I(true), the currently attached CD will be returned for each virtual machine."
+ type: bool
+ default: false
+ version_added: 1.2.0
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url: https://ovirt.github.io/ovirt-engine-api-model/master/#types/vm/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all VMs which names start with C(centos) and
+# belong to cluster C(west):
+- ovirt.ovirt.ovirt_vm_info:
+ pattern: name=centos* and cluster=west
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_vms }}"
+
+# Gather info about next run configuration of virtual machine named myvm
+- ovirt.ovirt.ovirt_vm_info:
+ pattern: name=myvm
+ next_run: true
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_vms[0] }}"
+
+# Gather info about VMs original template with follow parameter
+- ovirt.ovirt.ovirt_vm_info:
+ pattern: name=myvm
+ follow: ['original_template.permissions', 'original_template.nics.vnic_profile']
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_vms[0] }}"
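+
+# A minimal sketch using the current_cd option documented above; the VM name
+# myvm is only a placeholder:
+- ovirt.ovirt.ovirt_vm_info:
+    pattern: name=myvm
+    current_cd: true
+  register: result
+- ansible.builtin.debug:
+    msg: "{{ result.ovirt_vms[0].current_cd }}"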
+'''
+
+RETURN = '''
+ovirt_vms:
+ description: "List of dictionaries describing the VMs. VM attributes are mapped to dictionary keys,
+ all VMs attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ all_content=dict(default=False, type='bool'),
+ current_cd=dict(default=False, type='bool'),
+ next_run=dict(default=None, type='bool'),
+ case_sensitive=dict(default=True, type='bool'),
+ max=dict(default=None, type='int'),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+            "The 'fetch_nested' and 'nested_attributes' options are deprecated, please use the 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vms_service = connection.system_service().vms_service()
+ vms = vms_service.list(
+ search=module.params['pattern'],
+ all_content=module.params['all_content'],
+ case_sensitive=module.params['case_sensitive'],
+ max=module.params['max'],
+ follow=",".join(module.params['follow']),
+ )
+ if module.params['next_run']:
+ vms = [vms_service.vm_service(vm.id).get(next_run=True) for vm in vms]
+
+ result = dict(
+ ovirt_vms=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ follow=module.params.get('follow'),
+ ) for c in vms
+ ],
+ )
+ for i, vm in enumerate(result['ovirt_vms']):
+ if module.params['current_cd']:
+ vm_service = vms_service.vm_service(vm['id'])
+ cdroms_service = vm_service.cdroms_service()
+ cdrom_device = cdroms_service.list()[0]
+ cdrom_service = cdroms_service.cdrom_service(cdrom_device.id)
+ result['ovirt_vms'][i]['current_cd'] = get_dict_of_struct(
+ struct=cdrom_service.get(current=True),
+ connection=connection,
+ )
+ else:
+ result['ovirt_vms'][i]['current_cd'] = {}
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vm_os_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vm_os_info.py
new file mode 100644
index 000000000..2ddb9defb
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vm_os_info.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_vm_os_info
+short_description: Retrieve information on all supported oVirt/RHV operating systems
+version_added: "1.1.0"
+author:
+- "Martin Necas (@mnecas)"
+- "Chris Brown (@snecklifter)"
+description:
+ - "Retrieve information on all supported oVirt/RHV operating systems."
+notes:
+ - "This module returns a variable C(ovirt_operating_systems), which
+ contains a list of operating systems. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ filter_keys:
+ description:
+            - "List of attributes which should be returned."
+ type: list
+ elements: str
+ name:
+ description:
+ - "Name of the operating system which should be returned."
+ type: str
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url:
+ https://ovirt.github.io/ovirt-engine-api-model/master/#types/operating_system_info/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Look at ovirt_auth module to see how to reuse authentication:
+
+- ovirt.ovirt.ovirt_vm_os_info:
+ auth: "{{ ovirt_auth }}"
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_operating_systems }}"
+
+- ovirt.ovirt.ovirt_vm_os_info:
+ auth: "{{ ovirt_auth }}"
+ filter_keys: name,architecture
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_operating_systems }}"
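+
+# A minimal sketch using the name option documented above; the operating
+# system name rhel_8x64 is only a placeholder:
+- ovirt.ovirt.ovirt_vm_os_info:
+    auth: "{{ ovirt_auth }}"
+    name: rhel_8x64
+  register: result
+- ansible.builtin.debug:
+    msg: "{{ result.ovirt_operating_systems }}"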
+'''
+
+RETURN = '''
+ovirt_operating_systems:
+ description: "List of dictionaries describing the operating systems. Operating system attributes are mapped to dictionary keys,
+ all operating systems attributes can be found at following url:
+ http://ovirt.github.io/ovirt-engine-api-model/master/#types/operating_system_info."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ filter_keys=dict(default=None, type='list', elements='str', no_log=True),
+ name=dict(default=None, type='str'),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+            "The 'fetch_nested' and 'nested_attributes' options are deprecated, please use the 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ operating_systems_service = connection.system_service().operating_systems_service()
+ operating_systems = operating_systems_service.list(follow=",".join(module.params['follow']))
+ if module.params['name']:
+ operating_systems = filter(lambda x: x.name == module.params['name'], operating_systems)
+ result = dict(
+ ovirt_operating_systems=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ filter_keys=module.params['filter_keys'],
+ ) for c in operating_systems
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vmpool.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vmpool.py
new file mode 100644
index 000000000..083260581
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vmpool.py
@@ -0,0 +1,491 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_vmpool
+short_description: Module to manage VM pools in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Module to manage VM pools in oVirt/RHV."
+options:
+ id:
+ description:
+ - "ID of the vmpool to manage."
+ type: str
+ name:
+ description:
+ - "Name of the VM pool to manage."
+ type: str
+ required: true
+ comment:
+ description:
+ - Comment of the Virtual Machine pool.
+ type: str
+ state:
+ description:
+ - "Should the VM pool be present/absent."
+ - "Note that when C(state) is I(absent) all VMs in VM pool are stopped and removed."
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ template:
+ description:
+ - "Name of the template, which will be used to create VM pool."
+ type: str
+ description:
+ description:
+ - "Description of the VM pool."
+ type: str
+ cluster:
+ description:
+ - "Name of the cluster, where VM pool should be created."
+ type: str
+ type:
+ description:
+ - "Type of the VM pool. Either manual or automatic."
+            - "Type of the VM pool. Either C(manual) or C(automatic)."
+            - "C(manual) - The administrator is responsible for explicitly returning the virtual machine to the pool.
+                The virtual machine reverts to the original base image after the administrator returns it to the pool."
+            - "C(automatic) - When the virtual machine is shut down, it automatically reverts to its base image and
+                is returned to the virtual machine pool."
+ choices: ['manual', 'automatic']
+ type: str
+ vm_per_user:
+ description:
+            - "Maximum number of VMs from this pool that a single user can attach to."
+ - "Default value is set by engine."
+ type: int
+ prestarted:
+ description:
+            - "Number of pre-started VMs, that is, the number of VMs in the running state that are waiting
+               to be attached to users."
+ - "Default value is set by engine."
+ type: int
+ vm_count:
+ description:
+ - "Number of VMs in the pool."
+ - "Default value is set by engine."
+ type: int
+ vm:
+ description:
+            - "For creating a VM pool without editing the template."
+            - "Note: You can use C(vm) only when creating a VM pool."
+ type: dict
+ suboptions:
+ comment:
+ description:
+ - Comment of the Virtual Machine.
+ timezone:
+ description:
+ - Sets time zone offset of the guest hardware clock.
+ - For example C(Etc/GMT)
+ memory:
+ description:
+ - Amount of memory of the Virtual Machine. Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
+ - Default value is set by engine.
+ memory_guaranteed:
+ description:
+ - Amount of minimal guaranteed memory of the Virtual Machine.
+ Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
+ - C(memory_guaranteed) parameter can't be lower than C(memory) parameter.
+ - Default value is set by engine.
+ memory_max:
+ description:
+ - Upper bound of virtual machine memory up to which memory hot-plug can be performed.
+ Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
+ - Default value is set by engine.
+ cloud_init:
+ description:
+ - Dictionary with values for Unix-like Virtual Machine initialization using cloud init.
+ - C(host_name) - Hostname to be set to Virtual Machine when deployed.
+ - C(timezone) - Timezone to be set to Virtual Machine when deployed.
+ - C(user_name) - Username to be used to set password to Virtual Machine when deployed.
+ - C(root_password) - Password to be set for user specified by C(user_name) parameter.
+                    - C(authorized_ssh_keys) - Use these SSH keys to login to Virtual Machine.
+ - C(regenerate_ssh_keys) - If I(True) SSH keys will be regenerated on Virtual Machine.
+ - C(custom_script) - Cloud-init script which will be executed on Virtual Machine when deployed. This is appended to the end of the
+ cloud-init script generated by any other options. For further information, refer to cloud-init User-Data documentation.
+ - C(dns_servers) - DNS servers to be configured on Virtual Machine, maximum of two, space-separated.
+ - C(dns_search) - DNS search domains to be configured on Virtual Machine.
+ - C(nic_boot_protocol) - Set boot protocol of the network interface of Virtual Machine. Can be one of C(none), C(dhcp) or C(static).
+ - C(nic_ip_address) - If boot protocol is static, set this IP address to network interface of Virtual Machine.
+ - C(nic_netmask) - If boot protocol is static, set this netmask to network interface of Virtual Machine.
+ - C(nic_gateway) - If boot protocol is static, set this gateway to network interface of Virtual Machine.
+ - C(nic_name) - Set name to network interface of Virtual Machine.
+ sso:
+ description:
+ - "I(True) enable Single Sign On by Guest Agent, I(False) to disable it. By default is chosen by oVirt/RHV engine."
+ type: bool
+ smartcard_enabled:
+ description:
+ - "If I(true), use smart card authentication."
+ type: bool
+ nics:
+ description:
+ - List of NICs, which should be attached to Virtual Machine. NIC is described by following dictionary.
+ - C(name) - Name of the NIC.
+ - C(profile_name) - Profile name where NIC should be attached.
+            - C(interface) - Type of the network interface. One of the following: I(virtio), I(e1000), I(rtl8139); default is I(virtio).
+            - C(mac_address) - Custom MAC address of the network interface, by default it's obtained from MAC pool.
+            - NOTE - This parameter is used only when C(state) is I(running) or I(present) and can only create NICs.
+            - To manage NICs of the VM in more depth please use the M(ovirt.ovirt.ovirt_nics) module instead.
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Create VM pool from template
+ ovirt.ovirt.ovirt_vmpool:
+ cluster: mycluster
+ name: myvmpool
+ template: rhel7
+ vm_count: 2
+ prestarted: 2
+ vm_per_user: 1
+
+- name: Remove vmpool, note that all VMs in pool will be stopped and removed
+ ovirt.ovirt.ovirt_vmpool:
+ state: absent
+ name: myvmpool
+
+- name: Change Pool Name
+ ovirt.ovirt.ovirt_vmpool:
+ id: 00000000-0000-0000-0000-000000000000
+ name: "new_pool_name"
+
+- name: Create vm pool and override the pool values
+ ovirt.ovirt.ovirt_vmpool:
+ cluster: mycluster
+ name: vmpool
+ template: blank
+ vm_count: 2
+ prestarted: 1
+ vm_per_user: 1
+ vm:
+ memory: 4GiB
+ memory_guaranteed: 4GiB
+ memory_max: 10GiB
+ comment: vncomment
+ cloud_init:
+ nic_boot_protocol: static
+ nic_ip_address: 10.34.60.86
+ nic_netmask: 255.255.252.0
+ nic_gateway: 10.34.63.254
+ nic_name: eth1
+ host_name: example.com
+ custom_script: |
+ write_files:
+ - content: |
+ Hello, world!
+ path: /tmp/greeting.txt
+ permissions: '0644'
+ user_name: root
+ root_password: super_password
+ nics:
+ - name: nicname
+ interface: virtio
+ profile_name: network
+
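+# A minimal sketch of an automatic pool, where each VM reverts to its base
+# image on shutdown; the cluster and template names are placeholders:
+- name: Create an automatic VM pool
+  ovirt.ovirt.ovirt_vmpool:
+    cluster: mycluster
+    name: myautopool
+    template: rhel7
+    vm_count: 2
+    type: automatic
+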
+'''
+
+RETURN = '''
+id:
+ description: ID of the VM pool which is managed
+ returned: On success if VM pool is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+vm_pool:
+ description: "Dictionary of all the VM pool attributes. VM pool attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm_pool."
+ returned: On success if VM pool is found.
+ type: dict
+'''
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_params,
+ check_sdk,
+ create_connection,
+ equal,
+ get_link_name,
+ ovirt_full_argument_spec,
+ wait,
+ convert_to_bytes,
+ search_by_name,
+)
+
+
+class VmPoolsModule(BaseModule):
+ def __init__(self, *args, **kwargs):
+ super(VmPoolsModule, self).__init__(*args, **kwargs)
+ self._initialization = None
+
+ def build_entity(self):
+ vm = self.param('vm')
+ return otypes.VmPool(
+ id=self._module.params['id'],
+ name=self._module.params['name'],
+ description=self._module.params['description'],
+ comment=self._module.params['comment'],
+ cluster=otypes.Cluster(
+ name=self._module.params['cluster']
+ ) if self._module.params['cluster'] else None,
+ template=otypes.Template(
+ name=self._module.params['template']
+ ) if self._module.params['template'] else None,
+ max_user_vms=self._module.params['vm_per_user'],
+ prestarted_vms=self._module.params['prestarted'],
+ size=self._module.params['vm_count'],
+ type=otypes.VmPoolType(
+ self._module.params['type']
+ ) if self._module.params['type'] else None,
+ vm=self.build_vm(vm) if self._module.params['vm'] else None,
+ )
+
+ def build_vm(self, vm):
+ return otypes.Vm(
+ comment=vm.get('comment'),
+ memory=convert_to_bytes(
+ vm.get('memory')
+ ) if vm.get('memory') else None,
+ memory_policy=otypes.MemoryPolicy(
+ guaranteed=convert_to_bytes(vm.get('memory_guaranteed')),
+ max=convert_to_bytes(vm.get('memory_max')),
+ ) if any((
+ vm.get('memory_guaranteed'),
+ vm.get('memory_max')
+ )) else None,
+ initialization=self.get_initialization(vm),
+ display=otypes.Display(
+ smartcard_enabled=vm.get('smartcard_enabled')
+ ) if vm.get('smartcard_enabled') is not None else None,
+ sso=(
+ otypes.Sso(
+ methods=[otypes.Method(id=otypes.SsoMethod.GUEST_AGENT)] if vm.get('sso') else []
+ )
+ ) if vm.get('sso') is not None else None,
+ time_zone=otypes.TimeZone(
+ name=vm.get('timezone'),
+ ) if vm.get('timezone') else None,
+ )
+
+ def get_initialization(self, vm):
+ if self._initialization is not None:
+ return self._initialization
+
+ sysprep = vm.get('sysprep')
+ cloud_init = vm.get('cloud_init')
+ cloud_init_nics = vm.get('cloud_init_nics') or []
+ if cloud_init is not None:
+ cloud_init_nics.append(cloud_init)
+
+ if cloud_init or cloud_init_nics:
+ self._initialization = otypes.Initialization(
+ nic_configurations=[
+ otypes.NicConfiguration(
+ boot_protocol=otypes.BootProtocol(
+ nic.pop('nic_boot_protocol').lower()
+ ) if nic.get('nic_boot_protocol') else None,
+ name=nic.pop('nic_name', None),
+ on_boot=True,
+ ip=otypes.Ip(
+ address=nic.pop('nic_ip_address', None),
+ netmask=nic.pop('nic_netmask', None),
+ gateway=nic.pop('nic_gateway', None),
+ ) if (
+ nic.get('nic_gateway') is not None or
+ nic.get('nic_netmask') is not None or
+ nic.get('nic_ip_address') is not None
+ ) else None,
+ )
+ for nic in cloud_init_nics
+ if (
+ nic.get('nic_gateway') is not None or
+ nic.get('nic_netmask') is not None or
+ nic.get('nic_ip_address') is not None or
+ nic.get('nic_boot_protocol') is not None
+ )
+ ] if cloud_init_nics else None,
+ **cloud_init
+ )
+ elif sysprep:
+ self._initialization = otypes.Initialization(
+ **sysprep
+ )
+ return self._initialization
+
+ def get_vms(self, entity):
+ vms = self._connection.system_service().vms_service().list()
+ resp = []
+ for vm in vms:
+ if vm.vm_pool is not None and vm.vm_pool.id == entity.id:
+ resp.append(vm)
+ return resp
+
+ def post_create(self, entity):
+ vm_param = self.param('vm')
+ if vm_param is not None and vm_param.get('nics') is not None:
+ vms = self.get_vms(entity)
+ for vm in vms:
+ self.__attach_nics(vm, vm_param)
+
+ def __attach_nics(self, entity, vm_param):
+ # Attach NICs to VM, if specified:
+ vms_service = self._connection.system_service().vms_service()
+ nics_service = vms_service.service(entity.id).nics_service()
+ for nic in vm_param.get('nics'):
+ if search_by_name(nics_service, nic.get('name')) is None:
+ if not self._module.check_mode:
+ nics_service.add(
+ otypes.Nic(
+ name=nic.get('name'),
+ interface=otypes.NicInterface(
+ nic.get('interface', 'virtio')
+ ),
+ vnic_profile=otypes.VnicProfile(
+ id=self.__get_vnic_profile_id(nic),
+ ) if nic.get('profile_name') else None,
+ mac=otypes.Mac(
+ address=nic.get('mac_address')
+ ) if nic.get('mac_address') else None,
+ )
+ )
+ self.changed = True
+
+ def __get_vnic_profile_id(self, nic):
+ """
+ Return the vNIC profile ID looked up by its name. Because multiple vNIC
+ profiles can share the same name, the cluster is used as an additional filter criterion.
+ """
+ vnics_service = self._connection.system_service().vnic_profiles_service()
+ clusters_service = self._connection.system_service().clusters_service()
+ cluster = search_by_name(clusters_service, self.param('cluster'))
+ profiles = [
+ profile for profile in vnics_service.list()
+ if profile.name == nic.get('profile_name')
+ ]
+ cluster_networks = [
+ net.id for net in self._connection.follow_link(cluster.networks)
+ ]
+ try:
+ return next(
+ profile.id for profile in profiles
+ if profile.network.id in cluster_networks
+ )
+ except StopIteration:
+ raise Exception(
+ "Profile '%s' was not found in cluster '%s'" % (
+ nic.get('profile_name'),
+ self.param('cluster')
+ )
+ )
+
+ def update_check(self, entity):
+ return (
+ equal(self._module.params.get('name'), entity.name) and
+ equal(self._module.params.get('cluster'), get_link_name(self._connection, entity.cluster)) and
+ equal(self._module.params.get('description'), entity.description) and
+ equal(self._module.params.get('comment'), entity.comment) and
+ equal(self._module.params.get('vm_per_user'), entity.max_user_vms) and
+ equal(self._module.params.get('prestarted'), entity.prestarted_vms) and
+ equal(self._module.params.get('vm_count'), entity.size)
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ id=dict(default=None),
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ name=dict(required=True),
+ template=dict(default=None),
+ cluster=dict(default=None),
+ description=dict(default=None),
+ vm=dict(default=None, type='dict'),
+ comment=dict(default=None),
+ vm_per_user=dict(default=None, type='int'),
+ prestarted=dict(default=None, type='int'),
+ vm_count=dict(default=None, type='int'),
+ type=dict(default=None, choices=['automatic', 'manual']),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+ check_params(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vm_pools_service = connection.system_service().vm_pools_service()
+ vm_pools_module = VmPoolsModule(
+ connection=connection,
+ module=module,
+ service=vm_pools_service,
+ )
+
+ state = module.params['state']
+ if state == 'present':
+ ret = vm_pools_module.create()
+
+ # Wait for all VM pool VMs to be created:
+ if module.params['wait']:
+ vms_service = connection.system_service().vms_service()
+ for vm in vms_service.list(search='pool=%s' % module.params['name']):
+ wait(
+ service=vms_service.service(vm.id),
+ condition=lambda vm: vm.status in [otypes.VmStatus.DOWN, otypes.VmStatus.UP],
+ timeout=module.params['timeout'],
+ )
+
+ elif state == 'absent':
+ ret = vm_pools_module.remove()
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vmpool_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vmpool_info.py
new file mode 100644
index 000000000..ac0d2f866
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vmpool_info.py
@@ -0,0 +1,132 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_vmpool_info
+short_description: Retrieve information about one or more oVirt/RHV vmpools
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV vmpools."
+ - This module was called C(ovirt_vmpool_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_vmpool_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_vmpools), which
+ contains a list of vmpools. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search vmpool X: name=X"
+ type: str
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url: https://ovirt.github.io/ovirt-engine-api-model/master/#types/vm_pool/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all vm pools whose names start with C(centos):
+- ovirt.ovirt.ovirt_vmpool_info:
+ pattern: name=centos*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_vm_pools }}"
+'''
+
+RETURN = '''
+ovirt_vm_pools:
+ description: "List of dictionaries describing the vmpools. Vm pool attributes are mapped to dictionary keys,
+ all VM pool attributes can be found at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm_pool."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+ "The 'fetch_nested' and 'nested_attributes' are deprecated please use 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vmpools_service = connection.system_service().vm_pools_service()
+ vmpools = vmpools_service.list(
+ search=module.params['pattern'],
+ follow=",".join(module.params['follow'])
+ )
+ result = dict(
+ ovirt_vm_pools=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in vmpools
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vnic_profile.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vnic_profile.py
new file mode 100644
index 000000000..b8a080e01
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vnic_profile.py
@@ -0,0 +1,330 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_vnic_profile
+short_description: Module to manage vNIC profile of network in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Module to manage vNIC profile of network in oVirt/RHV"
+options:
+ name:
+ description:
+ - "A human-readable name in plain text."
+ required: true
+ type: str
+ state:
+ description:
+ - "Should the vNIC be absent/present."
+ choices: ['absent', 'present']
+ default: present
+ type: str
+ description:
+ description:
+ - "A human-readable description in plain text."
+ type: str
+ data_center:
+ description:
+ - "Datacenter name where network reside."
+ type: str
+ required: true
+ network:
+ description:
+ - "Name of network to which is vNIC attached."
+ type: str
+ required: true
+ network_filter:
+ description:
+ - "The network filter enables to filter packets send to/from the VM's nic according to defined rules."
+ type: str
+ custom_properties:
+ description:
+ - "Custom properties applied to the vNIC profile."
+ - "Custom properties is a list of dictionary which can have following values:"
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - "Name of the custom property. For example: I(hugepages), I(vhost), I(sap_agent), etc."
+ regexp:
+ description:
+ - Regular expression to set for custom property.
+ value:
+ description:
+ - Value to set for custom property.
+ qos:
+ description:
+ - "Quality of Service attributes regulate inbound and outbound network traffic of the NIC."
+ type: str
+ port_mirroring:
+ description:
+ - "Enables port mirroring."
+ type: bool
+ pass_through:
+ description:
+ - "Enables passthrough to an SR-IOV-enabled host NIC."
+ - "When enabled C(qos) and C(network_filter) are automatically set to None and C(port_mirroring) to False."
+ - "When enabled and C(migratable) not specified then C(migratable) is enabled."
+ - "Port mirroring, QoS and network filters are not supported on passthrough profiles."
+ choices: ['disabled', 'enabled']
+ type: str
+ migratable:
+ description:
+ - "Marks whether pass_through NIC is migratable or not."
+ type: bool
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+- name: Add vNIC
+ ovirt.ovirt.ovirt_vnic_profile:
+ name: myvnic
+ network: mynetwork
+ state: present
+ data_center: datacenter
+
+- name: Editing vNICs network_filter, custom_properties, qos
+ ovirt.ovirt.ovirt_vnic_profile:
+ name: myvnic
+ network: mynetwork
+ data_center: datacenter
+ qos: myqos
+ custom_properties:
+ - name: SecurityGroups
+ value: 9bd9bde9-39da-44a8-9541-aa39e1a81c9d
+ network_filter: allow-dhcp
+
+- name: Remove vNICs network_filter, custom_properties, qos
+ ovirt.ovirt.ovirt_vnic_profile:
+ name: myvnic
+ network: mynetwork
+ data_center: datacenter
+ qos: ""
+ custom_properties: ""
+ network_filter: ""
+
+- name: Don't use migratable
+ ovirt.ovirt.ovirt_vnic_profile:
+ name: myvnic
+ network: mynetwork
+ data_center: datacenter
+ migratable: False
+ pass_through: enabled
+
+- name: Remove vNIC
+ ovirt.ovirt.ovirt_vnic_profile:
+ name: myvnic
+ network: mynetwork
+ state: absent
+ data_center: datacenter
+'''
+
+RETURN = '''
+id:
+ description: ID of the vNIC profile which is managed
+ returned: On success if vNIC profile is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+vnic:
+ description: "Dictionary of all the vNIC profile attributes. Network interface attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/nic."
+ returned: On success if vNIC profile is found.
+ type: dict
+'''
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ get_link_name,
+ ovirt_full_argument_spec,
+ search_by_name,
+ get_id_by_name
+)
+
+
+class EntityVnicPorfileModule(BaseModule):
+
+ def __init__(self, *args, **kwargs):
+ super(EntityVnicPorfileModule, self).__init__(*args, **kwargs)
+
+ def _get_dcs_service(self):
+ return self._connection.system_service().data_centers_service()
+
+ def _get_dcs_id(self):
+ return get_id_by_name(self._get_dcs_service(), self.param('data_center'))
+
+ def _get_network_id(self):
+ networks_service = self._get_dcs_service().service(self._get_dcs_id()).networks_service()
+ return get_id_by_name(networks_service, self.param('network'))
+
+ def _get_qos_id(self):
+ if self.param('qos'):
+ qoss_service = self._get_dcs_service().service(self._get_dcs_id()).qoss_service()
+ return get_id_by_name(qoss_service, self.param('qos'))
+ return None
+
+ def _get_network_filter_id(self):
+ nf_service = self._connection.system_service().network_filters_service()
+ return get_id_by_name(nf_service, self.param('network_filter')) if self.param('network_filter') else None
+
+ def _get_network_filter(self):
+ network_filter = None
+ # The order of these conditions is important. If both network_filter and
+ # pass_through were specified, the module would try to create the network filter and fail on the engine.
+ if self.param('network_filter') == '' or self.param('pass_through') == 'enabled':
+ network_filter = otypes.NetworkFilter()
+ elif self.param('network_filter'):
+ network_filter = otypes.NetworkFilter(id=self._get_network_filter_id())
+ return network_filter
+
+ def _get_qos(self):
+ qos = None
+ # The order of these conditions is important. If both qos and pass_through were specified, the module would try to create the QoS and fail on the engine.
+ if self.param('qos') == '' or self.param('pass_through') == 'enabled':
+ qos = otypes.Qos()
+ elif self.param('qos'):
+ qos = otypes.Qos(id=self._get_qos_id())
+ return qos
+
+ def _get_port_mirroring(self):
+ if self.param('pass_through') == 'enabled':
+ return False
+ return self.param('port_mirroring')
+
+ def _get_migratable(self):
+ if self.param('migratable') is not None:
+ return self.param('migratable')
+ if self.param('pass_through') == 'enabled':
+ return True
+
+ def build_entity(self):
+ return otypes.VnicProfile(
+ name=self.param('name'),
+ network=otypes.Network(id=self._get_network_id()),
+ description=self.param('description') if self.param('description') is not None else None,
+ pass_through=otypes.VnicPassThrough(mode=otypes.VnicPassThroughMode(self.param('pass_through'))) if self.param('pass_through') else None,
+ custom_properties=[
+ otypes.CustomProperty(
+ name=cp.get('name'),
+ regexp=cp.get('regexp'),
+ value=str(cp.get('value')),
+ ) for cp in self.param('custom_properties') if cp
+ ] if self.param('custom_properties') else None,
+ migratable=self._get_migratable(),
+ qos=self._get_qos(),
+ port_mirroring=self._get_port_mirroring(),
+ network_filter=self._get_network_filter()
+ )
+
+ def update_check(self, entity):
+ def check_custom_properties():
+ if self.param('custom_properties'):
+ current = []
+ if entity.custom_properties:
+ current = [(cp.name, cp.regexp, str(cp.value)) for cp in entity.custom_properties]
+ passed = [(cp.get('name'), cp.get('regexp'), str(cp.get('value'))) for cp in self.param('custom_properties') if cp]
+ return sorted(current) == sorted(passed)
+ return True
+
+ pass_through = getattr(entity.pass_through.mode, 'name', None)
+ return (
+ check_custom_properties() and
+ # We can't use the equal method here, because _get_network_filter_id and _get_qos_id return None when an empty string is passed,
+ # and equal returns true whenever its first parameter is None.
+ self._get_network_filter_id() == getattr(entity.network_filter, 'id', None) and
+ self._get_qos_id() == getattr(entity.qos, 'id', None) and
+ equal(self.param('migratable'), getattr(entity, 'migratable', None)) and
+ equal(self.param('pass_through'), pass_through.lower() if pass_through else None) and
+ equal(self.param('description'), entity.description) and
+ equal(self.param('port_mirroring'), getattr(entity, 'port_mirroring', None))
+ )
+
+
+def get_entity(vnic_services, entitynics_module):
+ vnic_profiles = vnic_services.list()
+ network_id = entitynics_module._get_network_id()
+ for vnic in vnic_profiles:
+ # If the vNIC profile already exists update it, otherwise create it
+ if vnic.name == entitynics_module.param('name') and network_id == vnic.network.id:
+ return vnic
+
+
+def check_params(module):
+ if (module.params.get('port_mirroring') or module.params.get('network_filter') or module.params.get('qos'))\
+ and module.params.get('pass_through') == 'enabled':
+ module.fail_json(msg="Cannot edit VM network interface profile. 'Port Mirroring,'Qos' and 'Network Filter' are not supported on passthrough profiles.")
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ network=dict(type='str', required=True),
+ data_center=dict(type='str', required=True),
+ description=dict(type='str'),
+ name=dict(type='str', required=True),
+ network_filter=dict(type='str'),
+ custom_properties=dict(type='list', elements='dict'),
+ qos=dict(type='str'),
+ pass_through=dict(type='str', choices=['disabled', 'enabled']),
+ port_mirroring=dict(type='bool'),
+ migratable=dict(type='bool'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+
+ )
+ check_sdk(module)
+ check_params(module)
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+
+ vnic_services = connection.system_service().vnic_profiles_service()
+
+ entitynics_module = EntityVnicPorfileModule(
+ connection=connection,
+ module=module,
+ service=vnic_services,
+ )
+ state = module.params['state']
+ entity = get_entity(vnic_services, entitynics_module)
+ if state == 'present':
+ ret = entitynics_module.create(entity=entity, force_create=entity is None)
+ elif state == 'absent':
+ if entity is not None:
+ ret = entitynics_module.remove(entity=entity)
+ else:
+ raise Exception("Vnic profile '%s' in network '%s' was not found." % (module.params['name'], module.params['network']))
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vnic_profile_info.py b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vnic_profile_info.py
new file mode 100644
index 000000000..69a451d38
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vnic_profile_info.py
@@ -0,0 +1,135 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_vnic_profile_info
+short_description: Retrieve information about one or more oVirt/RHV vnic profiles
+version_added: "1.0.0"
+author: "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV vnic profiles."
+notes:
+ - "This module returns a variable C(ovirt_vnic_profiles), which
+ contains a list of vnic profiles. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ max:
+ description:
+ - "The maximum number of results to return."
+ type: int
+ name:
+ description:
+ - "Name of vnic profile."
+ type: str
+ follow:
+ description:
+ - List of linked entities, which should be fetched along with the main entity.
+ - This parameter replaces usage of C(fetch_nested) and C(nested_attributes).
+ - "All follow parameters can be found at following url: https://ovirt.github.io/ovirt-engine-api-model/master/#types/vnic_profile/links_summary"
+ type: list
+ version_added: 1.5.0
+ elements: str
+ aliases: ['follows']
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about 10 vnic profiles:
+- ovirt.ovirt.ovirt_vnic_profile_info:
+ max: 10
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_vnic_profiles }}"
+'''
+
+RETURN = '''
+ovirt_vnic_profiles:
+ description: "List of dictionaries describing the vnic profiles. Vnic_profile attributes are mapped to dictionary keys,
+ all vnic profiles attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vnic_profile."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ max=dict(default=None, type='int'),
+ name=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ if module.params['fetch_nested'] or module.params['nested_attributes']:
+ module.deprecate(
+ "The 'fetch_nested' and 'nested_attributes' are deprecated please use 'follow' parameter",
+ version='3.0.0',
+ collection_name='ovirt.ovirt'
+ )
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vnic_profiles_service = connection.system_service().vnic_profiles_service()
+ vnic_profiles = vnic_profiles_service.list(
+ max=module.params.get('max'),
+ follow=",".join(module.params['follow'])
+ )
+ if module.params.get('name') and vnic_profiles:
+ vnic_profiles = [vnic_profile for vnic_profile in vnic_profiles if vnic_profile.name == module.params.get("name")]
+
+ result = dict(
+ ovirt_vnic_profiles=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in vnic_profiles
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/ovirt/ovirt/plugins/test/ovirt_proxied_check.py b/ansible_collections/ovirt/ovirt/plugins/test/ovirt_proxied_check.py
new file mode 100644
index 000000000..f65ea2b51
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/plugins/test/ovirt_proxied_check.py
@@ -0,0 +1,46 @@
+# ovirt-hosted-engine-setup -- ovirt hosted engine setup
+# Copyright (C) 2018 Red Hat, Inc.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = ''' Check if a URL will be accessed through a proxy '''
+
+try:
+ from urllib import getproxies_environment
+ from urllib import proxy_bypass
+ from urlparse import urlparse
+except ImportError:
+ from urllib.request import getproxies_environment
+ from urllib.request import proxy_bypass
+ from urllib.parse import urlparse
+
+
+def proxied(value):
+ netloc = urlparse(value).netloc
+ proxied = bool(getproxies_environment()) and not proxy_bypass(netloc)
+ return proxied
+
+
+class TestModule(object):
+ ''' Ansible jinja2 tests '''
+
+ def tests(self):
+ return {
+ 'proxied': proxied,
+ }
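+
+# A hypothetical usage sketch (not part of this file): in a playbook the test
+# can be applied to any URL-valued variable, for example
+#
+#   when: engine_url is ovirt.ovirt.proxied
+#
+# assuming the fully-qualified collection name resolves the test; it evaluates
+# to true only when proxy environment variables are set and the URL's host is
+# not excluded by no_proxy.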
diff --git a/ansible_collections/ovirt/ovirt/requirements.txt b/ansible_collections/ovirt/ovirt/requirements.txt
new file mode 100644
index 000000000..e3d7fe28d
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/requirements.txt
@@ -0,0 +1,2 @@
+ovirt-engine-sdk-python>=4.5.0
+ovirt-imageio
\ No newline at end of file
diff --git a/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/README.md b/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/README.md
new file mode 100644
index 000000000..066bf2b0a
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/README.md
@@ -0,0 +1,53 @@
+oVirt Cluster Upgrade
+=========
+
+The `cluster_upgrade` role iterates through all the hosts in a cluster and upgrades them.
+
+Role Variables
+--------------
+
+| Name | Default value | |
+|-------------------------|-----------------------|-----------------------------------------------------|
+| cluster_name | Default | Name of the cluster to be upgraded. |
+| stopped_vms | UNDEF | List of virtual machines to stop before upgrading. |
+| stop_non_migratable_vms <br/> <i>alias: stop_pinned_to_host_vms</i> | false | Specify whether to stop virtual machines pinned to the host being upgraded. If true, the pinned non-migratable virtual machines will be stopped and the host will be upgraded; otherwise the host will be skipped. |
+| upgrade_timeout | 3600 | Timeout in seconds to wait for host to be upgraded. |
+| host_statuses | [UP] | List of host statuses. If a host is in any of the specified statuses then it will be upgraded. |
+| host_names | [\*] | List of host names to be upgraded. |
+| check_upgrade | false | If true, run check_for_upgrade action on all hosts before executing upgrade on them. If false, run upgrade only for hosts with available upgrades and ignore all other hosts. |
+| reboot_after_upgrade | true | If true, reboot hosts after a successful upgrade. |
+| use_maintenance_policy | true | If true, the cluster policy will be switched to cluster_maintenance during the upgrade; otherwise the policy will be unchanged. |
+| healing_in_progress_checks | 6 | Maximum number of attempts to check if gluster healing is still in progress. |
+| healing_in_progress_check_delay | 300 | The delay in seconds between each attempt to check if gluster healing is still in progress. |
+| wait_to_finish_healing | 5 | Delay in minutes to wait to finish gluster healing process after successful host upgrade. |
+| engine_correlation_id | <i>Randomly generated UUID</i> | The correlation id with which the role will be run. |
+
+Example Playbook
+----------------
+
+```yaml
+---
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ engine_fqdn: ovirt-engine.example.com
+ engine_user: admin@internal
+ engine_password: 123456
+ engine_cafile: /etc/pki/ovirt-engine/ca.pem
+
+ cluster_name: production
+ stopped_vms:
+ - openshift-master-0
+ - openshift-node-0
+ - openshift-node-image
+
+ roles:
+ - cluster_upgrade
+ collections:
+ - ovirt.ovirt
+```
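+
+A minimal sketch of running the playbook above from the command line (the
+playbook file name cluster-upgrade.yml is hypothetical):
+
+```console
+$ ansible-playbook cluster-upgrade.yml
+```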
+
+[![asciicast](https://asciinema.org/a/122760.png)](https://asciinema.org/a/122760)
diff --git a/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/defaults/main.yml b/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/defaults/main.yml
new file mode 100644
index 000000000..25c50b802
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/defaults/main.yml
@@ -0,0 +1,16 @@
+---
+# stop_pinned_to_host_vms is alias for stop_non_migratable_vms
+stop_non_migratable_vms: "{{ stop_pinned_to_host_vms | default(false) }}"
+upgrade_timeout: 3600
+cluster_name: Default
+check_upgrade: false
+reboot_after_upgrade: true
+use_maintenance_policy: true
+host_statuses:
+ - up
+host_names:
+ - '*'
+pinned_vms_names: []
+healing_in_progress_checks: 6
+healing_in_progress_check_delay: 300
+wait_to_finish_healing: 5
diff --git a/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/examples/cluster_upgrade.yml b/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/examples/cluster_upgrade.yml
new file mode 100644
index 000000000..37c57aea6
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/examples/cluster_upgrade.yml
@@ -0,0 +1,26 @@
+---
+- name: oVirt cluster upgrade
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars_files:
+ # Contains the `engine_password` variable encrypted using ansible-vault
+ - passwords.yml
+
+ vars:
+ engine_fqdn: ovirt.example.com
+ engine_user: admin@internal
+
+ cluster_name: mycluster
+ stop_non_migratable_vms: true
+ host_statuses:
+ - up
+ host_names:
+ - myhost1
+ - myhost2
+
+ roles:
+ - cluster_upgrade
+ collections:
+ - ovirt.ovirt
diff --git a/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/examples/passwords.yml b/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/examples/passwords.yml
new file mode 100644
index 000000000..92c7613c9
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/examples/passwords.yml
@@ -0,0 +1,12 @@
+---
+# As an example this file is kept in plaintext; if you want to
+# encrypt this file, please execute the following command:
+#
+# $ ansible-vault encrypt passwords.yml
+#
+# It will ask you for a password, which you must then pass to
+# ansible interactively when executing the playbook.
+#
+# $ ansible-playbook myplaybook.yml --ask-vault-pass
+#
+engine_password: 123456
diff --git a/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/cluster_policy.yml b/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/cluster_policy.yml
new file mode 100644
index 000000000..1c1806789
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/cluster_policy.yml
@@ -0,0 +1,25 @@
+---
+- name: Get name of the original scheduling policy
+ ovirt_scheduling_policy_info:
+ auth: "{{ ovirt_auth }}"
+ id: "{{ cluster_info.ovirt_clusters[0].scheduling_policy.id }}"
+ check_mode: "no"
+ register: sp_info
+
+- name: Remember the cluster scheduling policy
+ ansible.builtin.set_fact:
+ cluster_scheduling_policy: "{{ sp_info.ovirt_scheduling_policies[0].name }}"
+
+- name: Remember the cluster scheduling policy properties
+ ansible.builtin.set_fact:
+ cluster_scheduling_policy_properties: "{{ cluster_info.ovirt_clusters[0].custom_scheduling_policy_properties }}"
+
+- name: Set cluster upgrade policy
+ ovirt_cluster:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ cluster_name }}"
+ scheduling_policy: cluster_maintenance
+ register: cluster_policy
+ when:
+ - (api_info.ovirt_api.product_info.version.major >= 4 and api_info.ovirt_api.product_info.version.minor >= 2) or
+ (api_info.ovirt_api.product_info.version.major == 4 and api_info.ovirt_api.product_info.version.minor == 1 and api_info.ovirt_api.product_info.version.revision >= 4) # noqa yaml[line-length]
diff --git a/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/log_progress.yml b/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/log_progress.yml
new file mode 100644
index 000000000..088a8f5ee
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/log_progress.yml
@@ -0,0 +1,41 @@
+---
+# vars:
+# progress: % complete
+# cluster_name: (if available) what cluster is being worked on
+# host_name: (if available) what host in the cluster is being worked on
+# description: what part of the process is actually done
+
+- name: Log process block
+ block:
+ - name: Log progress as an event
+ vars:
+ message:
+ - "Cluster upgrade progress: {{ progress }}%"
+ - "{{ ', Cluster: ' + cluster_name if (cluster_name is defined and cluster_name) else '' }}"
+ - "{{ ', Host: ' + host_name if (host_name is defined and host_name) else '' }}"
+ - " [{{ description }}]"
+ ovirt_event:
+ auth: "{{ ovirt_auth }}"
+ state: present
+ severity: normal
+ custom_id: "{{ 2147483647 | random | int }}"
+ origin: "cluster_upgrade"
+ description: "{{ message | join('') }}"
+ cluster: "{{ cluster_id | default(omit) }}"
+
+ - name: Update the upgrade progress on the cluster
+ no_log: false
+ ansible.builtin.uri:
+ url: "{{ ovirt_auth.url }}/clusters/{{ cluster_id }}/upgrade"
+ method: POST
+ body_format: json
+ validate_certs: false
+ headers:
+ Authorization: "Bearer {{ ovirt_auth.token }}"
+ Correlation-Id: "{{ engine_correlation_id | default(omit) }}"
+ body:
+ upgrade_action: update_progress
+ upgrade_percent_complete: "{{ progress }}"
+ when:
+ - api_gt45 is defined and api_gt45
+ - upgrade_set is defined
diff --git a/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/main.yml b/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/main.yml
new file mode 100644
index 000000000..a18c62f40
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/main.yml
@@ -0,0 +1,283 @@
+---
+## https://github.com/ansible/ansible/issues/22397
+## Ansible 2.3 generates a WARNING when using {{ }} in a role's default variables;
+## this works around it until Ansible resolves the issue:
+- name: Initialize variables
+ ansible.builtin.set_fact:
+ stop_non_migratable_vms: "{{ stop_non_migratable_vms }}"
+ provided_token: "{{ engine_token | default(lookup('env','OVIRT_TOKEN')) | default('') }}"
+ engine_correlation_id: "{{ 99999999 | random | to_uuid }}"
+
+- name: Main block
+ block:
+ - name: Login to oVirt
+ ovirt_auth:
+ url: "{{ engine_url | default(lookup('env','OVIRT_URL')) | default(omit) }}"
+ username: "{{ engine_user | default(lookup('env','OVIRT_USERNAME')) | default(omit) }}"
+ hostname: "{{ engine_fqdn | default(lookup('env','OVIRT_HOSTNAME')) | default(omit) }}"
+ password: "{{ engine_password | default(lookup('env','OVIRT_PASSWORD')) | default(omit) }}"
+ ca_file: "{{ engine_cafile | default(lookup('env','OVIRT_CAFILE')) | default(omit) }}"
+ token: "{{ engine_token | default(lookup('env','OVIRT_TOKEN')) | default(omit) }}"
+ insecure: "{{ engine_insecure | default(true) }}"
+ headers:
+ correlation-id: "{{ engine_correlation_id | default(omit) }}"
+ when: ovirt_auth is undefined or not ovirt_auth
+ register: login_result
+ tags:
+ - always
+
+ - name: progress 0% - need to do info lookups
+ include_tasks: log_progress.yml
+ vars:
+ progress: 0
+ description: "gathering cluster info"
+
+ - name: Get API info
+ ovirt_api_info:
+ auth: "{{ ovirt_auth }}"
+ check_mode: "no"
+ register: api_info
+
+ - name: Get cluster info
+ ovirt_cluster_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "name={{ cluster_name }}"
+ follow: gluster_volumes
+ check_mode: "no"
+ register: cluster_info
+
+ - name: Remember the api version and cluster id
+ ansible.builtin.set_fact:
+ api_gt43: "{{ api_info.ovirt_api.product_info.version.major >= 4 and api_info.ovirt_api.product_info.version.minor >= 3 }}"
+ api_gt45: "{{ api_info.ovirt_api.product_info.version.major >= 4 and api_info.ovirt_api.product_info.version.minor >= 5 }}"
+ cluster_id: "{{ cluster_info.ovirt_clusters[0].id }}"
+
+ - name: progress 2% - cluster upgrade is starting
+ include_tasks: log_progress.yml
+ vars:
+ progress: 2
+ description: "starting upgrade"
+
+ - name: Set cluster upgrade status to running
+ no_log: false
+ ansible.builtin.uri:
+ url: "{{ ovirt_auth.url }}/clusters/{{ cluster_id }}/upgrade"
+ method: POST
+ body_format: json
+ validate_certs: false
+ headers:
+ Authorization: "Bearer {{ ovirt_auth.token }}"
+ Correlation-Id: "{{ engine_correlation_id | default(omit) }}"
+ body:
+ upgrade_action: start
+ when: api_gt43
+ register: upgrade_set
+
+ - name: progress 4% - all necessary info is looked up, hosts can now be upgraded
+ include_tasks: log_progress.yml
+ vars:
+ progress: 4
+ description: "collecting hosts to upgrade"
+
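+    # The search pattern below combines the mandatory cluster name with
+    # update_available=true (omitted when check_upgrade is true) and with
+    # or-ed name=... and status=... terms built from host_names and
+    # host_statuses.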
+ - name: Get hosts
+ ovirt_host_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: >-
+ cluster={{ cluster_name | mandatory }}
+ {{ check_upgrade | ternary('', 'update_available=true') }}
+ {{ host_names | map('regex_replace', '^(.*)$', 'name=\1') | list | join(' or ') }}
+ {{ host_statuses | map('regex_replace', '^(.*)$', 'status=\1') | list | join(' or ') }}
+ check_mode: "no"
+ register: host_info
+
+ - name: No hosts to be upgraded block
+ block:
+ - name: Print - no hosts to be upgraded
+ ansible.builtin.debug:
+ msg: "No hosts to be upgraded"
+
+ - name: progress 100% - no hosts need to be upgraded!
+ include_tasks: log_progress.yml
+ vars:
+ progress: 100
+ description: "no hosts need to be upgraded!"
+
+ - name: Log event - no hosts to be upgraded
+ ovirt_event:
+ auth: "{{ ovirt_auth }}"
+ state: present
+ description: "Upgrade of cluster {{ cluster_name }} complete, there are no hosts to be upgraded."
+ origin: "cluster_upgrade"
+ custom_id: "{{ 2147483647 | random | int }}"
+ severity: normal
+ cluster: "{{ cluster_id }}"
+
+ when: host_info.ovirt_hosts | length == 0
+
+ - name: Upgrade block
+ block:
+ - name: Start ovirt job session
+ ovirt_job:
+ auth: "{{ ovirt_auth }}"
+ description: "Upgrading hosts in {{ cluster_name }}"
+
+ - name: progress 6% - log hosts that are marked to be upgraded
+ include_tasks: log_progress.yml
+ vars:
+ progress: 6
+ description: "hosts to check for pinned VMs: {{ host_info.ovirt_hosts | map(attribute='name') | join(',') }}"
+
+ - name: Change cluster scheduling_policy to cluster_maintenance
+ include_tasks: cluster_policy.yml
+ when: use_maintenance_policy
+
+ - name: Determine what hosts have running pinned vms, they will not be upgraded
+ include_tasks: pinned_vms.yml
+
+ - name: Build the list of hosts that will be upgraded (hosts in host_info.ovirt_hosts without pinned VMs that cannot be stopped)
+ ansible.builtin.set_fact:
+ good_hosts: "{{ (good_hosts | default([])) | list + [ host ] | list }}"
+ loop: "{{ host_info.ovirt_hosts | flatten(levels=1) }}"
+ loop_control:
+ loop_var: "host"
+ when: "host.id not in host_ids or stop_non_migratable_vms"
+
+ - name: progress 8% - log hosts that will be upgraded
+ include_tasks: log_progress.yml
+ vars:
+ progress: 8
+ description: "hosts to be upgraded: {{ good_hosts | map(attribute='name') | join(',') }}"
+
+ - name: progress 10% - host upgrades starting
+ include_tasks: log_progress.yml
+ vars:
+ progress: 10
+ description: "starting the upgrade of {{ good_hosts | length }} hosts"
+
+ # Upgrade only those hosts that aren't in the list of hosts where VMs are pinned,
+ # or if stop_non_migratable_vms is enabled, which means we stop pinned VMs
+ # Note: Progress goes from 10% to 95%, each host taking up an equal amount of progress
+ - name: Upgrade the hosts in the cluster
+ include_tasks: upgrade.yml
+ vars:
+ progress_start: 10
+ progress_end: 95
+ loop: "{{ good_hosts | flatten(levels=1) }}"
+ loop_control:
+ extended: true
+ loop_var: "host"
+
+ - name: Finish ovirt job session
+ ovirt_job:
+ auth: "{{ ovirt_auth }}"
+ description: "Upgrading hosts in {{ cluster_name }}"
+ state: finished
+
+ - name: progress 95% - host upgrades completed successfully, only thing left is to start any non-migratable VMs stopped by the playbook
+ include_tasks: log_progress.yml
+ vars:
+ progress: 95
+ description: "the upgrade of {{ good_hosts | length }} hosts finished successfully"
+
+ - name: Log event - cluster upgrade finished successfully
+ ovirt_event:
+ auth: "{{ ovirt_auth }}"
+ state: present
+ description: "Upgrade of cluster {{ cluster_name }} finished successfully."
+ origin: "cluster_upgrade"
+ severity: normal
+ custom_id: "{{ 2147483647 | random | int }}"
+ cluster: "{{ cluster_id }}"
+
+ when: host_info.ovirt_hosts | length > 0
+
+ rescue:
+ - name: Log event - cluster upgrade failed
+ ovirt_event:
+ auth: "{{ ovirt_auth }}"
+ state: present
+ description: "Upgrade of cluster {{ cluster_name }} failed."
+ origin: "cluster_upgrade"
+ custom_id: "{{ 2147483647 | random | int }}"
+ severity: error
+ cluster: "{{ cluster_id }}"
+
+ - name: Fail ovirt job session
+ ovirt_job:
+ auth: "{{ ovirt_auth }}"
+ description: "Upgrading hosts in {{ cluster_name }}"
+ state: failed
+
+ - name: progress 95% - host upgrades failed, only thing left is to start any non-migratable VMs stopped by the playbook
+ include_tasks: log_progress.yml
+ vars:
+ progress: 95
+ description: "hosts upgrades failed"
+
+ always:
+ - name: Set original cluster policy
+ ovirt_cluster:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ cluster_name }}"
+ scheduling_policy: "{{ cluster_scheduling_policy }}"
+ scheduling_policy_properties: "{{ cluster_scheduling_policy_properties }}"
+ when: use_maintenance_policy and cluster_policy.changed | default(false)
+
+ - name: progress 95% - host upgrades are done (successful or not), only need to start VMs that were stopped by the playbook
+ include_tasks: log_progress.yml
+ vars:
+ progress: 95
+ description: "host upgrades are done (successful or not), restarting non-migratable VMs"
+
+ # TODO: These VMs aren't explicitly stopped anywhere...should they be?
+ - name: Start again stopped VMs
+ ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ item }}"
+ state: running
+ cluster: "{{ cluster_name }}"
+ ignore_errors: true
+ loop: "{{ stopped_vms | default([]) | flatten(levels=1) }}"
+
+ - name: Start again pin to host VMs
+ ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ item }}"
+ state: running
+ cluster: "{{ cluster_name }}"
+ ignore_errors: true
+ loop: "{{ pinned_vms_names | default([]) | flatten(levels=1) }}"
+ when: stop_non_migratable_vms
+
+ - name: progress 100% - host upgrades are done (successful or not), non-migratable VMs are started, everything is now done
+ include_tasks: log_progress.yml
+ vars:
+ progress: 100
+ description: "host upgrades are done, non-migratable VMs are restarted"
+
+ always:
+ - name: Set cluster upgrade status to finished
+ no_log: true
+ ansible.builtin.uri:
+ url: "{{ ovirt_auth.url }}/clusters/{{ cluster_id }}/upgrade"
+ validate_certs: false
+ method: POST
+ body_format: json
+ headers:
+ Authorization: "Bearer {{ ovirt_auth.token }}"
+ Correlation-Id: "{{ engine_correlation_id | default(omit) }}"
+ body:
+ upgrade_action: finish
+ when:
+ - upgrade_set is defined and not upgrade_set.failed | default(false)
+ - api_gt43
+
+ - name: Logout from oVirt
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
+ when:
+ - login_result.skipped is undefined or not login_result.skipped
+ - provided_token != ovirt_auth.token
+ tags:
+ - always
diff --git a/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/pinned_vms.yml b/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/pinned_vms.yml
new file mode 100644
index 000000000..32ec73672
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/pinned_vms.yml
@@ -0,0 +1,23 @@
+---
+- name: Get list of VMs in cluster
+ ovirt_vm_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "cluster={{ cluster_name }}"
+ check_mode: "no"
+ register: vms_in_cluster
+
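+# set_fact inside a loop keeps only the last assigned value, so each
+# iteration's value is registered instead and collected from the results list
+# in the next task.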
+- name: Create list of host IDs which have a running non-migratable VM and are not down
+ ansible.builtin.set_fact:
+ host_ids_items: "{{ item.host.id }}"
+ with_items:
+ - "{{ vms_in_cluster.ovirt_vms | default([]) }}"
+ when:
+ - "item['placement_policy']['affinity'] != 'migratable'"
+ - "item.host is defined"
+ loop_control:
+ label: "{{ item.name }}"
+ register: host_ids_result
+
+- name: Create list of host IDs which have a pinned VM
+ ansible.builtin.set_fact:
+ host_ids: "{{ host_ids_result.results | rejectattr('ansible_facts', 'undefined') | map(attribute='ansible_facts.host_ids_items') | list }}"
diff --git a/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/upgrade.yml b/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/upgrade.yml
new file mode 100644
index 000000000..67f30384a
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/upgrade.yml
@@ -0,0 +1,156 @@
+---
+# Upgrade uses a block to keep the variables local to the block. Vars are defined after tasks.
+- name: Upgrade block
+ block:
+
+ - name: Start prep host ovirt job step
+ ovirt_job:
+ auth: "{{ ovirt_auth }}"
+ description: "Upgrading hosts in {{ cluster_name }}"
+ steps:
+ - description: "Preparing host for upgrade: {{ host_name }}"
+
+ - name: progress - prepare host for upgrade (upgrade can't start until no VMs are running on the host)
+ include_tasks: log_progress.yml
+ vars:
+ progress: "{{ progress_host_start | int }}"
+ description: "preparing host for upgrade"
+
+ - name: Get list of VMs in host
+ ovirt_vm_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "cluster={{ cluster_name }} and host={{ host_name }} and status=up"
+ register: vms_in_host
+ check_mode: "no"
+
+ - name: Move user migratable vms
+ ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ force_migrate: true
+ migrate: true
+ state: running
+ name: "{{ item.name }}"
+ register: resp
+ when:
+ - "item['placement_policy']['affinity'] == 'user_migratable'"
+ with_items:
+ - "{{ vms_in_host.ovirt_vms }}"
+ loop_control:
+ label: "{{ item.name }}"
+
+ - name: progress - done migrating VMs (host 10% complete)
+ include_tasks: log_progress.yml
+ vars:
+ progress: "{{ (progress_host_start | int + (progress_host_step_size | int * 0.10)) | int }}"
+ description: "status=up VMs migrated off host"
+
+ - name: Shutdown non-migratable VMs
+ ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ state: stopped
+ force: true
+ name: "{{ item.name }}"
+ with_items:
+ - "{{ vms_in_host.ovirt_vms }}"
+ when:
+ - "item['placement_policy']['affinity'] == 'pinned'"
+ loop_control:
+ label: "{{ item.name }}"
+ register: pinned_to_host_vms
+
+ - name: Create list of VM names which have been shut down
+ ansible.builtin.set_fact:
+ pinned_vms_names: "{{ pinned_vms_names + pinned_to_host_vms.results | selectattr('changed') | map(attribute='vm.name') | list }}"
+
+ - name: progress - done shutting down pinned VMs (host 20% complete)
+ include_tasks: log_progress.yml
+ vars:
+ progress: "{{ (progress_host_start | int + (progress_host_step_size | int * 0.20)) | int }}"
+ description: "pinned VMs shutdown"
+
+ - name: Gather self-heal facts about all gluster hosts in the cluster
+ ansible.builtin.command: gluster volume heal {{ volume_item.name }} info
+ register: self_heal_status
+ retries: "{{ healing_in_progress_checks }}"
+ delay: "{{ healing_in_progress_check_delay }}"
+ until: >
+ self_heal_status.stdout_lines is defined and
+ self_heal_status.stdout_lines | select('match','^(Number of entries: )[0-9]+') | map('last') | map('int') | sum == 0
+ delegate_to: "{{ host_info.ovirt_hosts[0].address }}"
+ connection: ssh
+ with_items:
+ - "{{ cluster_info.ovirt_clusters[0].gluster_volumes }}"
+ loop_control:
+ loop_var: volume_item
+ when: cluster_info.ovirt_clusters[0].gluster_service | bool
+
+ - name: Refresh gluster heal info entries to database
+ ansible.builtin.uri:
+ url: "{{ ovirt_auth.url }}/clusters/{{ cluster_id }}/refreshglusterhealstatus"
+ method: POST
+ body_format: json
+ validate_certs: false
+ headers:
+ Authorization: "Bearer {{ ovirt_auth.token }}"
+ body: "{}"
+ when:
+ - cluster_info.ovirt_clusters[0].gluster_service | bool
+ - api_info.ovirt_api.product_info.version.major >= 4 and api_info.ovirt_api.product_info.version.minor >= 4
+
+ - name: progress - host is ready for upgrade (host 30% complete)
+ include_tasks: log_progress.yml
+ vars:
+ progress: "{{ (progress_host_start | int + (progress_host_step_size | int * 0.30)) | int }}"
+ description: "host is ready for upgrade"
+
+ - name: Finish prep host ovirt job step
+ ovirt_job:
+ auth: "{{ ovirt_auth }}"
+ description: "Upgrading hosts in {{ cluster_name }}"
+ steps:
+ - description: "Preparing host for upgrade: {{ host_name }}"
+ state: finished
+
+ - name: Start upgrade host ovirt job step
+ ovirt_job:
+ auth: "{{ ovirt_auth }}"
+ description: "Upgrading hosts in {{ cluster_name }}"
+ steps:
+ - description: "Upgrading host: {{ host_name }}"
+
+ - name: Upgrade host
+ ovirt_host:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ host_name }}"
+ state: upgraded
+ check_upgrade: "{{ check_upgrade }}"
+ reboot_after_upgrade: "{{ reboot_after_upgrade }}"
+ timeout: "{{ upgrade_timeout }}"
+
+ - name: Delay in minutes to wait to finish gluster healing process after successful host upgrade
+ ansible.builtin.pause:
+ minutes: "{{ wait_to_finish_healing }}"
+ when:
+ - cluster_info.ovirt_clusters[0].gluster_service | bool
+ - host_info.ovirt_hosts | length > 1
+
+ - name: progress - host upgrade complete (host 100% complete)
+ include_tasks: log_progress.yml
+ vars:
+ progress: "{{ (progress_host_start | int + (progress_host_step_size | int * 1.00)) | int }}"
+ description: "host upgrade complete"
+
+ - name: Finish upgrade host ovirt job step
+ ovirt_job:
+ auth: "{{ ovirt_auth }}"
+ description: "Upgrading hosts in {{ cluster_name }}"
+ steps:
+ - description: "Upgrading host: {{ host_name }}"
+ state: finished
+
+ vars:
+ host_name: "{{ host.name }}"
+ my_step: "{{ ansible_loop.index | int }}"
+ progress_host_step_size: "{{ ((progress_end - progress_start) / ansible_loop.length) | round | int }}"
+ progress_host_start: "{{ progress_start | int + (progress_host_step_size | int * (my_step | int - 1)) }}"
+ progress_host_end: "{{ progress_start | int + (progress_host_step_size | int * my_step | int) }}"
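+    # Worked example (hypothetical numbers): with progress_start=10,
+    # progress_end=95 and 5 hosts, progress_host_step_size is
+    # round((95 - 10) / 5) = 17, so host 1 spans progress 10..27, host 2
+    # spans 27..44, and host 5 ends at 10 + 17 * 5 = 95.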
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/README.md b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/README.md
new file mode 100644
index 000000000..56886ba4f
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/README.md
@@ -0,0 +1,77 @@
+oVirt Disaster Recovery
+=========
+
+The `disaster_recovery` role is responsible for managing disaster recovery scenarios in oVirt.
+
+Role Variables
+--------------
+
+| Name | Default value | |
+|-------------------------|-----------------------|-----------------------------------------------------|
+| dr_ignore_error_clean | False | Specify whether to ignore errors on clean engine setup.<br/>This is mainly used to avoid failures when trying to move a storage domain to maintenance or to detach it. |
+| dr_ignore_error_recover | True | Specify whether to ignore errors on recover. |
+| dr_partial_import | True | Specify whether to use the partial import flag when registering VMs/Templates.<br/>If True, VMs and Templates will be registered even if some of their disks are missing; if False, registration of VMs/Templates will fail when any of their disks are missing from the storage domains. |
+| dr_target_host | secondary | Specify the default target host to be used in the ansible play.<br/> This host indicates the target site which the recover process will be done. |
+| dr_source_map | primary | Specify the default source map to be used in the play.<br/> The source map indicates the key which is used to get the target value for each attribute which we want to register with the VM/Template. |
+| dr_reset_mac_pool | True | If True, then once a VM is registered its MAC pool will automatically be reset, if one is configured for the VM. |
+| dr_cleanup_retries_maintenance | 3 | Specify the number of retries when moving a storage domain to maintenance as part of a failback scenario. |
+| dr_cleanup_delay_maintenance | 120 | Specify the number of seconds between each retry as part of a failback scenario. |
+| dr_clean_orphaned_vms | True | Specify whether to remove any VMs which have no disks from the setup as part of cleanup. |
+| dr_clean_orphaned_disks | True | Specify whether to remove lun disks from the setup as part of engine setup. |
+| dr_running_vms | /tmp/ovirt_dr_running_vm_list | Specify the file path used to store the data of the VMs running in the secondary setup before the failback process runs on the primary setup, after the secondary site cleanup has finished. Note that the /tmp folder is used as the default, so the file will not be available after a system reboot. |
+
+
+Example Playbook
+----------------
+
+```yaml
+---
+- name: Setup oVirt environment
+ hosts: localhost
+ connection: local
+ vars_files:
+ - ovirt_passwords.yml
+ - disaster_recovery_vars.yml
+ roles:
+ - disaster_recovery
+ collections:
+ - ovirt.ovirt
+```
+
+Generate var file mapping [demo](https://youtu.be/s1-Hq_Mk1w8)
+<br/>
+Fail over scenario [demo](https://youtu.be/mEOgH-Tk09c)
+
+Scripts
+-------
+The ovirt-dr script provides the user with a more convenient way to run
+disaster recovery actions without using ansible playbooks directly.
+There are four actions which the user can execute:
+- `generate` Generate the mapping var file based on the primary and secondary setup, to be used for failover and failback
+- `validate` Validate the var file mapping which is used for failover and failback
+- `failover` Start a failover process to the target setup
+- `failback` Start a failback process from the target setup to the source setup
+
+Each of these actions uses a configuration file whose default location is `disaster_recovery/files/dr.conf`.<br/>
+The configuration file's location can be changed using `--conf-file` flag in the `ovirt-dr` script.<br/>
+Log file and log level can be configured as well through the `ovirt-dr` script using the flags `--log-file` and `--log-level`
+
+
+Example Script
+--------------
+For mapping file generation (from the `./roles/disaster_recovery/files/` directory):
+```console
+$ ./ovirt-dr generate --log-file=ovirt-dr.log --log-level=DEBUG
+```
+For mapping file validation:
+```console
+$ ./ovirt-dr validate
+```
+For fail-over operation:
+```console
+$ ./ovirt-dr failover
+```
+For fail-back operation:
+```console
+$ ./ovirt-dr failback
+```
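+For using a custom configuration file with any of the actions (the path shown
+is hypothetical):
+```console
+$ ./ovirt-dr generate --conf-file=/path/to/dr.conf
+```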
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/defaults/main.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/defaults/main.yml
new file mode 100644
index 000000000..ef07c479e
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/defaults/main.yml
@@ -0,0 +1,37 @@
+# Indicate whether to ignore errors on clean engine setup.
+dr_ignore_error_clean: "False"
+
+# Indicate whether to ignore errors on recover.
+dr_ignore_error_recover: "True"
+
+# Indicate whether to use the partial import flag when registering VMs and Templates.
+dr_partial_import: "True"
+
+# Indicate the default target host to be used in the play.
+dr_target_host: "secondary"
+
+# Indicate the default source map to be used in the play.
+dr_source_map: "primary"
+
+# Indicate whether to reset a mac pool of a VM on register.
+dr_reset_mac_pool: "True"
+
+# Indicate the number of retries for moving a storage domain to maintenance mode (in case of a failure due to running tasks).
+dr_cleanup_retries_maintenance: 3
+
+# Indicate the number of seconds between maintenance retries (in case of a failure due to running tasks).
+dr_cleanup_delay_maintenance: 120
+
+# Indicate whether to remove any VMs which have no disks from the setup as part of cleanup.
+dr_clean_orphaned_vms: "True"
+
+# Indicate whether to remove direct LUN disks from the setup as part of the engine cleanup.
+dr_clean_orphaned_disks: "True"
+
+# Indicate the default file name for the entities status report.
+dr_report_file: "report.log"
+
+# Indicate the file used to store the list of VMs running in the secondary setup before the failback,
+# so they can be started again on the primary setup once the failback has finished.
+# Note that the /tmp folder is used by default, so the file will not be available after a system reboot.
+dr_running_vms: "/tmp/ovirt_dr_running_vm_list"
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/disaster_recovery_vars.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/disaster_recovery_vars.yml
new file mode 100644
index 000000000..0c50409cc
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/disaster_recovery_vars.yml
@@ -0,0 +1,109 @@
+---
+dr_sites_primary_url: "https://engine1.example.com/ovirt-engine/api"
+dr_sites_primary_username: "admin@internal"
+dr_sites_primary_ca_file: "/etc/pki/ovirt-engine/ca.pem"
+
+dr_sites_secondary_url: "https://engine2.example.com/ovirt-engine/api"
+dr_sites_secondary_username: "admin@internal"
+dr_sites_secondary_ca_file: "/etc/pki/ovirt-engine/ca.pem"
+
+dr_import_storages:
+ - dr_domain_type: "nfs"
+ dr_primary_master_domain: "True"
+ dr_primary_address: "xx.xx.xx.xx"
+ dr_primary_path: "/export/path1"
+ dr_primary_dc_name: "Prod"
+ dr_primary_name: "primary_master_storage"
+ dr_secondary_master_domain: "True"
+ dr_secondary_address: "yy.yy.yy.yy"
+ dr_secondary_path: "/export/path1"
+ dr_secondary_dc_name: "Recovery"
+ dr_secondary_name: "secondary_master_storage"
+
+ - dr_domain_type: "nfs"
+ dr_wipe_after_delete: false
+ dr_backup: false
+ dr_critical_space_action_blocker: 5
+ dr_warning_low_space: 5
+ dr_primary_master_domain: "False"
+ dr_primary_name: "path2"
+ dr_primary_address: "xx.xx.xx.xx"
+ dr_primary_path: "/export/path2"
+ dr_primary_dc_name: "Prod"
+ dr_secondary_name: "path2"
+ dr_secondary_master_domain: false
+ dr_secondary_address: "yy.yy.yy.yy"
+ dr_secondary_path: "/export/path2"
+ dr_secondary_dc_name: "Recovery"
+
+ - dr_domain_type: "iscsi"
+ dr_wipe_after_delete: false
+ dr_backup: false
+ dr_critical_space_action_blocker: 1
+ dr_warning_low_space: 5
+ dr_primary_master_domain: "False"
+ dr_domain_id: "aa92cc71-1b88-4998-a755-970ef8a638ea"
+ dr_primary_address: "yy.yy.yy.yy"
+ dr_primary_port: 3260
+ dr_primary_target: ["iqn.2017-10.com.primary.redhat:444"]
+ dr_primary_dc_name: "Prod"
+ dr_primary_name: "scsi_domain"
+ dr_secondary_name: "scsi_domain"
+ dr_secondary_dc_name: "Recovery"
+ dr_secondary_master_domain: "False"
+ dr_secondary_address: "zz.zz.zz.zz"
+ dr_secondary_port: 3260
+ dr_secondary_target: ["iqn.2017-07.com.recovery.redhat:444"]
+
+# Mapping for cluster
+dr_cluster_mappings:
+ - primary_name: "cluster_prod"
+ secondary_name: "cluster_recovery"
+
+# Mapping for affinity group
+dr_affinity_group_mappings:
+ - primary_name: "primary_affinity"
+ secondary_name: "secondary_affinity"
+
+# Mapping for affinity label
+dr_affinity_label_mappings:
+ - primary_name: "label_prod"
+ secondary_name: "label_recovery"
+
+# Mapping for domain
+dr_domain_mappings:
+ - primary_name: "new-authz"
+ secondary_name: "internal-authz"
+
+# Mapping for roles
+dr_role_mappings:
+ - primary_name: "VmMananger"
+ secondary_name: "NeverMnd"
+
+# Mapping for vnic profile
+dr_network_mappings:
+ - primary_network_name: "ovirtmgmt"
+ primary_profile_name: "ovirtmgmt"
+ primary_profile_id: "e368cbd4-59d9-4a7e-86c1-e405c916a836"
+ secondary_network_name: "ovirtmgmt"
+ secondary_profile_name: "ovirtmgmt"
+ secondary_profile_id: "e368cbd4-59d9-4a7e-86c1-e405c916a836"
+
+# Mapping for direct LUN disks
+dr_lun_mappings:
+ - primary_logical_unit_id: "360014056a2be431c0fd46c4bdce92b66"
+ primary_storage_type: "iscsi"
+ primary_logical_unit_address: "yy.yy.yy.yy"
+ primary_logical_unit_port: 3260
+ primary_logical_unit_portal: "1"
+ primary_logical_unit_username: ""
+ primary_logical_unit_password: ""
+ primary_logical_unit_target: "iqn.2017-10.com.primary.redhat:444"
+ secondary_storage_type: "iscsi"
+ secondary_logical_unit_id: "36001405961a7f95e6aa461b8dba53052"
+ secondary_logical_unit_address: "zz.zz.zz.zz"
+ secondary_logical_unit_port: 3260
+ secondary_logical_unit_portal: "1"
+ secondary_logical_unit_username: ""
+ secondary_logical_unit_password: ""
+ secondary_logical_unit_target: "iqn.2017-10.com.recovery.redhat:444"
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/dr_ovirt_setup.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/dr_ovirt_setup.yml
new file mode 100644
index 000000000..e8d2ee3f4
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/dr_ovirt_setup.yml
@@ -0,0 +1,11 @@
+---
+- name: Setup oVirt environment
+ hosts: localhost
+ connection: local
+ vars_files:
+ - ovirt_passwords.yml
+ - disaster_recovery_vars.yml
+ roles:
+ - disaster_recovery
+ collections:
+ - ovirt.ovirt
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/dr_play.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/dr_play.yml
new file mode 100644
index 000000000..846fc83e8
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/dr_play.yml
@@ -0,0 +1,8 @@
+---
+- name: Setup oVirt environment
+ hosts: localhost
+ connection: local
+ roles:
+ - disaster_recovery
+ collections:
+ - ovirt.ovirt
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/ovirt_passwords.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/ovirt_passwords.yml
new file mode 100644
index 000000000..58c523043
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/ovirt_passwords.yml
@@ -0,0 +1,13 @@
+---
+# As an example this file is kept in plaintext. If you want to
+# encrypt it, execute the following command:
+#
+# $ ansible-vault encrypt ovirt_passwords.yml
+#
+# It will ask you for a password, which you must then pass to
+# ansible interactively when executing the playbook.
+#
+# $ ansible-playbook myplaybook.yml --ask-vault-pass
+#
+dr_sites_primary_password: 123456
+dr_sites_secondary_password: 123456
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/bcolors.py b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/bcolors.py
new file mode 100644
index 000000000..053481316
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/bcolors.py
@@ -0,0 +1,21 @@
+#!/usr/bin/python3
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class bcolors:
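+    """ANSI escape sequences for coloring terminal output."""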
+ HEADER = '\033[95m'
+ OKBLUE = '\033[1;34m'
+ OKGREEN = '\033[0;32m'
+    WARNING = '\033[0;33m'
+ FAIL = '\033[0;31m'
+ ENDC = '\033[0m'
+
+ def disable(self):
+ self.HEADER = ''
+ self.OKBLUE = ''
+ self.OKGREEN = ''
+ self.WARNING = ''
+ self.FAIL = ''
+ self.ENDC = ''
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/dr.conf b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/dr.conf
new file mode 100644
index 000000000..dfc99a5f7
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/dr.conf
@@ -0,0 +1,21 @@
+[log]
+log_file=/tmp/ovirt-dr-{}.log
+log_level=DEBUG
+
+[generate_vars]
+site=http://engine.example.com/ovirt-engine/api
+username=admin@internal
+password=
+ca_file=/etc/pki/ovirt-engine/ca.pem
+output_file=../examples/disaster_recovery_vars.yml
+ansible_play=../examples/dr_play.yml
+
+[validate_vars]
+var_file=../examples/disaster_recovery_vars.yml
+
+[failover_failback]
+dr_target_host=secondary
+dr_source_map=primary
+vault=../examples/ovirt_passwords.yml
+var_file=../examples/disaster_recovery_vars.yml
+ansible_play=../examples/dr_play.yml
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/fail_back.py b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/fail_back.py
new file mode 100755
index 000000000..34d087b96
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/fail_back.py
@@ -0,0 +1,288 @@
+#!/usr/bin/python3
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import logging
+import os.path
+import subprocess
+from subprocess import call
+import sys
+import time
+
+from configparser import ConfigParser
+
+from bcolors import bcolors
+
+INFO = bcolors.OKGREEN
+INPUT = bcolors.OKGREEN
+WARN = bcolors.WARNING
+FAIL = bcolors.FAIL
+END = bcolors.ENDC
+PREFIX = "[Failback] "
+VAR_FILE_DEF = "../examples/disaster_recovery_vars.yml"
+PLAY_DEF = "../examples/dr_play.yml"
+report_name = "report-{}.log"
+
+
+class FailBack:
+
+ def run(self, conf_file, log_file, log_level):
+ log = self._set_log(log_file, log_level)
+ log.info("Start failback operation...")
+ target_host, source_map, var_file, vault_file, ansible_play_file = \
+ self._init_vars(conf_file)
+ report = report_name.format(int(round(time.time() * 1000)))
+ log.info("\ntarget_host: %s \n"
+ "source_map: %s \n"
+ "var_file: %s \n"
+ "vault_file: %s \n"
+ "ansible_play_file: %s \n"
+ "report log file: /tmp/%s\n",
+ target_host,
+ source_map,
+ var_file,
+ vault_file,
+ ansible_play_file,
+ report)
+
+ dr_clean_tag = "clean_engine"
+ extra_vars_cleanup = " dr_source_map=" + target_host
+ command_cleanup = [
+ "ansible-playbook", ansible_play_file,
+ "-t", dr_clean_tag,
+ "-e", "@" + var_file,
+ "-e", "@" + vault_file,
+ "-e", extra_vars_cleanup,
+ "--vault-password-file", "vault_secret.sh",
+ "-vvv"
+ ]
+
+ dr_failback_tag = "fail_back"
+ extra_vars_failback = (" dr_target_host=" + target_host
+ + " dr_source_map=" + source_map
+ + " dr_report_file=" + report)
+ command_failback = [
+ "ansible-playbook", ansible_play_file,
+ "-t", dr_failback_tag,
+ "-e", "@" + var_file,
+ "-e", "@" + vault_file,
+ "-e", extra_vars_failback,
+ "--vault-password-file", "vault_secret.sh",
+ "-vvv"
+ ]
+
+ # Setting vault password.
+ vault_pass = input("%s%sPlease enter vault password "
+ "(in case of plain text please press ENTER): %s"
+ % (INPUT, PREFIX, END))
+        # Export the password via this process' environment so the
+        # ansible-playbook child processes inherit it; os.system("export ...")
+        # would only set it in a short-lived subshell.
+        os.environ["vault_password"] = vault_pass
+
+ info_msg = ("Starting cleanup process of setup '{0}' for "
+ "oVirt ansible disaster recovery".format(target_host))
+ log.info(info_msg)
+ print("\n%s%s%s%s" % (INFO, PREFIX, info_msg, END))
+
+ log.info("Executing cleanup command: %s",
+ ' '.join(map(str, command_cleanup)))
+ if log_file is not None and log_file != '':
+ self._log_to_file(log_file, command_cleanup)
+ else:
+ self._log_to_console(command_cleanup, log)
+
+ info_msg = ("Finished cleanup of setup '{0}' "
+ "for oVirt ansible disaster recovery".format(source_map))
+ log.info(info_msg)
+ print("\n%s%s%s%s" % (INFO, PREFIX, info_msg, END))
+
+ info_msg = ("Starting failback process to setup '{0}' "
+ "from setup '{1}' for oVirt ansible disaster recovery"
+ .format(target_host, source_map))
+ log.info(info_msg)
+ print("\n%s%s%s%s" % (INFO, PREFIX, info_msg, END))
+
+ log.info("Executing failback command: %s",
+ ' '.join(map(str, command_failback)))
+ if log_file is not None and log_file != '':
+ self._log_to_file(log_file, command_failback)
+ else:
+ self._log_to_console(command_failback, log)
+
+ call(["cat", "/tmp/" + report])
+ print("\n%s%sFinished failback operation"
+ " for oVirt ansible disaster recovery%s" % (INFO, PREFIX, END))
+
+ def _log_to_file(self, log_file, command):
+ with open(log_file, "a") as f:
+ proc = subprocess.Popen(command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True)
+ for line in iter(proc.stdout.readline, ''):
+ if 'TASK [' in line:
+ print("\n%s%s%s\n" % (INFO, line, END))
+ if "[Failback Replication Sync]" in line:
+ print("%s%s%s" % (INFO, line, END))
+ f.write(line)
+ for line in iter(proc.stderr.readline, ''):
+ f.write(line)
+ print("%s%s%s" % (WARN, line, END))
+
+ def _log_to_console(self, command, log):
+ proc = subprocess.Popen(command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True)
+ for line in iter(proc.stdout.readline, ''):
+ if "[Failback Replication Sync]" in line:
+ print("%s%s%s" % (INFO, line, END))
+ else:
+ log.debug(line)
+ for line in iter(proc.stderr.readline, ''):
+            log.warning(line)
+        self._handle_result(proc)
+
+    def _handle_result(self, proc):
+        # Wait for the already-running playbook to finish and inspect its
+        # exit status; re-running it with subprocess.check_output() would
+        # execute the whole flow a second time.
+        if proc.wait() != 0:
+            print("%sfailback operation failed, please check log file for "
+                  "further details.%s"
+                  % (FAIL, END))
+            sys.exit()
+
+ def _init_vars(self, conf_file):
+ """ Declare constants """
+ _SECTION = "failover_failback"
+ _TARGET = "dr_target_host"
+ _SOURCE = "dr_source_map"
+ _VAULT = "vault"
+ _VAR_FILE = "var_file"
+ _ANSIBLE_PLAY = 'ansible_play'
+ setups = ['primary', 'secondary']
+
+ settings = ConfigParser()
+ settings.read(conf_file)
+ if _SECTION not in settings.sections():
+ settings.add_section(_SECTION)
+ if not settings.has_option(_SECTION, _TARGET):
+ settings.set(_SECTION, _TARGET, '')
+ if not settings.has_option(_SECTION, _SOURCE):
+ settings.set(_SECTION, _SOURCE, '')
+ if not settings.has_option(_SECTION, _VAULT):
+ settings.set(_SECTION, _VAULT, '')
+ if not settings.has_option(_SECTION, _VAR_FILE):
+ settings.set(_SECTION, _VAR_FILE, '')
+ if not settings.has_option(_SECTION, _ANSIBLE_PLAY):
+ settings.set(_SECTION, _ANSIBLE_PLAY, '')
+
+ # We fetch the source map as target host,
+ # since in failback we do the reverse operation.
+ target_host = settings.get(_SECTION, _SOURCE,
+ vars=DefaultOption(settings,
+ _SECTION,
+ source_map=None))
+
+        # We fetch the target host as the source mapping for failback,
+ # since we do the reverse operation.
+ source_map = settings.get(_SECTION, _TARGET,
+ vars=DefaultOption(settings,
+ _SECTION,
+ target_host=None))
+
+ vault_file = settings.get(_SECTION, _VAULT,
+ vars=DefaultOption(settings,
+ _SECTION,
+ vault=None))
+ vault_file = os.path.expanduser(vault_file)
+
+ var_file = settings.get(_SECTION, _VAR_FILE,
+ vars=DefaultOption(settings,
+ _SECTION,
+ var_file=None))
+ var_file = os.path.expanduser(var_file)
+
+ ansible_play_file = settings.get(_SECTION, _ANSIBLE_PLAY,
+ vars=DefaultOption(settings,
+ _SECTION,
+ ansible_play=None))
+ ansible_play_file = os.path.expanduser(ansible_play_file)
+
+ while target_host not in setups:
+ target_host = input("%s%sThe target host '%s' was not defined. "
+ "Please provide the target host "
+ "to failback to (primary or secondary): %s"
+ % (INPUT, PREFIX, target_host, END))
+ while source_map not in setups:
+ source_map = input("%s%sThe source mapping '%s' was not defined. "
+ "Please provide the source mapping "
+ "(primary or secondary): %s"
+ % (INPUT, PREFIX, source_map, END))
+
+ while not os.path.isfile(var_file):
+ var_file = input("%s%sVar file '%s' does not exist. Please "
+ "provide the location of the var file (%s): %s"
+ % (INPUT, PREFIX, var_file, VAR_FILE_DEF, END)
+ ) or VAR_FILE_DEF
+ var_file = os.path.expanduser(var_file)
+
+ while not os.path.isfile(vault_file):
+ vault_file = input("%s%sPassword file '%s' does not exist. "
+ "Please provide a valid password file: %s"
+ % (INPUT, PREFIX, vault_file, END))
+ vault_file = os.path.expanduser(vault_file)
+
+ while not os.path.isfile(ansible_play_file):
+ ansible_play_file = input("%s%sAnsible play file '%s' does not "
+ "exist. Please provide the ansible play "
+ "file to run the failback flow (%s): %s"
+ % (INPUT,
+ PREFIX,
+ ansible_play_file,
+ PLAY_DEF,
+ END)
+ ) or PLAY_DEF
+ ansible_play_file = os.path.expanduser(ansible_play_file)
+
+ return target_host, source_map, var_file, vault_file, ansible_play_file
+
+ def _set_log(self, log_file, log_level):
+ logger = logging.getLogger(PREFIX)
+
+ if log_file is not None and log_file != '':
+ formatter = logging.Formatter(
+ '%(asctime)s %(levelname)s %(message)s')
+ hdlr = logging.FileHandler(log_file)
+ hdlr.setFormatter(formatter)
+ else:
+ hdlr = logging.StreamHandler(sys.stdout)
+
+ logger.addHandler(hdlr)
+ logger.setLevel(log_level)
+ return logger
+
+
+class DefaultOption(dict):
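+    """Passed as ``vars`` to ConfigParser.get(). Since ``vars`` takes
+    precedence over the section, items() is overridden to return the
+    config file's own value whenever the option exists there, so the
+    supplied default only applies to missing options."""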
+
+ def __init__(self, config, section, **kv):
+ self._config = config
+ self._section = section
+ dict.__init__(self, **kv)
+
+ def items(self):
+ _items = []
+ for option in self:
+ if not self._config.has_option(self._section, option):
+ _items.append((option, self[option]))
+ else:
+ value_in_config = self._config.get(self._section, option)
+ _items.append((option, value_in_config))
+ return _items
+
+
+if __name__ == "__main__":
+ FailBack().run(conf_file='dr.conf',
+ log_file='/tmp/ovirt-dr.log',
+ log_level=logging.getLevelName("DEBUG"))
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/fail_over.py b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/fail_over.py
new file mode 100755
index 000000000..2b96dd422
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/fail_over.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python3
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import logging
+import os.path
+import subprocess
+from subprocess import call
+import sys
+import time
+
+from configparser import ConfigParser
+
+from bcolors import bcolors
+
+INFO = bcolors.OKGREEN
+INPUT = bcolors.OKGREEN
+WARN = bcolors.WARNING
+FAIL = bcolors.FAIL
+END = bcolors.ENDC
+PREFIX = "[Failover] "
+VAR_FILE_DEF = "../examples/disaster_recovery_vars.yml"
+PLAY_DEF = "../examples/dr_play.yml"
+report_name = "report-{}.log"
+
+
+class FailOver:
+
+ def run(self, conf_file, log_file, log_level):
+ log = self._set_log(log_file, log_level)
+ log.info("Start failover operation...")
+ target_host, source_map, var_file, vault_file, ansible_play_file = \
+ self._init_vars(conf_file)
+ report = report_name.format(int(round(time.time() * 1000)))
+ log.info("\ntarget_host: %s \n"
+ "source_map: %s \n"
+ "var_file: %s \n"
+ "vault_file: %s \n"
+ "ansible_play_file: %s \n"
+ "report log file: /tmp/%s\n",
+ target_host,
+ source_map,
+ var_file,
+ vault_file,
+ ansible_play_file,
+ report)
+
+ dr_tag = "fail_over"
+ extra_vars = (" dr_target_host=" + target_host
+ + " dr_source_map=" + source_map
+ + " dr_report_file=" + report)
+ command = [
+ "ansible-playbook", ansible_play_file,
+ "-t", dr_tag,
+ "-e", "@" + var_file,
+ "-e", "@" + vault_file,
+ "-e", extra_vars,
+ "--vault-password-file", "vault_secret.sh",
+ "-vvv"
+ ]
+
+ # Setting vault password.
+ vault_pass = input("%s%sPlease enter vault password "
+ "(in case of plain text please press ENTER): %s"
+ % (INPUT, PREFIX, END))
+        # Export the password via this process' environment so the
+        # ansible-playbook child process inherits it; os.system("export ...")
+        # would only set it in a short-lived subshell.
+        os.environ["vault_password"] = vault_pass
+
+ log.info("Executing failover command: %s", ' '.join(map(str, command)))
+ if log_file is not None and log_file != '':
+ self._log_to_file(log_file, command)
+ else:
+ self._log_to_console(command, log)
+
+ call(["cat", "/tmp/" + report])
+ print("\n%s%sFinished failover operation"
+ " for oVirt ansible disaster recovery%s" % (INFO, PREFIX, END))
+
+ def _log_to_file(self, log_file, command):
+ with open(log_file, "a") as f:
+ proc = subprocess.Popen(command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True)
+ for line in iter(proc.stdout.readline, ''):
+ if 'TASK [' in line:
+ print("\n%s%s%s\n" % (INFO, line, END))
+ f.write(line)
+ for line in iter(proc.stderr.readline, ''):
+ f.write(line)
+ print("%s%s%s" % (WARN, line, END))
+            self._handle_result(proc)
+
+ def _log_to_console(self, command, log):
+ proc = subprocess.Popen(command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True)
+ for line in iter(proc.stdout.readline, ''):
+ log.debug(line)
+ for line in iter(proc.stderr.readline, ''):
+            log.warning(line)
+
+    def _handle_result(self, proc):
+        # Wait for the already-running playbook to finish and inspect its
+        # exit status; re-running it with subprocess.check_output() would
+        # execute the whole flow a second time.
+        if proc.wait() != 0:
+            print("%sfailover operation failed, please check log file for "
+                  "further details.%s"
+                  % (FAIL, END))
+            sys.exit()
+
+ def _init_vars(self, conf_file):
+ """ Declare constants """
+ _SECTION = "failover_failback"
+ _TARGET = "dr_target_host"
+ _SOURCE = "dr_source_map"
+ _VAULT = "vault"
+ _VAR_FILE = "var_file"
+ _ANSIBLE_PLAY = 'ansible_play'
+ setups = ['primary', 'secondary']
+
+ settings = ConfigParser()
+ settings.read(conf_file)
+ if _SECTION not in settings.sections():
+ settings.add_section(_SECTION)
+ if not settings.has_option(_SECTION, _TARGET):
+ settings.set(_SECTION, _TARGET, '')
+ if not settings.has_option(_SECTION, _SOURCE):
+ settings.set(_SECTION, _SOURCE, '')
+ if not settings.has_option(_SECTION, _VAULT):
+ settings.set(_SECTION, _VAULT, '')
+ if not settings.has_option(_SECTION, _VAR_FILE):
+ settings.set(_SECTION, _VAR_FILE, '')
+ if not settings.has_option(_SECTION, _ANSIBLE_PLAY):
+ settings.set(_SECTION, _ANSIBLE_PLAY, '')
+
+ target_host = settings.get(_SECTION, _TARGET,
+ vars=DefaultOption(settings,
+ _SECTION,
+ target_host=None))
+
+ source_map = settings.get(_SECTION, _SOURCE,
+ vars=DefaultOption(settings,
+ _SECTION,
+ source_map=None))
+
+ vault_file = settings.get(_SECTION, _VAULT,
+ vars=DefaultOption(settings,
+ _SECTION,
+ vault=None))
+ vault_file = os.path.expanduser(vault_file)
+
+ var_file = settings.get(_SECTION, _VAR_FILE,
+ vars=DefaultOption(settings,
+ _SECTION,
+ var_file=None))
+ var_file = os.path.expanduser(var_file)
+
+ ansible_play_file = settings.get(_SECTION, _ANSIBLE_PLAY,
+ vars=DefaultOption(settings,
+ _SECTION,
+ ansible_play=None))
+ ansible_play_file = os.path.expanduser(ansible_play_file)
+
+ while target_host not in setups:
+ target_host = input("%s%sThe target host '%s' was not defined. "
+ "Please provide the target host "
+ "to failover to (primary or secondary): %s"
+ % (INPUT, PREFIX, target_host, END))
+ while source_map not in setups:
+ source_map = input("%s%sThe source mapping '%s' was not defined. "
+ "Please provide the source mapping "
+ "(primary or secondary): %s"
+ % (INPUT, PREFIX, source_map, END))
+
+ while not os.path.isfile(var_file):
+ var_file = input("%s%sVar file '%s' does not exist. Please "
+ "provide the location of the var file (%s): %s"
+ % (INPUT, PREFIX, var_file, VAR_FILE_DEF, END)
+ ) or VAR_FILE_DEF
+ var_file = os.path.expanduser(var_file)
+
+ while not os.path.isfile(vault_file):
+ vault_file = input("%s%sPassword file '%s' does not exist. "
+ "Please provide a valid password file: %s"
+ % (INPUT, PREFIX, vault_file, END))
+ vault_file = os.path.expanduser(vault_file)
+
+ while not os.path.isfile(ansible_play_file):
+ ansible_play_file = input("%s%sAnsible play file '%s' does not "
+ "exist. Please provide the ansible play "
+ "file to run the failover flow (%s): %s"
+ % (INPUT,
+ PREFIX,
+ ansible_play_file,
+ PLAY_DEF,
+ END)
+ ) or PLAY_DEF
+ ansible_play_file = os.path.expanduser(ansible_play_file)
+
+ return target_host, source_map, var_file, vault_file, ansible_play_file
+
+ def _set_log(self, log_file, log_level):
+ logger = logging.getLogger(PREFIX)
+
+ if log_file is not None and log_file != '':
+ formatter = logging.Formatter(
+ '%(asctime)s %(levelname)s %(message)s')
+ hdlr = logging.FileHandler(log_file)
+ hdlr.setFormatter(formatter)
+ else:
+ hdlr = logging.StreamHandler(sys.stdout)
+
+ logger.addHandler(hdlr)
+ logger.setLevel(log_level)
+ return logger
+
+
+class DefaultOption(dict):
+
+ def __init__(self, config, section, **kv):
+ self._config = config
+ self._section = section
+ dict.__init__(self, **kv)
+
+ def items(self):
+ _items = []
+ for option in self:
+ if not self._config.has_option(self._section, option):
+ _items.append((option, self[option]))
+ else:
+ value_in_config = self._config.get(self._section, option)
+ _items.append((option, value_in_config))
+ return _items
+
+
+if __name__ == "__main__":
+ FailOver().run(conf_file='dr.conf',
+ log_file='/tmp/ovirt-dr.log',
+ log_level=logging.getLevelName("DEBUG"))
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/generate_mapping.py b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/generate_mapping.py
new file mode 100755
index 000000000..2f9152bae
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/generate_mapping.py
@@ -0,0 +1,445 @@
+#!/usr/bin/python3
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import getopt
+import logging
+
+import ovirtsdk4 as sdk
+import ovirtsdk4.types as otypes
+
+# TODO: log file location is currently in the same folder
+logging.basicConfig(level=logging.DEBUG, filename='generator.log')
+
+
+# Note: only attached storage domains are supported by the var generator.
+def main(argv):
+ url, username, password, ca, file_ = _init_vars(argv)
+ connection = _connect_sdk(url, username, password, ca, logging.getLogger())
+ host_storages = _get_host_storages_for_external_lun_disks(connection)
+ external_disks = _get_external_lun_disks(connection)
+ affinity_labels = _get_affinity_labels(connection)
+ domains = _get_aaa_domains(connection)
+ networks = _get_vnic_profile_mapping(connection)
+
+ f = open(file_, 'w')
+ _write_file_header(f, url, username, ca)
+ clusters, affinity_groups = _handle_dc_properties(f, connection)
+ _write_clusters(f, clusters)
+ _write_affinity_groups(f, affinity_groups)
+ _write_affinity_labels(f, affinity_labels)
+ _write_aaa_domains(f, domains)
+ _write_roles(f)
+ _write_vnic_profiles(f, networks)
+ _write_external_lun_disks(f, external_disks, host_storages)
+    f.close()
+    connection.close()
+
+
+def _init_vars(argv):
+ url, username, password, ca, file_ = '', '', '', '', ''
+ try:
+ opts, args = getopt.getopt(
+ argv,
+ "a:u:p:f:c:", ["a=", "u=", "p=", "f=", "c="])
+ except getopt.GetoptError:
+ print(
+ '''
+ -a <http://127.0.0.1:8080/ovirt-engine/api>\n
+ -u <admin@portal>\n
+ -p <password>\n
+ -c </etc/pki/ovirt-engine/ca.pem>\n
+ -f <disaster_recovery_vars.yml>
+ ''')
+ sys.exit(2)
+
+ for opt, arg in opts:
+ if opt == '-h':
+ print(
+ '''
+ generate_mapping.py
+ -a <http://127.0.0.1:8080/ovirt-engine/api>\n
+ -u <admin@portal>\n
+ -p <password>\n
+ -c </etc/pki/ovirt-engine/ca.pem>\n
+ -f <disaster_recovery_vars.yml>
+ ''')
+ sys.exit()
+ elif opt in ("-a", "--url"):
+ url = arg
+ elif opt in ("-u", "--username"):
+ username = arg
+ elif opt in ("-p", "--password"):
+ password = arg
+ elif opt in ("-c", "--ca"):
+ ca = arg
+ elif opt in ("-f", "--file"):
+ file_ = arg
+ return url, username, password, ca, file_
+
+
+def _connect_sdk(url, username, password, ca, log_):
+ connection = sdk.Connection(
+ url=url,
+ username=username,
+ password=password,
+ ca_file=ca,
+ debug=True,
+ log=log_,
+ )
+ return connection
+
+
+def _write_file_header(f, url, username, ca):
+ """
+    Add a header to the parameter file, for example:
+ dr_sites_primary_url: "http://engine1.redhat.com:8080/ovirt-engine/api"
+ dr_sites_primary_username: "admin@internal"
+ dr_sites_primary_ca_file: "ovirt-share/etc/pki/ovirt-engine/ca.pem"
+
+ dr_sites_secondary_url:
+ dr_sites_secondary_username:
+ dr_sites_secondary_ca_file:
+ """
+ f.write("---\n")
+ f.write("dr_sites_primary_url: %s\n" % url)
+ f.write("dr_sites_primary_username: %s\n" % username)
+ f.write("dr_sites_primary_ca_file: %s\n\n" % ca)
+
+ f.write("# Please fill in the following properties "
+ "for the secondary site: \n")
+ f.write("dr_sites_secondary_url: # %s\n" % url)
+ f.write("dr_sites_secondary_username: # %s\n" % username)
+ f.write("dr_sites_secondary_ca_file: # %s\n\n" % ca)
+
+
+def _handle_dc_properties(f, connection):
+ f.write("dr_import_storages:\n")
+ dcs_service = connection.system_service().data_centers_service()
+ dcs_list = dcs_service.list()
+ clusters = []
+ affinity_groups = []
+ for dc in dcs_list:
+ dc_service = dcs_service.data_center_service(dc.id)
+ _write_attached_storage_domains(f, dc_service, dc)
+ _add_clusters_and_aff_groups_for_dc(dc_service,
+ clusters,
+ affinity_groups)
+ return clusters, affinity_groups
+
+
+def _get_host_storages_for_external_lun_disks(connection):
+ host_storages = {}
+ hosts_service = connection.system_service().hosts_service()
+ hosts_list = hosts_service.list(search='status=up')
+
+    # The reason we go over each active Host in the DC is that there might
+    # be a Host which fails to connect to a certain device but is still active.
+ for host in hosts_list:
+ host_storages_service = hosts_service.host_service(host.id) \
+ .storage_service().list()
+ for host_storage in host_storages_service:
+ if host_storage.id not in host_storages.keys():
+ host_storages[host_storage.id] = host_storage
+ return host_storages
+
+
+def _get_external_lun_disks(connection):
+ external_disks = []
+ disks_service = connection.system_service().disks_service()
+ disks_list = disks_service.list()
+ for disk in disks_list:
+ if otypes.DiskStorageType.LUN == disk.storage_type:
+ external_disks.append(disk)
+ return external_disks
+
+
+def _get_affinity_labels(connection):
+ affinity_labels = []
+ affinity_labels_service = \
+ connection.system_service().affinity_labels_service()
+ affinity_labels_list = affinity_labels_service.list()
+ for affinity_label in affinity_labels_list:
+ affinity_labels.append(affinity_label.name)
+ return affinity_labels
+
+
+def _get_aaa_domains(connection):
+ domains = []
+ domains_service = connection.system_service().domains_service()
+ domains_list = domains_service.list()
+ for domain in domains_list:
+ domains.append(domain.name)
+ return domains
+
+
+def _get_vnic_profile_mapping(connection):
+ networks = []
+ vnic_profiles_service = connection.system_service().vnic_profiles_service()
+ vnic_profile_list = vnic_profiles_service.list()
+ for vnic_profile_item in vnic_profile_list:
+ mapped_network = {}
+ networks_list = connection.system_service().networks_service().list()
+        network_name = ''
+        dc_name = ''
+ for network_item in networks_list:
+ if network_item.id == vnic_profile_item.network.id:
+ network_name = network_item.name
+ dc_name = connection.system_service().data_centers_service(). \
+ data_center_service(network_item.data_center.id). \
+                    get().name
+ break
+ mapped_network['network_name'] = network_name
+ mapped_network['network_dc'] = dc_name
+ mapped_network['profile_name'] = vnic_profile_item.name
+ mapped_network['profile_id'] = vnic_profile_item.id
+ networks.append(mapped_network)
+ return networks
+
+
+def _add_clusters_and_aff_groups_for_dc(dc_service, clusters, affinity_groups):
+ clusters_service = dc_service.clusters_service()
+ attached_clusters_list = clusters_service.list()
+ for cluster in attached_clusters_list:
+ clusters.append(cluster.name)
+ cluster_service = clusters_service.cluster_service(cluster.id)
+ _add_affinity_groups_for_cluster(cluster_service, affinity_groups)
+
+
+def _add_affinity_groups_for_cluster(cluster_service, affinity_groups):
+ affinity_groups_service = cluster_service.affinity_groups_service()
+ for affinity_group in affinity_groups_service.list():
+ affinity_groups.append(affinity_group.name)
+
+
+def _write_attached_storage_domains(f, dc_service, dc):
+ """
+ Add all the attached storage domains to the var file
+ """
+ # Locate the service that manages the storage domains that are attached
+ # to the data centers:
+ attached_sds_service = dc_service.storage_domains_service()
+ attached_sds_list = attached_sds_service.list()
+ for attached_sd in attached_sds_list:
+ if attached_sd.name == 'hosted_storage':
+ f.write("# Hosted storage should not be part of the "
+ "recovery process! Comment it out.\n")
+ f.write("#- dr_domain_type: %s\n" % attached_sd.storage.type)
+ f.write("# dr_primary_name: %s\n" % attached_sd.name)
+ f.write("# dr_primary_dc_name: %s\n\n" % dc.name)
+ continue
+
+ if attached_sd.type == otypes.StorageDomainType.EXPORT:
+ f.write("# Export storage domain should not be part of the "
+ "recovery process!\n")
+ f.write("# Please note that a data center with an export "
+ "storage domain might reflect on the failback process.\n")
+ f.write("#- dr_domain_type: %s\n" % attached_sd.storage.type)
+ f.write("# dr_primary_name: %s\n" % attached_sd.name)
+ f.write("# dr_primary_dc_name: %s\n\n" % dc.name)
+ continue
+
+ f.write("- dr_domain_type: %s\n" % attached_sd.storage.type)
+ f.write(" dr_wipe_after_delete: %s\n" % attached_sd.wipe_after_delete)
+ f.write(" dr_backup: %s\n" % attached_sd.backup)
+ f.write(" dr_critical_space_action_blocker: %s\n"
+ % attached_sd.critical_space_action_blocker)
+ f.write(" dr_storage_domain_type: %s\n" % attached_sd.type)
+ f.write(" dr_warning_low_space: %s\n"
+ % attached_sd.warning_low_space_indicator)
+ f.write(" dr_primary_name: %s\n" % attached_sd.name)
+ f.write(" dr_primary_master_domain: %s\n" % attached_sd.master)
+ f.write(" dr_primary_dc_name: %s\n" % dc.name)
+ is_fcp = attached_sd._storage.type == otypes.StorageType.FCP
+ is_scsi = attached_sd.storage.type == otypes.StorageType.ISCSI
+ if not is_fcp and not is_scsi:
+ f.write(" dr_primary_path: %s\n" % attached_sd.storage.path)
+ f.write(" dr_primary_address: %s\n" % attached_sd.storage.address)
+ if attached_sd._storage.type == otypes.StorageType.POSIXFS:
+ f.write(" dr_primary_vfs_type: %s\n"
+ % attached_sd.storage.vfs_type)
+ _add_secondary_mount(f, dc.name, attached_sd)
+ else:
+ f.write(" dr_discard_after_delete: %s\n"
+ % attached_sd.discard_after_delete)
+ f.write(" dr_domain_id: %s\n" % attached_sd.id)
+ if attached_sd._storage._type == otypes.StorageType.ISCSI:
+ f.write(" dr_primary_address: %s\n" %
+ attached_sd.storage.volume_group
+ .logical_units[0].address)
+ f.write(" dr_primary_port: %s\n" %
+ attached_sd.storage.volume_group.logical_units[0].port)
+ targets = set(lun_unit.target for lun_unit in
+ attached_sd.storage.volume_group.logical_units)
+ f.write(" dr_primary_target: [%s]\n" %
+ ','.join(['"' + target + '"' for target in targets]))
+ _add_secondary_scsi(f, dc.name, attached_sd, targets)
+ else:
+ _add_secondary_fcp(f, dc.name, attached_sd)
+ f.write("\n")
+
+
+def _add_secondary_mount(f, dc_name, attached):
+ f.write(" # Fill in the empty properties related to the secondary site\n")
+ f.write(" dr_secondary_name: # %s\n" % attached.name)
+ f.write(" dr_secondary_master_domain: # %s\n" % attached.master)
+ f.write(" dr_secondary_dc_name: # %s\n" % dc_name)
+ f.write(" dr_secondary_path: # %s\n" % attached.storage.path)
+ f.write(" dr_secondary_address: # %s\n" % attached.storage.address)
+ if attached._storage.type == otypes.StorageType.POSIXFS:
+ f.write(" dr_secondary_vfs_type: # %s\n" % attached.storage.vfs_type)
+
+
+def _add_secondary_scsi(f, dc_name, attached, targets):
+ f.write(" # Fill in the empty properties related to the secondary site\n")
+ f.write(" dr_secondary_name: # %s\n" % attached.name)
+ f.write(" dr_secondary_master_domain: # %s\n" % attached.master)
+ f.write(" dr_secondary_dc_name: # %s\n" % dc_name)
+ f.write(" dr_secondary_address: # %s\n" % attached.storage.volume_group
+ .logical_units[0].address)
+ f.write(" dr_secondary_port: # %s\n" % attached.storage.volume_group
+ .logical_units[0].port)
+ f.write(" # target example: [\"target1\",\"target2\",\"target3\"]\n")
+ f.write(" dr_secondary_target: # [%s]\n" %
+ ','.join(['"' + target + '"' for target in targets]))
+
+
+def _add_secondary_fcp(f, dc_name, attached):
+ f.write(" # Fill in the empty properties related to the secondary site\n")
+ f.write(" dr_secondary_name: # %s\n" % attached.name)
+ f.write(" dr_secondary_master_domain: # %s\n" % attached.master)
+ f.write(" dr_secondary_dc_name: # %s\n" % dc_name)
+
+
+def _write_clusters(f, clusters):
+ f.write("# Mapping for cluster\n")
+ f.write("dr_cluster_mappings:\n")
+ for cluster_name in clusters:
+ f.write("- primary_name: %s\n" % cluster_name)
+ f.write(" # Fill the correlated cluster name in the "
+ "secondary site for cluster '%s'\n" % cluster_name)
+ f.write(" secondary_name: # %s\n\n" % cluster_name)
+
+
+def _write_affinity_groups(f, affinity_groups):
+ f.write("\n# Mapping for affinity group\n")
+ f.write("dr_affinity_group_mappings:\n")
+ for affinity_group in affinity_groups:
+ f.write("- primary_name: %s\n" % affinity_group)
+ f.write(" # Fill the correlated affinity group name in the "
+ "secondary site for affinity '%s'\n" % affinity_group)
+ f.write(" secondary_name: # %s\n\n" % affinity_group)
+
+
+def _write_affinity_labels(f, affinity_labels):
+ f.write("\n# Mapping for affinity label\n")
+ f.write("dr_affinity_label_mappings:\n")
+ for affinity_label in affinity_labels:
+ f.write("- primary_name: %s\n" % affinity_label)
+ f.write(" # Fill the correlated affinity label name in the "
+ "secondary site for affinity label '%s'\n" % affinity_label)
+ f.write(" secondary_name: # %s\n\n" % affinity_label)
+
+
+def _write_aaa_domains(f, domains):
+ f.write("\n# Mapping for domain\n")
+ f.write("dr_domain_mappings: \n")
+ for domain in domains:
+ f.write("- primary_name: %s\n" % domain)
+ f.write(" # Fill in the correlated domain in the "
+ "secondary site for domain '%s'\n" % domain)
+ f.write(" secondary_name: # %s\n\n" % domain)
+
+
+def _write_roles(f):
+ f.write("\n# Mapping for role\n")
+ f.write("# Fill in any roles which should be mapped between sites.\n")
+ f.write("dr_role_mappings: \n")
+ f.write("- primary_name: \n")
+ f.write(" secondary_name: \n\n")
+
+
+def _write_vnic_profiles(f, networks):
+ f.write("dr_network_mappings:\n")
+ for network in networks:
+ f.write("- primary_network_name: %s\n" % network['network_name'])
+ f.write("# Data Center name is relevant when multiple vnic profiles"
+ " are maintained.\n")
+ f.write("# please uncomment it in case you have more than one DC.\n")
+ f.write("# primary_network_dc: %s\n" % network['network_dc'])
+ f.write(" primary_profile_name: %s\n" % network['profile_name'])
+ f.write(" primary_profile_id: %s\n" % network['profile_id'])
+ f.write(" # Fill in the correlated vnic profile properties in the "
+ "secondary site for profile '%s'\n" % network['profile_name'])
+ f.write(" secondary_network_name: # %s\n" % network['network_name'])
+ f.write("# Data Center name is relevant when multiple vnic profiles"
+ " are maintained.\n")
+ f.write("# please uncomment it in case you have more than one DC.\n")
+ f.write("# secondary_network_dc: %s\n" % network['network_dc'])
+ f.write(" secondary_profile_name: # %s\n" % network['profile_name'])
+ f.write(" secondary_profile_id: # %s\n\n" % network['profile_id'])
+
+
+def _write_external_lun_disks(f, external_disks, host_storages):
+ f.write("\n# Mapping for external LUN disks\n")
+ f.write("dr_lun_mappings:")
+ for disk in external_disks:
+ disk_id = disk.lun_storage.logical_units[0].id
+ f.write("\n- logical_unit_alias: %s\n" % disk.alias)
+ f.write(" logical_unit_description: %s\n" % disk.description)
+ f.write(" wipe_after_delete: %s\n" % disk.wipe_after_delete)
+ f.write(" shareable: %s\n" % disk.shareable)
+ f.write(" primary_logical_unit_id: %s\n" % disk_id)
+ disk_storage_type = ''
+ if host_storages.get(disk_id) is not None:
+ disk_storage_type = host_storages.get(disk_id).type
+ disk_storage = host_storages.get(disk_id).logical_units[0]
+ f.write(" primary_storage_type: %s\n" % disk_storage_type)
+ if disk_storage_type == otypes.StorageType.ISCSI:
+ portal = ''
+ if disk_storage.portal is not None:
+                    # The portal string looks like "<address:port>,<tag>";
+                    # keep the part after the comma, falling back to ''
+                    # when it is missing.
+                    splitted = disk_storage.portal.split(',')
+                    if len(splitted) > 1:
+                        portal = splitted[1]
+ f.write(" primary_logical_unit_address: %s\n"
+ " primary_logical_unit_port: %s\n"
+ " primary_logical_unit_portal: \"%s\"\n"
+ " primary_logical_unit_target: %s\n"
+ % (disk_storage.address,
+ disk_storage.port,
+ portal,
+ disk_storage.target))
+ if disk_storage.username is not None:
+ f.write(" primary_logical_unit_username: %s\n"
+ " primary_logical_unit_password: "
+ "PLEASE_SET_PASSWORD_HERE\n"
+ % disk_storage.username)
+
+ f.write(" # Fill in the following properties of the external LUN "
+ "disk in the secondary site\n")
+ f.write(
+ " secondary_storage_type: %s\n" % (
+ disk_storage_type
+ if disk_storage_type != ''
+ else "STORAGE TYPE COULD NOT BE FETCHED!"
+ )
+ )
+ f.write(" secondary_logical_unit_id: # %s\n" % disk_id)
+ if disk_storage_type == otypes.StorageType.ISCSI:
+ f.write(" secondary_logical_unit_address: # %s\n"
+ " secondary_logical_unit_port: # %s\n"
+ " secondary_logical_unit_portal: # \"%s\"\n"
+ " secondary_logical_unit_target: # %s\n"
+ % (disk_storage.address,
+ disk_storage.port,
+ portal,
+ disk_storage.target))
+ if disk_storage.username is not None:
+ f.write(" secondary_logical_unit_username: # %s\n"
+ " secondary_logical_unit_password:"
+ "PLEASE_SET_PASSWORD_HERE\n"
+ % disk_storage.username)
+
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/generate_vars.py b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/generate_vars.py
new file mode 100755
index 000000000..e3430e467
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/generate_vars.py
@@ -0,0 +1,318 @@
+#!/usr/bin/python3
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import logging
+import os.path
+import subprocess
+import sys
+
+from configparser import ConfigParser
+
+import ovirtsdk4 as sdk
+
+from bcolors import bcolors
+
+
+INFO = bcolors.OKGREEN
+INPUT = bcolors.OKGREEN
+WARN = bcolors.WARNING
+FAIL = bcolors.FAIL
+END = bcolors.ENDC
+PREFIX = "[Generate Mapping File] "
+CA_DEF = '/etc/pki/ovirt-engine/ca.pem'
+USERNAME_DEF = 'admin@internal'
+SITE_DEF = 'http://localhost:8080/ovirt-engine/api'
+PLAY_DEF = "../examples/dr_play.yml"
+
+
+class GenerateMappingFile:
+
+ def run(self, conf_file, log_file, log_level):
+ log = self._set_log(log_file, log_level)
+ log.info("Start generate variable mapping file "
+ "for oVirt ansible disaster recovery")
+ dr_tag = "generate_mapping"
+ site, username, password, ca_file, var_file, ansible_play_file = \
+ self._init_vars(conf_file, log)
+ log.info("Site address: %s \n"
+ "username: %s \n"
+ "password: *******\n"
+ "ca file location: %s \n"
+ "output file location: %s \n"
+ "ansible play location: %s ",
+ site, username, ca_file, var_file, ansible_play_file)
+ if not self._validate_connection(log,
+ site,
+ username,
+ password,
+ ca_file):
+ self._print_error(log)
+ sys.exit()
+ extra_vars = "site={0} username={1} password={2} ca={3} var_file={4}".\
+ format(site, username, password, ca_file, var_file)
+ command = [
+ "ansible-playbook", ansible_play_file,
+ "-t", dr_tag,
+ "-e", extra_vars,
+ "-vvvvv"
+ ]
+ log.info("Executing command %s", ' '.join(map(str, command)))
+ if log_file is not None and log_file != '':
+ self._log_to_file(log_file, command)
+ else:
+ self._log_to_console(command, log)
+
+ if not os.path.isfile(var_file):
+ log.error("Can not find output file in '%s'.", var_file)
+ self._print_error(log)
+ sys.exit()
+ log.info("Var file location: '%s'", var_file)
+ self._print_success(log)
+
+ def _log_to_file(self, log_file, command):
+ with open(log_file, "a") as f:
+ proc = subprocess.Popen(command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True)
+ for line in iter(proc.stdout.readline, ''):
+ f.write(line)
+ for line in iter(proc.stderr.readline, ''):
+ f.write(line)
+ print("%s%s%s" % (FAIL, line, END))
+
+ def _log_to_console(self, command, log):
+ proc = subprocess.Popen(command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True)
+ for line in iter(proc.stdout.readline, ''):
+ log.debug(line)
+ for line in iter(proc.stderr.readline, ''):
+ log.error(line)
+
+ def _set_log(self, log_file, log_level):
+ logger = logging.getLogger(PREFIX)
+
+ if log_file is not None and log_file != '':
+ formatter = logging.Formatter(
+ '%(asctime)s %(levelname)s %(message)s')
+ hdlr = logging.FileHandler(log_file)
+ hdlr.setFormatter(formatter)
+ else:
+ hdlr = logging.StreamHandler(sys.stdout)
+
+ logger.addHandler(hdlr)
+ logger.setLevel(log_level)
+ return logger
+
+ def _print_success(self, log):
+ msg = "Finished generating variable mapping file " \
+ "for oVirt ansible disaster recovery."
+ log.info(msg)
+ print("%s%s%s%s" % (INFO, PREFIX, msg, END))
+
+ def _print_error(self, log):
+ msg = "Failed to generate var file."
+ log.error(msg)
+ print("%s%s%s%s" % (FAIL, PREFIX, msg, END))
+
+ def _connect_sdk(self, url, username, password, ca):
+ connection = sdk.Connection(
+ url=url,
+ username=username,
+ password=password,
+ ca_file=ca,
+ )
+ return connection
+
+ def _validate_connection(self,
+ log,
+ url,
+ username,
+ password,
+ ca):
+ conn = None
+ try:
+ conn = self._connect_sdk(url,
+ username,
+ password,
+ ca)
+ dcs_service = conn.system_service().data_centers_service()
+ dcs_service.list()
+ except Exception as e:
+ msg = "Connection to setup has failed. " \
+ "Please check your credentials: " \
+ "\n URL: " + url + \
+ "\n user: " + username + \
+ "\n CA file: " + ca
+ log.error(msg)
+ print("%s%s%s%s" % (FAIL, PREFIX, msg, END))
+ log.error("Error: %s", e)
+ if conn:
+ conn.close()
+ return False
+ return True
+
+ def _validate_output_file_exists(self, output_file, log):
+ _dir = os.path.dirname(output_file)
+ if _dir != '' and not os.path.exists(_dir):
+ log.warn("Path '%s' does not exist. Creating the directory.", _dir)
+ os.makedirs(_dir)
+ if os.path.isfile(output_file):
+ valid = {"yes": True, "y": True, "ye": True,
+ "no": False, "n": False}
+ ans = input("%s%sThe output file '%s' already exists. "
+ "Would you like to override it (y,n)? %s"
+ % (WARN, PREFIX, output_file, END))
+ while True:
+ ans = ans.lower()
+ if ans in valid:
+ if valid[ans]:
+ break
+ msg = "Failed to create output file. " \
+ "File could not be overridden."
+ log.error(msg)
+ print("%s%s%s%s" % (FAIL, PREFIX, msg, END))
+ sys.exit(0)
+ ans = input("%s%sPlease respond with 'yes' or 'no': %s"
+ % (INPUT, PREFIX, END))
+ try:
+ os.remove(output_file)
+ except OSError:
+ log.error("File %s could not be replaced.", output_file)
+ print("%s%sFile %s could not be replaced.%s"
+ % (FAIL, PREFIX, output_file, END))
+ sys.exit(0)
+
+ def _init_vars(self, conf_file, log):
+ """ Declare constants """
+ _SECTION = 'generate_vars'
+ _SITE = 'site'
+ _USERNAME = 'username'
+ _PASSWORD = 'password'
+ _CA_FILE = 'ca_file'
+ # TODO: Must have full path, should add relative path support.
+ _OUTPUT_FILE = 'output_file'
+ _ANSIBLE_PLAY = 'ansible_play'
+
+ settings = ConfigParser()
+ settings.read(conf_file)
+ if _SECTION not in settings.sections():
+ settings.add_section(_SECTION)
+ if not settings.has_option(_SECTION, _SITE):
+ settings.set(_SECTION, _SITE, '')
+ if not settings.has_option(_SECTION, _USERNAME):
+ settings.set(_SECTION, _USERNAME, '')
+ if not settings.has_option(_SECTION, _PASSWORD):
+ settings.set(_SECTION, _PASSWORD, '')
+ if not settings.has_option(_SECTION, _CA_FILE):
+ settings.set(_SECTION, _CA_FILE, '')
+ if not settings.has_option(_SECTION, _OUTPUT_FILE):
+ settings.set(_SECTION, _OUTPUT_FILE, '')
+ if not settings.has_option(_SECTION, _ANSIBLE_PLAY):
+ settings.set(_SECTION, _ANSIBLE_PLAY, '')
+
+ site = settings.get(_SECTION, _SITE,
+ vars=DefaultOption(settings,
+ _SECTION,
+ site=None))
+
+ username = settings.get(_SECTION, _USERNAME,
+ vars=DefaultOption(settings,
+ _SECTION,
+ username=None))
+
+ password = settings.get(_SECTION, _PASSWORD,
+ vars=DefaultOption(settings,
+ _SECTION,
+ password=None))
+
+ ca_file = settings.get(_SECTION, _CA_FILE,
+ vars=DefaultOption(settings,
+ _SECTION,
+ ca_file=None))
+ ca_file = os.path.expanduser(ca_file)
+
+ output_file = settings.get(_SECTION, _OUTPUT_FILE,
+ vars=DefaultOption(settings,
+ _SECTION,
+ output_file=None))
+ output_file = os.path.expanduser(output_file)
+
+ ansible_play_file = settings.get(_SECTION, _ANSIBLE_PLAY,
+ vars=DefaultOption(settings,
+ _SECTION,
+ ansible_play=None))
+ ansible_play_file = os.path.expanduser(ansible_play_file)
+
+ if not site:
+ site = input("%s%sSite address is not initialized. "
+ "Please provide the site URL (%s): %s"
+ % (INPUT, PREFIX, SITE_DEF, END)
+ ) or SITE_DEF
+ if not username:
+ username = input("%s%sUsername is not initialized. "
+ "Please provide the username (%s): %s"
+ % (INPUT, PREFIX, USERNAME_DEF, END)
+ ) or USERNAME_DEF
+ while not password:
+ password = input("%s%sPassword is not initialized. "
+ "Please provide the password for username %s: %s"
+ % (INPUT, PREFIX, username, END))
+
+ while not os.path.isfile(ca_file):
+ ca_file = input("%s%sCA file '%s' does not exist. "
+ "Please provide the CA file location (%s):%s "
+ % (INPUT, PREFIX, ca_file, CA_DEF, END)
+ ) or CA_DEF
+ ca_file = os.path.expanduser(ca_file)
+
+ while not output_file:
+ output_file = input("%s%sOutput file location is not initialized. "
+ "Please provide the output file location "
+ "for the mapping var file (%s): %s"
+ % (INPUT, PREFIX, _OUTPUT_FILE, END)
+ ) or _OUTPUT_FILE
+ output_file = os.path.expanduser(output_file)
+ self._validate_output_file_exists(output_file, log)
+
+ while not os.path.isfile(ansible_play_file):
+ ansible_play_file = input("%s%sAnsible play file '%s' does not "
+ "exist. Please provide the ansible play "
+ "file to generate the mapping var file "
+ "(%s): %s" % (INPUT,
+ PREFIX,
+ ansible_play_file,
+ PLAY_DEF,
+ END)
+ ) or PLAY_DEF
+ ansible_play_file = os.path.expanduser(ansible_play_file)
+
+ return site, username, password, ca_file, output_file, ansible_play_file
+
+
+class DefaultOption(dict):
+
+ def __init__(self, config, section, **kv):
+ self._config = config
+ self._section = section
+ dict.__init__(self, **kv)
+
+ def items(self):
+ _items = []
+ for option in self:
+ if not self._config.has_option(self._section, option):
+ _items.append((option, self[option]))
+ else:
+ value_in_config = self._config.get(self._section, option)
+ _items.append((option, value_in_config))
+ return _items
+
+
+if __name__ == "__main__":
+ GenerateMappingFile().run(conf_file='dr.conf',
+ log_file='/tmp/ovirt-dr.log',
+ log_level=logging.getLevelName("DEBUG"))
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/generate_vars_test.py b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/generate_vars_test.py
new file mode 100755
index 000000000..be529cd04
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/generate_vars_test.py
@@ -0,0 +1,38 @@
+#!/usr/bin/python3
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+from contextlib import contextmanager
+import pexpect
+
+
+@contextmanager
+def generator(tmpdir):
+ env = dict(os.environ)
+ env["PYTHONUNBUFFERED"] = "x"
+ env["GENERATE_VARS_CONF_DIR"] = str(tmpdir)
+ env["GENERATE_VARS_OUT_DIR"] = str(tmpdir)
+ gen = pexpect.spawn('./generate-vars', env=env)
+ try:
+ yield gen
+ finally:
+ gen.terminate(force=True)
+
+
+INITIAL_CONF = """
+[generate_vars]
+"""
+
+
+def test_initial_conf(tmpdir):
+ conf = tmpdir.join("dr.conf")
+ conf.write(INITIAL_CONF)
+ with generator(tmpdir) as gen:
+ # TODO: Use regex
+ gen.expect('override')
+ # Add dry run
+ gen.sendline('y')
+ # "/tmp/dr_ovirt-ansible/mapping_vars.yml"
+ assert os.path.exists("/tmp/dr_ovirt-ansible/mapping_vars.yml")
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/ovirt-dr b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/ovirt-dr
new file mode 100755
index 000000000..21fcd0eca
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/ovirt-dr
@@ -0,0 +1,158 @@
+#!/usr/bin/python3
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import logging as logg
+import os
+import sys
+import time
+import getopt
+
+from configparser import ConfigParser
+
+import fail_back
+import fail_over
+import generate_vars
+import validator
+
+VALIDATE = 'validate'
+GENERATE = 'generate'
+FAILOVER = 'failover'
+FAILBACK = 'failback'
+LOG_FILE = 'log-file'
+LOG_LEVEL = 'log-level'
+DEF_LOG_FILE = ""
+DEF_DEBUG_LEVEL = 'DEBUG'
+DEF_CONF_FILE = 'dr.conf'
+
+
+def main(argv):
+ action, conf_file, log_file, log_level = _init_vars(argv)
+ while not os.path.isfile(conf_file):
+ conf_file = input(
+ "Conf file '" + conf_file + "' does not exist."
+ " Please provide the configuration file location: ")
+
+ if action != 'validate':
+ log_file = log_file.format(int(round(time.time() * 1000)))
+ if log_level not in ['DEBUG', 'INFO', 'WARNING', 'ERROR']:
+ print("ovirt-dr: log level must be 'DEBUG' 'INFO' 'WARNING' 'ERROR'\n"
+ "Use 'ovirt-dr --help' for more information.")
+ sys.exit(2)
+
+ create_log_dir(log_file)
+ _print_log_file_name(log_file)
+ if action == 'validate':
+ validator.ValidateMappingFile().run(conf_file)
+ elif action == 'generate':
+ generate_vars.GenerateMappingFile().run(conf_file,
+ log_file,
+ logg.getLevelName(log_level))
+ _print_log_file_name(log_file)
+ elif action == 'failover':
+ fail_over.FailOver().run(conf_file,
+ log_file,
+ logg.getLevelName(log_level))
+ _print_log_file_name(log_file)
+ elif action == 'failback':
+ fail_back.FailBack().run(conf_file,
+ log_file,
+ logg.getLevelName(log_level))
+ _print_log_file_name(log_file)
+ elif action == '--help':
+ help_log()
+ else:
+ print("\tError: action '%s' is not defined" % action)
+ help_log()
+
+
+def _print_log_file_name(log_file):
+ if log_file is not None and log_file != '':
+ print("Log file: '%s'" % log_file)
+
+
+def _init_vars(argv):
+ conf_file = DEF_CONF_FILE
+ log_file = ''
+ log_level = ''
+
+ if len(argv) == 0:
+ print("ovirt-dr: missing action operand\n"
+ "Use 'ovirt-dr --help' for more information.")
+ sys.exit(2)
+ action = argv[0]
+
+ try:
+        opts, args = \
+            getopt.getopt(argv[1:], "f:",
+                          ["conf-file=", "log-file=", "log-level="])
+ except getopt.GetoptError:
+ help_log()
+ sys.exit(2)
+
+    for opt, arg in opts:
+        if opt in ("-f", "--conf-file"):
+            conf_file = arg
+        # getopt never yields multi-character short options such as "-log",
+        # so only the long forms are matched here.
+        if opt == "--log-file":
+            log_file = arg
+        if opt == "--log-level":
+            log_level = arg
+
+ log_file, log_level = _get_log_conf(conf_file, log_file, log_level)
+ return action, conf_file, log_file, log_level.upper()
+
+
+def _get_log_conf(conf_file, log_file, log_level):
+ log_section = "log"
+ log_file_conf = "log_file"
+ log_level_conf = "log_level"
+ while not os.path.isfile(conf_file):
+ conf_file = input(
+ "Conf file '" + conf_file + "' does not exist."
+ " Please provide the configuration file location: ")
+ settings = ConfigParser()
+ settings.read(conf_file)
+ if log_section not in settings.sections():
+ settings.add_section(log_section)
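+    # Values from the config file are used only when the matching
+    # CLI flag was not supplied.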
+ if settings.has_option(log_section, log_file_conf) and \
+ (log_file is None or log_file == ''):
+ log_file = settings.get(log_section, log_file_conf)
+ if settings.has_option(log_section, log_level_conf) and \
+ (log_level is None or log_level == ''):
+ log_level = settings.get(log_section, log_level_conf)
+ else:
+ log_level = "DEBUG"
+ return log_file, log_level
+
+
+def create_log_dir(fname):
+ _dir = os.path.dirname(fname)
+ if _dir != '' and not os.path.exists(_dir):
+ os.makedirs(_dir)
+
+
+def help_log():
+ print(
+ '''
+ \tusage: ovirt-dr <%s/%s/%s/%s>
+ [--conf-file=dr.conf]
+ [--log-file=log_file.log]
+ [--log-level=DEBUG/INFO/WARNING/ERROR]\n
+ \tHere is a description of the following actions:\n
+ \t\t%s\tGenerate the mapping var file based on primary setup
+ \t\t%s\tValidate the var file mapping
+ \t\t%s\tStart a failover process to the target setup
+ \t\t%s\tStart a failback process to the source setup
+ ''' % (GENERATE,
+ VALIDATE,
+ FAILOVER,
+ FAILBACK,
+ GENERATE,
+ VALIDATE,
+ FAILOVER,
+ FAILBACK))
+
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/validator.py b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/validator.py
new file mode 100755
index 000000000..90e028199
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/validator.py
@@ -0,0 +1,731 @@
+#!/usr/bin/python3
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+import yaml
+
+import ovirtsdk4 as sdk
+from ovirtsdk4 import types
+
+from bcolors import bcolors
+from configparser import ConfigParser
+
+
+INFO = bcolors.OKGREEN
+INPUT = bcolors.OKGREEN
+WARN = bcolors.WARNING
+FAIL = bcolors.FAIL
+END = bcolors.ENDC
+PREFIX = "[Validate Mapping File] "
+
+
+class ValidateMappingFile:
+
+ def_var_file = "../examples/disaster_recovery_vars.yml"
+ default_main_file = "../defaults/main.yml"
+ var_file = ""
+ running_vms = "dr_running_vms"
+ cluster_map = 'dr_cluster_mappings'
+ domain_map = 'dr_import_storages'
+ role_map = 'dr_role_mappings'
+ aff_group_map = 'dr_affinity_group_mappings'
+ aff_label_map = 'dr_affinity_label_mappings'
+ network_map = 'dr_network_mappings'
+
+ def run(self, conf_file):
+ print("%s%sValidate variable mapping file "
+ "for oVirt ansible disaster recovery%s"
+ % (INFO, PREFIX, END))
+ self._set_dr_conf_variables(conf_file)
+ print("%s%sVar File: '%s'%s" % (INFO, PREFIX, self.var_file, END))
+
+ python_vars = self._read_var_file()
+ if (not self._validate_lists_in_mapping_file(python_vars)
+ or not self._validate_duplicate_keys(python_vars)
+ or not self._entity_validator(python_vars)
+ or not self._validate_failback_leftovers()):
+ self._print_finish_error()
+ sys.exit()
+
+ if not self._validate_hosted_engine(python_vars):
+ self._print_finish_error()
+ sys.exit()
+
+ if not self._validate_export_domain(python_vars):
+ self._print_finish_error()
+ sys.exit()
+ self._print_finish_success()
+
+ def _validate_lists_in_mapping_file(self, mapping_vars):
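+        # Each mapping section may be absent (None), but when present it
+        # must be a YAML list; the chain short-circuits on the first offender.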
+ return self._is_list(mapping_vars, self.cluster_map) and self._is_list(
+ mapping_vars, self.domain_map) and self._is_list(
+ mapping_vars, self.role_map) and self._is_list(
+ mapping_vars, self.aff_group_map) and self._is_list(
+ mapping_vars, self.aff_label_map) and self._is_list(
+ mapping_vars, self.network_map)
+
+ def _is_list(self, mapping_vars, mapping):
+ map_file = mapping_vars.get(mapping)
+ if not isinstance(map_file, list) and map_file is not None:
+ print("%s%s%s is not a list: '%s'."
+ " Please check your mapping file%s"
+ % (FAIL, PREFIX, mapping, map_file, END))
+ return False
+ return True
+
+ def _print_finish_error(self):
+ print("%s%sFailed to validate variable mapping file "
+ "for oVirt ansible disaster recovery%s"
+ % (FAIL, PREFIX, END))
+
+ def _print_finish_success(self):
+ print("%s%sFinished validation of variable mapping file "
+ "for oVirt ansible disaster recovery%s"
+ % (INFO, PREFIX, END))
+
+ def _read_var_file(self):
+ with open(self.var_file, 'r') as info:
+ info_dict = yaml.safe_load(info)
+ return info_dict
+
+ def _set_dr_conf_variables(self, conf_file):
+ _SECTION = 'validate_vars'
+ _VAR_FILE = 'var_file'
+
+ # Get default location of the yml var file.
+ settings = ConfigParser()
+ settings.read(conf_file)
+ if _SECTION not in settings.sections():
+ settings.add_section(_SECTION)
+ if not settings.has_option(_SECTION, _VAR_FILE):
+ settings.set(_SECTION, _VAR_FILE, '')
+ var_file = settings.get(_SECTION, _VAR_FILE,
+ vars=DefaultOption(settings,
+ _SECTION,
+ site=self.def_var_file))
+ var_file = os.path.expanduser(var_file)
+
+ while not os.path.isfile(var_file):
+ var_file = input("%s%sVar file '%s' does not exist. Please "
+ "provide the location of the var file (%s): %s"
+ % (WARN, PREFIX, var_file, self.def_var_file, END)
+ ) or self.def_var_file
+ var_file = os.path.expanduser(var_file)
+ self.var_file = var_file
+
+ self.primary_pwd = input(
+ "%s%sPlease provide password for the primary setup: %s"
+ % (INPUT, PREFIX, END))
+ self.second_pwd = input(
+ "%s%sPlease provide password for the secondary setup: %s"
+ % (INPUT, PREFIX, END))
+
+ def _print_duplicate_keys(self, duplicates, keys):
+ ret_val = False
+ for key in keys:
+ if len(duplicates[key]) > 0:
+ print("%s%sFound the following duplicate keys in %s: %s%s" %
+ (FAIL, PREFIX, key, list(duplicates[key]), END))
+ ret_val = True
+ return ret_val
+
+ def _entity_validator(self, python_vars):
+ ovirt_setups = ConnectSDK(
+ python_vars,
+ self.primary_pwd,
+ self.second_pwd)
+ isValid = ovirt_setups.validate_primary()
+ isValid = ovirt_setups.validate_secondary() and isValid
+ if isValid:
+ primary_conn, second_conn = '', ''
+ try:
+ primary_conn = ovirt_setups.connect_primary()
+ if primary_conn is None:
+ return False
+ isValid = self._validate_entities_in_setup(
+ primary_conn, 'primary', python_vars) and isValid
+ second_conn = ovirt_setups.connect_secondary()
+ if second_conn is None:
+ return False
+ isValid = self._validate_entities_in_setup(
+ second_conn, 'secondary', python_vars) and isValid
+ cluster_mapping = python_vars.get(self.cluster_map)
+ isValid = isValid and self._validate_vms_for_failback(
+ primary_conn,
+ "primary")
+ isValid = isValid and self._validate_vms_for_failback(
+ second_conn,
+ "secondary")
+ isValid = isValid and self._is_compatible_versions(
+ primary_conn,
+ second_conn,
+ cluster_mapping)
+ finally:
+ # Close the connections.
+ if primary_conn:
+ primary_conn.close()
+ if second_conn:
+ second_conn.close()
+
+ return isValid
+
+ def _validate_failback_leftovers(self):
+ valid = {"yes": True, "y": True, "ye": True,
+ "no": False, "n": False}
+ with open(self.default_main_file, 'r') as stream:
+ try:
+ info_dict = yaml.safe_load(stream)
+ running_vms_file = info_dict.get(self.running_vms)
+ if os.path.isfile(running_vms_file):
+ ans = input(
+ "%s%sFile with running vms info already exists from "
+ "previous failback operation. Do you want to "
+ "delete it (yes,no)?: %s" %
+ (WARN, PREFIX, END))
+ ans = ans.lower()
+ if ans in valid and valid[ans]:
+ os.remove(running_vms_file)
+ print("%s%sFile '%s' has been deleted successfully%s" %
+ (INFO, PREFIX, running_vms_file, END))
+ else:
+ print("%s%sFile '%s' has not been deleted."
+ " It will be used in the next failback"
+ " operation%s" %
+ (INFO, PREFIX, running_vms_file, END))
+
+ except yaml.YAMLError as exc:
+ print("%s%syaml file '%s' could not be loaded%s"
+ % (FAIL, PREFIX, self.default_main_file, END))
+ print(exc)
+ return False
+ except OSError as ex:
+ print("%s%sFail to validate failback running vms file '%s'%s"
+ % (FAIL, PREFIX, self.default_main_file, END))
+ print(ex)
+ return False
+ return True
+
+ def _validate_entities_in_setup(self, conn, setup, python_vars):
+ dcs_service = conn.system_service().data_centers_service()
+ dcs_list = dcs_service.list()
+ clusters = []
+ affinity_groups = set()
+ for dc in dcs_list:
+ dc_service = dcs_service.data_center_service(dc.id)
+ clusters_service = dc_service.clusters_service()
+ attached_clusters_list = clusters_service.list()
+ for cluster in attached_clusters_list:
+ clusters.append(cluster.name)
+ cluster_service = clusters_service.cluster_service(cluster.id)
+ affinity_groups.update(
+ self._fetch_affinity_groups(cluster_service))
+ aff_labels = self._get_affinity_labels(conn)
+ aaa_domains = self._get_aaa_domains(conn)
+ # TODO: Remove once vnic profile is validated.
+ networks = self._get_vnic_profile_mapping(conn)
+ isValid = self._validate_networks(
+ python_vars,
+ networks,
+ setup)
+ isValid = self._validate_entity_exists(
+ clusters,
+ python_vars,
+ self.cluster_map,
+ setup) and isValid
+ isValid = self._validate_entity_exists(
+ list(affinity_groups),
+ python_vars,
+ self.aff_group_map,
+ setup) and isValid
+ isValid = self._validate_entity_exists(
+ aff_labels,
+ python_vars,
+ self.aff_label_map,
+ setup) and isValid
+ return isValid
+
+ def _fetch_affinity_groups(self, cluster_service):
+ affinity_groups = set()
+ affinity_groups_service = cluster_service.affinity_groups_service()
+ for affinity_group in affinity_groups_service.list():
+ affinity_groups.add(affinity_group.name)
+ return list(affinity_groups)
+
+ def _get_affinity_labels(self, conn):
+ affinity_labels = set()
+ affinity_labels_service = \
+ conn.system_service().affinity_labels_service()
+ for affinity_label in affinity_labels_service.list():
+ affinity_labels.add(affinity_label.name)
+ return list(affinity_labels)
+
+ def _get_aaa_domains(self, conn):
+ domains = []
+ domains_service = conn.system_service().domains_service()
+ domains_list = domains_service.list()
+ for domain in domains_list:
+ domains.append(domain.name)
+ return domains
+
+ def _get_vnic_profile_mapping(self, conn):
+ networks = []
+ vnic_profiles_service = conn.system_service().vnic_profiles_service()
+ vnic_profile_list = vnic_profiles_service.list()
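+        # Note: the full network list is re-fetched for every vNIC profile;
+        # acceptable for a validation pass, though it could be hoisted out
+        # of the loop.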
+ for vnic_profile_item in vnic_profile_list:
+ mapped_network = {}
+ networks_list = conn.system_service().networks_service().list()
+            network_name = ''
+            dc_name = ''
+ for network_item in networks_list:
+ if network_item.id == vnic_profile_item.network.id:
+ network_name = network_item.name
+ dc_name = conn.system_service().data_centers_service(). \
+ data_center_service(network_item.data_center.id). \
+ get()._name
+ break
+ mapped_network['network_name'] = network_name
+ mapped_network['network_dc'] = dc_name
+ mapped_network['profile_name'] = vnic_profile_item.name
+ networks.append(mapped_network)
+ return networks
+
+ def _key_setup(self, setup, key):
+ if setup == 'primary':
+ if key == 'dr_import_storages':
+ return 'dr_primary_name'
+ if key == 'dr_network_mappings':
+ return ['primary_profile_name',
+ 'primary_network_name',
+ 'primary_network_dc']
+ return 'primary_name'
+ elif setup == 'secondary':
+ if key == 'dr_import_storages':
+ return 'dr_secondary_name'
+ if key == 'dr_network_mappings':
+ return ['secondary_profile_name',
+ 'secondary_network_name',
+ 'secondary_network_dc']
+ return 'secondary_name'
+
+ def _validate_networks(self, var_file, networks_setup, setup):
+ dups = self._get_network_dups(networks_setup)
+ _mappings = var_file.get(self.network_map)
+ keys = self._key_setup(setup, self.network_map)
+ for mapping in _mappings:
+ map_key = mapping[keys[0]] + \
+ "_" + mapping[keys[1]] + \
+ "_" + (mapping[keys[2]] if keys[2] in mapping else "")
+ if map_key in dups:
+ if keys[2] not in mapping:
+ print(
+ "%s%sVnic profile name '%s' and network name '%s'"
+ " are related to multiple data centers in the"
+ " %s setup. Please specify the data center name in"
+ " the mapping var file.%s" %
+ (FAIL,
+ PREFIX,
+ mapping[keys[0]],
+ mapping[keys[1]],
+ setup,
+ END))
+ return False
+ # TODO: Add check whether the data center exists in the setup
+ print("%s%sFinished validation for 'dr_network_mappings' for "
+ "%s setup with success.%s" %
+ (INFO, PREFIX, setup, END))
+ return True
+
+ def _get_network_dups(self, networks_setup):
+ attributes = [attr['profile_name']
+ + "_"
+ + attr['network_name']
+ + "_"
+ + attr['network_dc'] for attr in networks_setup]
+ dups = [x for n, x in enumerate(attributes) if x in attributes[:n]]
+ return dups
+
+ def _validate_entity_exists(self, _list, var_file, key, setup):
+ isValid = True
+ key_setup = self._key_setup(setup, key)
+ _mapping = var_file.get(key)
+ if _mapping is None:
+ return isValid
+ for x in _mapping:
+ if key_setup not in x.keys():
+ print(
+ "%s%sdictionary key '%s' is not included in %s[%s].%s" %
+ (FAIL,
+ PREFIX,
+ key_setup,
+ key,
+ x.keys(),
+ END))
+ isValid = False
+ if isValid and x[key_setup] not in _list:
+ print(
+ "%s%s%s entity '%s':'%s' does not exist in the "
+ "setup.\n%sThe entities which exists in the setup "
+ "are: %s.%s" %
+ (FAIL,
+ PREFIX,
+ key,
+ key_setup,
+ x[key_setup],
+ PREFIX,
+ _list,
+ END))
+ isValid = False
+ if isValid:
+ print(
+ "%s%sFinished validation for '%s' for key name "
+ "'%s' with success.%s" %
+ (INFO, PREFIX, key, key_setup, END))
+ return isValid
+
+ def _validate_hosted_engine(self, var_file):
+ domains = var_file[self.domain_map]
+ hosted = 'hosted_storage'
+ for domain in domains:
+ primary = domain['dr_primary_name']
+ secondary = domain['dr_secondary_name']
+ if primary == hosted or secondary == hosted:
+ print("%s%sHosted storage domains are not supported.%s"
+ % (FAIL, PREFIX, END))
+ return False
+ return True
+
+ def _validate_export_domain(self, var_file):
+ domains = var_file[self.domain_map]
+ for domain in domains:
+ domain_type = domain['dr_storage_domain_type']
+ if domain_type == 'export':
+ print("%s%sExport storage domain is not supported.%s"
+ % (FAIL, PREFIX, END))
+ return False
+ return True
+
+ def _validate_duplicate_keys(self, var_file):
+ clusters = 'clusters'
+ domains = 'domains'
+ roles = 'roles'
+ aff_groups = 'aff_groups'
+ aff_labels = 'aff_labels'
+ network = 'network'
+ key1 = 'primary_name'
+ key2 = 'secondary_name'
+ dr_primary_name = 'dr_primary_name'
+ dr_secondary_name = 'dr_secondary_name'
+
+ duplicates = self._get_dups(
+ var_file, [
+ [clusters, self.cluster_map, key1, key2],
+ [domains, self.domain_map, dr_primary_name, dr_secondary_name],
+ [roles, self.role_map, key1, key2],
+ [aff_groups, self.aff_group_map, key1, key2],
+ [aff_labels, self.aff_label_map, key1, key2]])
+ duplicates[network] = self._get_dup_network(var_file)
+ return not self._print_duplicate_keys(
+ duplicates,
+ [clusters, domains, roles, aff_groups, aff_labels, network])
+
+ def _validate_vms_for_failback(self, setup_conn, setup_type):
+ vms_in_preview = []
+ vms_delete_protected = []
+ service_setup = setup_conn.system_service().vms_service()
+ for vm in service_setup.list():
+ vm_service = service_setup.vm_service(vm.id)
+ if vm.delete_protected:
+ vms_delete_protected.append(vm.name)
+ snapshots_service = vm_service.snapshots_service()
+ for snapshot in snapshots_service.list():
+ if snapshot.snapshot_status == types.SnapshotStatus.IN_PREVIEW:
+ vms_in_preview.append(vm.name)
+ if len(vms_in_preview) > 0:
+ print("%s%sFailback process does not support VMs in preview."
+ " The '%s' setup contains the following previewed vms:"
+ " '%s'%s"
+ % (FAIL, PREFIX, setup_type, vms_in_preview, END))
+ return False
+ if len(vms_delete_protected) > 0:
+ print("%s%sFailback process does not support delete protected"
+ " VMs. The '%s' setup contains the following vms:"
+ " '%s'%s"
+ % (FAIL, PREFIX, setup_type, vms_delete_protected, END))
+ return False
+ return True
+
+ def _is_compatible_versions(self,
+ primary_conn,
+ second_conn,
+ cluster_mapping):
+ """ Validate cluster versions """
+ service_primary = primary_conn.system_service().clusters_service()
+ service_sec = second_conn.system_service().clusters_service()
+ for cluster_map in cluster_mapping:
+ search_prime = "name=%s" % cluster_map['primary_name']
+ search_sec = "name=%s" % cluster_map['secondary_name']
+ cluster_prime = service_primary.list(search=search_prime)[0]
+ cluster_sec = service_sec.list(search=search_sec)[0]
+ prime_ver = cluster_prime.version
+ sec_ver = cluster_sec.version
+ if (prime_ver.major != sec_ver.major
+ or prime_ver.minor != sec_ver.minor):
+ print("%s%sClusters have incompatible versions. "
+ "primary setup ('%s' %s.%s) not equal to "
+ "secondary setup ('%s' %s.%s)%s"
+ % (FAIL,
+ PREFIX,
+ cluster_prime.name,
+ prime_ver.major,
+ prime_ver.minor,
+ cluster_sec.name,
+ sec_ver.major,
+ sec_ver.minor,
+ END))
+ return False
+ return True
+
+ def _get_dups(self, var_file, mappings):
+ duplicates = {}
+ for mapping in mappings:
+ _return_set = set()
+ _mapping = var_file.get(mapping[1])
+ if _mapping is None or len(_mapping) < 1:
+ print("%s%smapping %s is empty in var file%s"
+ % (WARN, PREFIX, mapping[1], END))
+ duplicates[mapping[0]] = _return_set
+ continue
+ _primary = set()
+ _second = set()
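+            # set.add() returns None (falsy), so each key is recorded in
+            # _primary/_second on first sight and yielded into the result
+            # set only when it is seen a second time.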
+ _return_set.update(
+ set(x[mapping[2]]
+ for x in _mapping
+ if x[mapping[2]]
+ in _primary or _primary.add(x[mapping[2]])))
+ _return_set.update(
+ set(x[mapping[3]]
+ for x in _mapping
+ if x[mapping[3]]
+ in _second or _second.add(x[mapping[3]])))
+ duplicates[mapping[0]] = _return_set
+ return duplicates
+
+ def _get_dup_network(self, var_file):
+ _return_set = set()
+ # TODO: Add data center also
+ _mapping = var_file.get(self.network_map)
+ if _mapping is None or len(_mapping) < 1:
+ print("%s%sNetwork has not been initialized in var file%s"
+ % (WARN, PREFIX, END))
+ return _return_set
+
+ # Check for profile + network name duplicates in primary
+ _primary1 = set()
+ key1_a = 'primary_profile_name'
+ key1_b = 'primary_network_name'
+ key1_c = 'primary_network_dc'
+ for x in _mapping:
+ if x[key1_a] is None or x[key1_b] is None:
+ print("%s%sNetwork '%s' is not initialized in map %s %s%s"
+ % (FAIL,
+ PREFIX,
+ x,
+ x[key1_a],
+ x[key1_b],
+ END))
+ sys.exit()
+ primary_dc_name = ''
+ if key1_c in x:
+ primary_dc_name = x[key1_c]
+ map_key = x[key1_a] + "_" + x[key1_b] + "_" + primary_dc_name
+ if map_key in _primary1:
+ _return_set.add(map_key)
+ else:
+ _primary1.add(map_key)
+
+ # Check for profile + network name duplicates in secondary
+ _second1 = set()
+ val1_a = 'secondary_profile_name'
+ val1_b = 'secondary_network_name'
+ val1_c = 'secondary_network_dc'
+ for x in _mapping:
+ if x[val1_a] is None or x[val1_b] is None:
+ print("%s%sThe following network mapping is not "
+ "initialized in var file mapping:\n"
+ " %s:'%s'\n %s:'%s'%s"
+ % (FAIL,
+ PREFIX,
+ val1_a,
+ x[val1_a],
+ val1_b,
+ x[val1_b],
+ END))
+ sys.exit()
+ secondary_dc_name = ''
+ if val1_c in x:
+ secondary_dc_name = x[val1_c]
+ map_key = x[val1_a] + "_" + x[val1_b] + "_" + secondary_dc_name
+ if map_key in _second1:
+ _return_set.add(map_key)
+ else:
+ _second1.add(map_key)
+
+ return _return_set
+
+
+class DefaultOption(dict):
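+    """Fallback mapping for ConfigParser.get(..., vars=...).
+
+    items() prefers the value already present in the config section and
+    falls back to the keyword defaults supplied at construction time.
+    """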
+
+ def __init__(self, config, section, **kv):
+ self._config = config
+ self._section = section
+ dict.__init__(self, **kv)
+
+ def items(self):
+ _items = []
+ for option in self:
+ if not self._config.has_option(self._section, option):
+ _items.append((option, self[option]))
+ else:
+ value_in_config = self._config.get(self._section, option)
+ _items.append((option, value_in_config))
+ return _items
+
+
+class ConnectSDK:
+ primary_url, primary_user, primary_ca = '', '', ''
+ second_url, second_user, second_ca = '', '', ''
+ prefix = ''
+ error_msg = "%s%s The '%s' field in the %s setup is not " \
+ "initialized in var file mapping.%s"
+
+ def __init__(self, var_file, primary_pwd, second_pwd):
+ """
+ ---
+ dr_sites_primary_url: http://xxx.xx.xx.xxx:8080/ovirt-engine/api
+ dr_sites_primary_username: admin@internal
+ dr_sites_primary_ca_file: /etc/pki/ovirt-engine/ca.pem
+
+ # Please fill in the following properties for the secondary site:
+ dr_sites_secondary_url: http://yyy.yy.yy.yyy:8080/ovirt-engine/api
+ dr_sites_secondary_username: admin@internal
+ dr_sites_secondary_ca_file: /etc/pki/ovirt-engine_secondary/ca.pem
+ """
+ self.primary_url = var_file.get('dr_sites_primary_url')
+ self.primary_user = var_file.get('dr_sites_primary_username')
+ self.primary_ca = var_file.get('dr_sites_primary_ca_file')
+ self.second_url = var_file.get('dr_sites_secondary_url')
+ self.second_user = var_file.get('dr_sites_secondary_username')
+ self.second_ca = var_file.get('dr_sites_secondary_ca_file')
+ self.primary_pwd = primary_pwd
+ self.second_pwd = second_pwd
+
+ def validate_primary(self):
+ isValid = True
+ if self.primary_url is None:
+ print(self.error_msg % (
+ FAIL,
+ PREFIX,
+ "url",
+ "primary",
+ END))
+ isValid = False
+ if self.primary_user is None:
+ print(self.error_msg % (
+ FAIL,
+ PREFIX,
+ "username",
+ "primary",
+ END))
+ isValid = False
+ if self.primary_ca is None:
+ print(self.error_msg % (
+ FAIL,
+ PREFIX,
+ "ca",
+ "primary",
+ END))
+ isValid = False
+ return isValid
+
+ def validate_secondary(self):
+ isValid = True
+ if self.second_url is None:
+ print(self.error_msg % (
+ FAIL,
+ PREFIX,
+ "url",
+ "secondary",
+ END))
+ isValid = False
+ if self.second_user is None:
+ print(self.error_msg % (
+ FAIL,
+ PREFIX,
+ "username",
+ "secondary",
+ END))
+ isValid = False
+ if self.second_ca is None:
+ print(self.error_msg % (
+ FAIL,
+ PREFIX,
+ "ca",
+ "secondary",
+ END))
+ isValid = False
+ return isValid
+
+ def _validate_connection(self, url, username, password, ca):
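+        # The connection is considered valid only if a trivial API call
+        # (listing data centers) succeeds; on failure the connection is
+        # closed and None is returned so callers can abort validation.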
+ conn = None
+ try:
+ conn = self._connect_sdk(url, username, password, ca)
+ dcs_service = conn.system_service().data_centers_service()
+ dcs_service.list()
+ except Exception:
+ print(
+ "%s%sConnection to setup has failed."
+ " Please check your credentials: "
+ "\n%s URL: %s"
+ "\n%s user: %s"
+ "\n%s CA file: %s%s" %
+ (FAIL,
+ PREFIX,
+ PREFIX,
+ url,
+ PREFIX,
+ username,
+ PREFIX,
+ ca,
+ END))
+ if conn:
+ conn.close()
+ return None
+ return conn
+
+ def connect_primary(self):
+ return self._validate_connection(self.primary_url,
+ self.primary_user,
+ self.primary_pwd,
+ self.primary_ca)
+
+ def connect_secondary(self):
+ return self._validate_connection(self.second_url,
+ self.second_user,
+ self.second_pwd,
+ self.second_ca)
+
+ def _connect_sdk(self, url, username, password, ca):
+ connection = sdk.Connection(
+ url=url,
+ username=username,
+ password=password,
+ ca_file=ca,
+ )
+ return connection
+
+
+if __name__ == "__main__":
+ ValidateMappingFile().run('dr.conf')
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/vault_secret.sh b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/vault_secret.sh
new file mode 100644
index 000000000..52e31c6bd
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/vault_secret.sh
@@ -0,0 +1 @@
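+# Used by ansible-vault as a password script: prints the vault password
+# taken from the vault_password environment variable.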
+echo "$vault_password"
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_disks.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_disks.yml
new file mode 100644
index 000000000..de6875887
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_disks.yml
@@ -0,0 +1,12 @@
+---
+- name: Remove disk main block
+ block:
+ - name: "Remove disk '{{ disk.id }}'"
+ ovirt_disk:
+ state: absent
+ id: "{{ disk.id }}"
+ auth: "{{ ovirt_auth }}"
+ ignore_errors: "{{ dr_ignore_error_clean }}"
+ tags:
+ - fail_back
+ - clean_engine
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_domain.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_domain.yml
new file mode 100644
index 000000000..cab1f83b9
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_domain.yml
@@ -0,0 +1,22 @@
+---
+- name: Remove storage domain main block
+ block:
+    # If we get the exception "Cannot deactivate Master Data Domain while there are running tasks on its Data Center.",
+    # we should wait for some time and try again.
+ - name: "Remove storage domain '{{ sd.name }}'"
+ ovirt_storage_domain:
+ state: absent
+ id: "{{ sd.id }}"
+ name: "{{ sd.name }}"
+ auth: "{{ ovirt_auth }}"
+ host: "{{ host }}"
+ destroy: "{{ dr_force }}"
+ data_center: "{{ sp_uuid }}"
+ register: result
+ until: dr_force or result is not failed
+ retries: "{{ dr_cleanup_retries_maintenance }}"
+ delay: "{{ dr_cleanup_delay_maintenance }}"
+ ignore_errors: "{{ dr_ignore_error_clean }}"
+ tags:
+ - fail_back
+ - clean_engine
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_domain_process.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_domain_process.yml
new file mode 100644
index 000000000..da8dd0173
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_domain_process.yml
@@ -0,0 +1,51 @@
+---
+- name: Remove storage domain process main block
+ block:
+    # TODO: Check what happens when we force remove an unattached storage domain (we should probably pass a default empty GUID as the data center).
+    # Answer: When we force remove an unattached storage domain, Ansible tries to move it to maintenance and detach it first,
+    # although this storage domain might already be detached and have no related data center, therefore the move to maintenance will fail.
+
+ # We set an initial value for sp_uuid since this task is being called
+ # multiple times from the main task and sp_uuid is stateful.
+ - name: Set default boolean value for sp_uuid
+ ansible.builtin.set_fact: sp_uuid=True
+
+ - name: Detached storage domain - Set sp_uuid with empty GUID
+ ansible.builtin.set_fact: sp_uuid="00000000-0000-0000-0000-000000000000"
+ when: sd.data_centers is not defined
+
+ - name: Detached storage domain - Fetch active host for remove
+ ovirt_host_info:
+ pattern: "status=up"
+ auth: "{{ ovirt_auth }}"
+ register: host_info
+ when: sd.data_centers is not defined
+
+ - name: Attached storage domain - Fetch active host for remove
+ ovirt_host_info:
+ pattern: "status=up and storage={{ sd.name }}"
+ auth: "{{ ovirt_auth }}"
+ register: host_info
+ when: sd.data_centers is defined
+
+    # If sp_uuid still holds its default boolean value, there is a
+    # data center to which the storage domain is attached.
+ - name: Attached storage domain - Set sp_uuid
+ ansible.builtin.set_fact: sp_uuid="{{ sd.data_centers[0].id }}"
+ when: sp_uuid
+
+ - name: Remove storage domain with no force
+ include_tasks: remove_domain.yml
+ vars:
+ host: "{{ host_info.ovirt_hosts[0].id }}"
+ when: "host_info.ovirt_hosts is defined and host_info.ovirt_hosts|length > 0 and not dr_force"
+
+ - name: Force remove storage domain
+ include_tasks: remove_domain.yml
+ vars:
+ host: "00000000-0000-0000-0000-000000000000"
+ when: "dr_force"
+ ignore_errors: "{{ dr_ignore_error_clean }}"
+ tags:
+ - fail_back
+ - clean_engine
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_invalid_filtered_master_domains.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_invalid_filtered_master_domains.yml
new file mode 100644
index 000000000..5dd0d41c4
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_invalid_filtered_master_domains.yml
@@ -0,0 +1,22 @@
+---
+- name: Remove invalid storage domain main block
+ block:
+ - name: Fetch invalid storage domain for remove
+ ovirt_storage_domain_info:
+ pattern: name={{ storage['dr_' + dr_source_map + '_name'] }} and {{ dr_inactive_domain_search }}
+ auth: "{{ ovirt_auth }}"
+ register: storage_domain_info
+
+ - name: Remove invalid storage domain
+ include_tasks: remove_domain_process.yml
+ vars:
+ sd: "{{ sd }}"
+ with_items:
+ - "{{ storage_domain_info.ovirt_storage_domains }}"
+ when: (not only_master and not sd.master) or (only_master and sd.master)
+ loop_control:
+ loop_var: sd
+ ignore_errors: "{{ dr_ignore_error_clean }}"
+ tags:
+ - fail_back
+ - clean_engine
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_valid_filtered_master_domains.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_valid_filtered_master_domains.yml
new file mode 100644
index 000000000..b5b215664
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_valid_filtered_master_domains.yml
@@ -0,0 +1,28 @@
+---
+- name: Remove valid storage domain main block
+ block:
+ - name: Fetch active/maintenance/detached storage domain for remove
+ ovirt_storage_domain_info:
+ pattern: >
+ name={{ storage['dr_' + dr_source_map + '_name'] }} and
+ (
+ datacenter={{ storage['dr_' + dr_source_map + '_dc_name'] }} and {{ dr_active_domain_search }} or
+ datacenter={{ storage['dr_' + dr_source_map + '_dc_name'] }} and {{ dr_maintenance_domain_search }} or
+ {{ dr_unattached_domain_search }}
+ )
+ auth: "{{ ovirt_auth }}"
+ register: storage_domain_info
+
+ - name: Remove valid storage domain
+ include_tasks: remove_domain_process.yml
+ vars:
+ sd: "{{ sd }}"
+ with_items:
+ - "{{ storage_domain_info.ovirt_storage_domains }}"
+ when: (not only_master and not sd.master) or (only_master and sd.master)
+ loop_control:
+ loop_var: sd
+ ignore_errors: "{{ dr_ignore_error_clean }}"
+ tags:
+ - fail_back
+ - clean_engine
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_vms.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_vms.yml
new file mode 100644
index 000000000..d4aa1bf8d
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_vms.yml
@@ -0,0 +1,12 @@
+---
+- name: Remove diskless VM main block
+ block:
+ - name: "Remove diskless VM '{{ vm.name }}'"
+ ovirt_vm:
+ state: absent
+ name: "{{ vm.name }}"
+ auth: "{{ ovirt_auth }}"
+ ignore_errors: "{{ dr_ignore_error_clean }}"
+ tags:
+ - fail_back
+ - clean_engine
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/shutdown_vm.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/shutdown_vm.yml
new file mode 100644
index 000000000..856e75429
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/shutdown_vm.yml
@@ -0,0 +1,14 @@
+---
+- name: Shutdown VM main block
+ block:
+ - name: "Shutdown VM '{{ vms.name }}'"
+ ovirt_vm:
+ state: stopped
+ name: "{{ vms.name }}"
+ force: true
+ wait: true
+ auth: "{{ ovirt_auth }}"
+ ignore_errors: "{{ dr_ignore_error_clean }}"
+ tags:
+ - fail_back
+ - clean_engine
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/shutdown_vms.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/shutdown_vms.yml
new file mode 100644
index 000000000..7b6a6d84e
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/shutdown_vms.yml
@@ -0,0 +1,23 @@
+---
+- name: Shutdown VMs main block
+ block:
+ # Get all the running VMs related to a storage domain and shut them down
+ - name: Fetch VMs in the storage domain
+ ovirt_vm_info:
+ pattern: >
+ status != down and
+ storage.name={{ storage['dr_' + dr_source_map + '_name'] }} and
+ datacenter={{ storage['dr_' + dr_source_map + '_dc_name'] }}
+ auth: "{{ ovirt_auth }}"
+ register: vm_info
+
+ # TODO: Add a wait until the VM is really down
+ - name: Shutdown VMs
+ include_tasks: shutdown_vm.yml
+ vars:
+ vms: "{{ item }}"
+ with_items: "{{ vm_info.ovirt_vms }}"
+ ignore_errors: "{{ dr_ignore_error_clean }}"
+ tags:
+ - fail_back
+ - clean_engine
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/update_ovf_store.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/update_ovf_store.yml
new file mode 100644
index 000000000..3bec994f2
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/update_ovf_store.yml
@@ -0,0 +1,20 @@
+---
+- name: Update OVF store for active storage domain main block
+ block:
+ - name: Fetch storage domain only if active
+ ovirt_storage_domain_info:
+ pattern: status = active and storage.name={{ storage['dr_' + dr_source_map + '_name'] }}
+ auth: "{{ ovirt_auth }}"
+ register: storage_domain_info
+
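+    # The OVF_STORE disks hold the VM and template metadata of the storage
+    # domain; refreshing them before the domains are detached ensures the
+    # peer site imports up-to-date configuration.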
+ - name: Update OVF store for active storage domain
+ ovirt_storage_domain:
+ state: update_ovf_store
+ name: "{{ iscsi_storage['dr_' + dr_source_map + '_name'] }}"
+ auth: "{{ ovirt_auth }}"
+ with_items:
+ - "{{ storage_domain_info.ovirt_storage_domains }}"
+ ignore_errors: "{{ dr_ignore_error_clean }}"
+ tags:
+ - fail_back
+ - clean_engine
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean_engine.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean_engine.yml
new file mode 100644
index 000000000..922f7ffcb
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean_engine.yml
@@ -0,0 +1,130 @@
+---
+- name: Clean engine main block
+ block:
+ - name: Obtain SSO token
+ ovirt_auth:
+ url: "{{ vars['dr_sites_' + dr_source_map + '_url'] }}"
+ username: "{{ vars['dr_sites_' + dr_source_map + '_username'] }}"
+ password: "{{ vars['dr_sites_' + dr_source_map + '_password'] }}"
+ ca_file: "{{ vars['dr_sites_' + dr_source_map + '_ca_file'] }}"
+
+ - name: Shutdown running VMs
+ include_tasks: clean/shutdown_vms.yml
+ with_items:
+ - "{{ dr_import_storages }}"
+ loop_control:
+ loop_var: storage
+
+ - name: Update OVF_STORE disk for storage domains
+ include_tasks: clean/update_ovf_store.yml
+ with_items:
+ - "{{ dr_import_storages }}"
+ loop_control:
+ loop_var: storage
+
+ - name: Set force remove flag to false for non master domains
+ ansible.builtin.set_fact: dr_force=False
+
+    # Set all the query suffixes used to fetch storage domains in a specific status.
+    # Note: Export storage domains are not supported and should not be part of the storage mapping.
+ - name: Setup queries for storage domains
+ ansible.builtin.set_fact:
+ dr_active_domain_search='status = active and type != cinder'
+ dr_maintenance_domain_search='status = maintenance and type != cinder'
+ dr_unattached_domain_search='status = unattached and type != cinder and type != glance'
+ dr_inactive_domain_search='type != glance and type != cinder and status != active'
+
+ - name: Set master storage domain filter
+ ansible.builtin.set_fact: only_master=False
+
+ - name: Remove non master storage domains with valid statuses
+ include_tasks: clean/remove_valid_filtered_master_domains.yml
+ with_items:
+ - "{{ dr_import_storages }}"
+ loop_control:
+ loop_var: storage
+
+    # We use the inactive filter only at the end, since we are not sure whether any storage domains
+    # became inactive during the process or whether there were any at the beginning.
+ - name: Set force remove flag to true for non master storage domains
+ ansible.builtin.set_fact: dr_force=True
+
+ - name: Remove non master storage domains with invalid statuses using force remove
+ include_tasks: clean/remove_invalid_filtered_master_domains.yml
+ with_items:
+ - "{{ dr_import_storages }}"
+ loop_control:
+ loop_var: storage
+
+ - name: Set master storage domain filter
+ ansible.builtin.set_fact: only_master=True
+
+ - name: Set force remove flag to false for master domain
+ ansible.builtin.set_fact: dr_force=False
+
+ - name: Remove master storage domains with valid statuses
+ include_tasks: clean/remove_valid_filtered_master_domains.yml
+ with_items:
+ - "{{ dr_import_storages }}"
+ loop_control:
+ loop_var: storage
+
+ - name: Set force remove flag to true for master domain
+ ansible.builtin.set_fact: dr_force=True
+
+ - name: Remove master storage domains with invalid statuses using force remove
+ include_tasks: clean/remove_invalid_filtered_master_domains.yml
+ with_items:
+ - "{{ dr_import_storages }}"
+ loop_control:
+ loop_var: storage
+
+ - name: Fetch leftover storage domains
+ ovirt_storage_domain_info:
+ pattern: type != glance
+ auth: "{{ ovirt_auth }}"
+ register: storage_domain_info
+
+ # TODO: Document that behavior
+ # Remove VMs only if there are no data storage domains left in the setup
+ - name: Fetch leftover VMs in the setup
+ ovirt_vm_info:
+ pattern: status = down
+ auth: "{{ ovirt_auth }}"
+ register: vm_info
+ when: dr_clean_orphaned_vms and storage_domain_info.ovirt_storage_domains | length == 0
+
+ - name: Remove VMs if no storage domains left in setup
+ include_tasks: clean/remove_vms.yml
+ vars:
+ vm: "{{ item }}"
+ with_items: "{{ vm_info.ovirt_vms }}"
+ when: dr_clean_orphaned_vms and storage_domain_info.ovirt_storage_domains | length == 0
+
+ # Remove direct LUN disks
+ - name: Fetch leftover direct LUN disks in the setup
+ ovirt_disk_info:
+        pattern: disk_type = lun and number_of_vms = 0
+ auth: "{{ ovirt_auth }}"
+ register: disk_info
+ when: dr_clean_orphaned_disks and storage_domain_info.ovirt_storage_domains | length == 0
+
+ - name: Remove LUN disks if no storage domains left in setup
+ include_tasks: clean/remove_disks.yml
+ vars:
+ disk: "{{ item }}"
+ with_items: "{{ disk_info.ovirt_disks }}"
+ when: dr_clean_orphaned_disks and storage_domain_info.ovirt_storage_domains | length == 0
+
+
+ # Default value is set in role defaults
+ ignore_errors: "{{ dr_ignore_error_clean }}"
+ tags:
+ - fail_back
+ - clean_engine
+
+ always:
+ - name: Revoke the SSO token
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/generate_mapping.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/generate_mapping.yml
new file mode 100644
index 000000000..508d1bee3
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/generate_mapping.yml
@@ -0,0 +1,15 @@
+---
+- name: Generate mapping var file main block
+ block:
+ - name: Generate mapping var file
+ ansible.builtin.command: >-
+ python3 {{ role_path }}/files/generate_mapping.py
+ -a "{{ site }}"
+ -u "{{ username }}"
+ -p "{{ password }}"
+ -c "{{ ca }}"
+ -f "{{ var_file }}"
+ changed_when: true
+ run_once: true
+ tags:
+ - generate_mapping
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/main.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/main.yml
new file mode 100644
index 000000000..bcd09d416
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/main.yml
@@ -0,0 +1,35 @@
+---
+- name: Main block
+ block:
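+    # Failback flow: unregister entities -> clean the source engine ->
+    # wait for storage replication -> recover the target engine -> run the
+    # unregistered entities. Failover runs only the recover step, and
+    # generate_mapping only builds the mapping var file.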
+ - name: Start to unregister entities
+ include_tasks: unregister_entities.yml
+ tags:
+ - fail_back
+
+ - name: Clean engine setup
+ include_tasks: clean_engine.yml
+ tags:
+ - fail_back
+ - clean_engine
+
+ - name: Failback Replication Sync pause
+ ansible.builtin.pause:
+ prompt: "[Failback Replication Sync] Please press ENTER once the destination storage domains are ready to be used for the destination setup"
+ tags:
+ - fail_back
+
+ - name: Recover target engine
+ include_tasks: recover_engine.yml
+ tags:
+ - fail_over
+ - fail_back
+
+ - name: Run the appropriate unregistered entities
+ include_tasks: run_unregistered_entities.yml
+ tags:
+ - fail_back
+
+ - name: Generate mapping var file
+ include_tasks: generate_mapping.yml
+ tags:
+ - generate_mapping
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_domain.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_domain.yml
new file mode 100644
index 000000000..dca9d4d94
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_domain.yml
@@ -0,0 +1,61 @@
+---
+- name: Add storage domain main block
+ block:
+ - name: Fetch available hosts in data center
+ ovirt_host_info:
+ pattern: "status=up and datacenter={{ storage['dr_' + dr_target_host + '_dc_name'] }}"
+ auth: "{{ ovirt_auth }}"
+ register: host_info
+
+ - name: Check for available hosts block
+ block:
+ - name: "Check for available hosts"
+ ansible.builtin.fail: msg="No hosts available"
+ when: host_info.ovirt_hosts.0 is undefined
+
+ - name: Add storage domain block
+ block:
+ - name: Add storage domain if NFS
+ include_tasks: add_nfs_domain.yml
+ with_items:
+ - "{{ storage }}"
+ when: "storage.dr_domain_type == 'nfs'"
+ loop_control:
+ loop_var: nfs_storage
+
+ - name: Add storage domain if Gluster
+ include_tasks: add_glusterfs_domain.yml
+ with_items:
+ - "{{ storage }}"
+ when: "storage.dr_domain_type == 'glusterfs'"
+ loop_control:
+ loop_var: gluster_storage
+
+ - name: Add storage domain if POSIX
+ include_tasks: add_posixfs_domain.yml
+ with_items:
+ - "{{ storage }}"
+ when: "storage.dr_domain_type == 'posixfs'"
+ loop_control:
+ loop_var: posix_storage
+
+ - name: Add storage domain if iSCSI
+ include_tasks: add_iscsi_domain.yml
+ with_items:
+ - "{{ storage }}"
+ when: "storage.dr_domain_type == 'iscsi'"
+ loop_control:
+ loop_var: iscsi_storage
+
+ - name: Add storage domain if FCP
+ include_tasks: add_fcp_domain.yml
+ with_items:
+ - "{{ storage }}"
+ when: "storage.dr_domain_type == 'fcp'"
+ loop_control:
+ loop_var: fcp_storage
+ when: host_info.ovirt_hosts.0 is defined
+ ignore_errors: "{{ dr_ignore_error_recover }}"
+ tags:
+ - fail_over
+ - fail_back
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_fcp_domain.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_fcp_domain.yml
new file mode 100644
index 000000000..5dd49d4a9
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_fcp_domain.yml
@@ -0,0 +1,32 @@
+---
+- name: Import FCP storage domain main block
+ block:
+ - name: Import FCP storage domain
+ ovirt_storage_domain:
+ state: imported
+ id: "{{ fcp_storage['dr_domain_id'] }}"
+ name: "{{ fcp_storage['dr_' + dr_target_host + '_name'] | default('') }}"
+ critical_space_action_blocker: "{{ fcp_storage['dr_critical_space_action_blocker'] }}"
+ warning_low_space: "{{ fcp_storage['dr_warning_low_space'] }}"
+ discard_after_delete: "{{ fcp_storage['dr_discard_after_delete'] }}"
+ wipe_after_delete: "{{ fcp_storage['dr_wipe_after_delete'] }}"
+ backup: "{{ fcp_storage['dr_backup'] }}"
+ host: "{{ host_info.ovirt_hosts[0].name }}"
+ auth: "{{ ovirt_auth }}"
+ data_center: "{{ fcp_storage['dr_' + dr_target_host + '_dc_name'] }}"
+ fcp: {}
+ register: result
+
+ - name: Log append to succeed_storage_domains
+ ansible.builtin.set_fact:
+ succeed_storage_domains: "{{ succeed_storage_domains }} + [ \"{{ fcp_storage['dr_' + dr_target_host + '_name'] | default('') }}\" ]"
+ when: result is succeeded
+
+ - name: Log append to failed_storage_domains
+ ansible.builtin.set_fact:
+ failed_storage_domains: "{{ failed_storage_domains }} + [ \"{{ fcp_storage['dr_' + dr_target_host + '_name'] | default('') }}\" ]"
+ when: result is failed
+ ignore_errors: "{{ dr_ignore_error_recover }}"
+ tags:
+ - fail_over
+ - fail_back
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_glusterfs_domain.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_glusterfs_domain.yml
new file mode 100644
index 000000000..31715d61c
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_glusterfs_domain.yml
@@ -0,0 +1,32 @@
+---
+- name: Add Gluster storage domain main block
+ block:
+ - name: Add Gluster storage domain
+ ovirt_storage_domain:
+ name: "{{ gluster_storage['dr_' + dr_target_host + '_name'] }}"
+ critical_space_action_blocker: "{{ gluster_storage['dr_critical_space_action_blocker'] }}"
+ domain_function: "{{ gluster_storage['dr_storage_domain_type'] }}"
+ warning_low_space: "{{ gluster_storage['dr_warning_low_space'] }}"
+ wipe_after_delete: "{{ gluster_storage['dr_wipe_after_delete'] }}"
+ backup: "{{ gluster_storage['dr_backup'] }}"
+ host: "{{ host_info.ovirt_hosts[0].name }}"
+ data_center: "{{ gluster_storage['dr_' + dr_target_host + '_dc_name'] }}"
+ auth: "{{ ovirt_auth }}"
+ glusterfs:
+ path: "{{ gluster_storage['dr_' + dr_target_host + '_path'] }}"
+ address: "{{ gluster_storage['dr_' + dr_target_host + '_address'] }}"
+ register: result
+
+ - name: Log append to succeed_storage_domains
+ ansible.builtin.set_fact:
+ succeed_storage_domains: "{{ succeed_storage_domains }} + [ \"{{ gluster_storage['dr_' + dr_target_host + '_name'] }}\" ]"
+ when: result is succeeded
+
+ - name: Log append to failed_storage_domains
+ ansible.builtin.set_fact:
+ failed_storage_domains: "{{ failed_storage_domains }} + [ \"{{ gluster_storage['dr_' + dr_target_host + '_name'] }}\" ]"
+ when: result is failed
+ ignore_errors: "{{ dr_ignore_error_recover }}"
+ tags:
+ - fail_over
+ - fail_back
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_iscsi_domain.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_iscsi_domain.yml
new file mode 100644
index 000000000..a192e03b0
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_iscsi_domain.yml
@@ -0,0 +1,61 @@
+---
+- name: Import iSCSI storage domain main block
+ block:
+    # TODO: Add support for connecting to multiple targets with the same LUN.
+    # Every connection should be done using a different IP.
+ - name: Import iSCSI storage domain block
+ block:
+ - name: Login to iSCSI targets
+ ovirt_host:
+ state: iscsilogin
+ name: "{{ host_info.ovirt_hosts[0].name }}"
+ auth: "{{ ovirt_auth }}"
+ iscsi:
+ username: "{{ iscsi_storage['dr_' + dr_target_host + '_username'] | default('') }}"
+ password: "{{ iscsi_storage['dr_' + dr_target_host + '_password'] | default('') }}"
+ address: "{{ iscsi_storage['dr_' + dr_target_host + '_address'] }}"
+ target: "{{ dr_target }}"
+              # The port is optional; default to 3260.
+ port: "{{ iscsi_storage['dr_' + dr_target_host + '_port'] | default('3260' | int, true) }}"
+ with_items:
+ - "{{ iscsi_storage['dr_' + dr_target_host + '_target'] }}"
+ loop_control:
+ loop_var: dr_target
+
+ - name: Import iSCSI storage domain
+ ovirt_storage_domain:
+ state: imported
+ id: "{{ iscsi_storage['dr_domain_id'] }}"
+ name: "{{ iscsi_storage['dr_' + dr_target_host + '_name'] | default('') }}"
+ host: "{{ host_info.ovirt_hosts[0].name }}"
+ auth: "{{ ovirt_auth }}"
+ data_center: "{{ iscsi_storage['dr_' + dr_target_host + '_dc_name'] }}"
+ critical_space_action_blocker: "{{ iscsi_storage['dr_critical_space_action_blocker'] }}"
+ warning_low_space: "{{ iscsi_storage['dr_warning_low_space'] }}"
+ wipe_after_delete: "{{ iscsi_storage['dr_wipe_after_delete'] }}"
+ discard_after_delete: "{{ iscsi_storage['dr_discard_after_delete'] }}"
+ backup: "{{ iscsi_storage['dr_backup'] }}"
+            # TODO: For an iSCSI import there is no need for the iscsi parameters.
+ iscsi:
+ username: "{{ iscsi_storage['dr_' + dr_target_host + '_username'] | default('') }}"
+ password: "{{ iscsi_storage['dr_' + dr_target_host + '_password'] | default('') }}"
+ address: "{{ iscsi_storage['dr_' + dr_target_host + '_address'] }}"
+              # We pass a target because state 'imported' in ovirt_storage_domain.py creates a storage domain
+              # by calling login, so a target must be supplied even though the targets were already connected above.
+              # The first target in the list is therefore passed as a transient target.
+ target: "{{ dr_target }}"
+ with_items:
+ - "{{ iscsi_storage['dr_' + dr_target_host + '_target'] }}"
+ loop_control:
+ loop_var: dr_target
+ - name: Log append to succeed_storage_domains
+ ansible.builtin.set_fact:
+ succeed_storage_domains: "{{ succeed_storage_domains }} + [ \"{{ iscsi_storage['dr_' + dr_target_host + '_name'] | default('') }}\" ]"
+ rescue:
+ - name: Log append to failed_storage_domains
+ ansible.builtin.set_fact:
+ failed_storage_domains: "{{ failed_storage_domains }} + [ \"{{ iscsi_storage['dr_' + dr_target_host + '_name'] | default('') }}\" ]"
+ ignore_errors: "{{ dr_ignore_error_recover }}"
+ tags:
+ - fail_over
+ - fail_back
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_nfs_domain.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_nfs_domain.yml
new file mode 100644
index 000000000..eb3644f48
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_nfs_domain.yml
@@ -0,0 +1,28 @@
+---
+- name: Add NFS storage domain main block
+ block:
+ - name: Add NFS storage domain
+ ovirt_storage_domain:
+ name: "{{ nfs_storage['dr_' + dr_target_host + '_name'] }}"
+ domain_function: "{{ nfs_storage['dr_storage_domain_type'] }}"
+ critical_space_action_blocker: "{{ nfs_storage['dr_critical_space_action_blocker'] }}"
+ wipe_after_delete: "{{ nfs_storage['dr_wipe_after_delete'] }}"
+ backup: "{{ nfs_storage['dr_backup'] }}"
+ host: "{{ host_info.ovirt_hosts[0].name }}"
+ data_center: "{{ nfs_storage['dr_' + dr_target_host + '_dc_name'] }}"
+ auth: "{{ ovirt_auth }}"
+ nfs:
+ path: "{{ nfs_storage['dr_' + dr_target_host + '_path'] }}"
+ address: "{{ nfs_storage['dr_' + dr_target_host + '_address'] }}"
+ - name: Log append to successful storage domains
+ ansible.builtin.set_fact:
+ succeed_storage_domains: "{{ succeed_storage_domains }} + [ \"{{ nfs_storage['dr_' + dr_target_host + '_name'] }}\" ]"
+
+ rescue:
+ - name: Log append to failed storage domains
+ ansible.builtin.set_fact:
+ failed_storage_domains: "{{ failed_storage_domains }} + [ \"{{ nfs_storage['dr_' + dr_target_host + '_name'] }}\" ]"
+ ignore_errors: "{{ dr_ignore_error_recover }}"
+ tags:
+ - fail_over
+ - fail_back
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_posixfs_domain.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_posixfs_domain.yml
new file mode 100644
index 000000000..384101af9
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_posixfs_domain.yml
@@ -0,0 +1,33 @@
+---
+- name: Add POSIX storage domain main block
+ block:
+ - name: Add POSIX storage domain
+ ovirt_storage_domain:
+ name: "{{ posix_storage['dr_' + dr_target_host + '_name'] }}"
+ critical_space_action_blocker: "{{ posix_storage['dr_critical_space_action_blocker'] }}"
+ domain_function: "{{ posix_storage['dr_storage_domain_type'] }}"
+ warning_low_space: "{{ posix_storage['dr_warning_low_space'] }}"
+ wipe_after_delete: "{{ posix_storage['dr_wipe_after_delete'] }}"
+ backup: "{{ posix_storage['dr_backup'] }}"
+ host: "{{ host_info.ovirt_hosts[0].name }}"
+ data_center: "{{ posix_storage['dr_' + dr_target_host + '_dc_name'] }}"
+ auth: "{{ ovirt_auth }}"
+ posixfs:
+ vfs_type: "{{ posix_storage['dr_' + dr_target_host + '_vfs_type'] }}"
+ path: "{{ posix_storage['dr_' + dr_target_host + '_path'] }}"
+ address: "{{ posix_storage['dr_' + dr_target_host + '_address'] }}"
+ register: result
+
+ - name: Log append to succeed_storage_domains
+ ansible.builtin.set_fact:
+ succeed_storage_domains: "{{ succeed_storage_domains }} + [ \"{{ posix_storage['dr_' + dr_target_host + '_name'] }}\" ]"
+ when: result is succeeded
+
+ - name: Log append to failed_storage_domains
+ ansible.builtin.set_fact:
+ failed_storage_domains: "{{ failed_storage_domains }} + [ \"{{ posix_storage['dr_' + dr_target_host + '_name'] }}\" ]"
+ when: result is failed
+ ignore_errors: "{{ dr_ignore_error_recover }}"
+ tags:
+ - fail_over
+ - fail_back
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/print_info.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/print_info.yml
new file mode 100644
index 000000000..6b8c85354
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/print_info.yml
@@ -0,0 +1,19 @@
+---
+- name: Print report file main block
+ block:
+ - name: Generate log file through template
+ ansible.builtin.template:
+ src: report_log_template.j2
+ dest: /tmp/{{ dr_report_file }}
+ mode: preserve
+
+ - name: Print report file
+ ansible.builtin.command: cat /tmp/{{ dr_report_file }}
+ changed_when: false
+ register: content
+
+ - name: Print report file to stdout
+ ansible.builtin.debug: msg="{{ content.stdout_lines | quote }}"
+ tags:
+ - fail_over
+ - fail_back
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_template.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_template.yml
new file mode 100644
index 000000000..ebdbfe5f7
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_template.yml
@@ -0,0 +1,29 @@
+---
+- name: Register unregistered template main block
+ block:
+ - name: "Register unregistered template '{{ unreg_template.id }}'"
+ ovirt_template:
+ state: registered
+ storage_domain: "{{ storage.name }}"
+ id: "{{ unreg_template.id }}"
+ allow_partial_import: "{{ dr_partial_import }}"
+ auth: "{{ ovirt_auth }}"
+ cluster_mappings: "{{ dr_cluster_map }}"
+ domain_mappings: "{{ dr_domain_map }}"
+ vnic_profile_mappings: "{{ dr_network_map }}"
+ role_mappings: "{{ dr_role_map }}"
+ register: template_register_result
+
+ - name: Log append failed template to failed_template_names
+ ansible.builtin.set_fact:
+ failed_template_names: "{{ failed_template_names }} + [ '{{ unreg_template.name }}' ]"
+ when: template_register_result is failed
+
+    - name: Log append succeeded template to succeed_template_names
+ ansible.builtin.set_fact:
+ succeed_template_names: "{{ succeed_template_names }} + [ '{{ unreg_template.name }}' ]"
+ when: template_register_result is succeeded
+ ignore_errors: "{{ dr_ignore_error_recover }}"
+ tags:
+ - fail_over
+ - fail_back
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_templates.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_templates.yml
new file mode 100644
index 000000000..b1688fa4b
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_templates.yml
@@ -0,0 +1,23 @@
+---
+- name: Register unregistered templates main block
+ block:
+ - name: Fetch unregistered templates from storage domain
+ ovirt_storage_template_info:
+ nested_attributes: "id"
+ unregistered: true
+ storage_domain: "{{ storage.name }}"
+ auth: "{{ ovirt_auth }}"
+ register: storage_template_info
+
+ - name: Register unregistered templates
+      include_tasks: register_template.yml
+      # The main block is already declared to ignore errors, so adding
+      # ignore_errors: "{{ ignore | default(yes) }}" here would be redundant.
+ with_items: "{{ storage_template_info.ovirt_storage_templates }}"
+ # We use loop_control so storage.name will not be overridden by the nested loop.
+ loop_control:
+ loop_var: unreg_template
+ ignore_errors: "{{ dr_ignore_error_recover }}"
+ tags:
+ - fail_over
+ - fail_back
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_vm.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_vm.yml
new file mode 100644
index 000000000..d2d368681
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_vm.yml
@@ -0,0 +1,33 @@
+---
+- name: Register VM main block
+ block:
+ - name: Register VM
+ ovirt_vm:
+ state: registered
+ storage_domain: "{{ storage.name }}"
+ id: "{{ unreg_vm.id }}"
+ auth: "{{ ovirt_auth }}"
+ allow_partial_import: "{{ dr_partial_import }}"
+ cluster_mappings: "{{ dr_cluster_map }}"
+ domain_mappings: "{{ dr_domain_map }}"
+ role_mappings: "{{ dr_role_map }}"
+ affinity_group_mappings: "{{ dr_affinity_group_map }}"
+ affinity_label_mappings: "{{ dr_affinity_label_map }}"
+ vnic_profile_mappings: "{{ dr_network_map }}"
+ lun_mappings: "{{ dr_lun_map }}"
+ reassign_bad_macs: "{{ dr_reset_mac_pool }}"
+ register: vm_register_result
+
+ - name: Log append failed VM to failed_vm_names
+ ansible.builtin.set_fact:
+ failed_vm_names: "{{ failed_vm_names }} + [ '{{ unreg_vm.name }}' ]"
+ when: vm_register_result is failed
+
+ - name: Log append succeeded VM to succeed_vm_names
+ ansible.builtin.set_fact:
+ succeed_vm_names: "{{ succeed_vm_names }} + [ '{{ unreg_vm.name }}' ]"
+ when: vm_register_result is succeeded
+ ignore_errors: "{{ dr_ignore_error_recover }}"
+ tags:
+ - fail_over
+ - fail_back
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_vms.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_vms.yml
new file mode 100644
index 000000000..9788f6037
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_vms.yml
@@ -0,0 +1,26 @@
+---
+- name: Register VMs main block
+ block:
+ - name: Fetch unregistered VMs from storage domain
+ ovirt_storage_vm_info:
+ nested_attributes: "id"
+ unregistered: true
+ storage_domain: "{{ storage.name }}"
+ auth: "{{ ovirt_auth }}"
+ register: storage_vm_info
+
+ - name: Set unregistered VMs
+ ansible.builtin.set_fact:
+ unreg_vms: "{{ unreg_vms | default([]) + storage_vm_info.ovirt_storage_vms }}"
+
+ # TODO: We should filter out VMs which already exist in the setup (diskless VMs)
+ - name: Register VMs
+      include_tasks: register_vm.yml
+ with_items: "{{ storage_vm_info.ovirt_storage_vms }}"
+ # We use loop_control so storage.name will not be overridden by the nested loop.
+ loop_control:
+ loop_var: unreg_vm
+ ignore_errors: "{{ dr_ignore_error_recover }}"
+ tags:
+ - fail_over
+ - fail_back
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/report_log_template.j2 b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/report_log_template.j2
new file mode 100644
index 000000000..6fbccb8a8
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/report_log_template.j2
@@ -0,0 +1,24 @@
+{% if succeed_vm_names | length > 0 %}
+ The following VMs registered successfully: {{ succeed_vm_names | unique | join (", ") }}
+{% endif %}
+{% if failed_vm_names | length > 0 %}
+ The following VMs failed to be registered: {{ failed_vm_names | unique | join (", ") }}
+{% endif %}
+{% if succeed_template_names | length > 0 %}
+ The following Templates registered successfully: {{ succeed_template_names | unique | join (", ") }}
+{% endif %}
+{% if failed_template_names | length > 0 %}
+ The following Templates failed to be registered: {{ failed_template_names | unique | join (", ") }}
+{% endif %}
+{% if succeed_to_run_vms | length > 0 %}
+ The following VMs started successfully: {{ succeed_to_run_vms | unique | join (", ") }}
+{% endif %}
+{% if failed_to_run_vms | length > 0 %}
+ The following VMs failed to run: {{ failed_to_run_vms | unique | join (", ") }}
+{% endif %}
+{% if succeed_storage_domains | length > 0 %}
+ The following storage domains were successfully added: {{ succeed_storage_domains | unique | join (", ") }}
+{% endif %}
+{% if failed_storage_domains | length > 0 %}
+ The following storage domains were not added: {{ failed_storage_domains | unique | join (", ") }}
+{% endif %}
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/run_vms.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/run_vms.yml
new file mode 100644
index 000000000..a4f6e56fe
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/run_vms.yml
@@ -0,0 +1,23 @@
+---
+- name: Run VMs main block
+ block:
+ - name: Run VMs
+ ovirt_vm:
+ state: running
+ name: "{{ vms.name }}"
+ wait: false
+ auth: "{{ ovirt_auth }}"
+ register: result
+ - name: Append the VM name to succeed_to_run_vms
+ ansible.builtin.set_fact:
+ succeed_to_run_vms: "{{ succeed_to_run_vms }} + [ '{{ vms.name }}' ]"
+ when: result is succeeded
+
+ - name: Append the VM name to failed_to_run_vms
+ ansible.builtin.set_fact:
+ failed_to_run_vms: "{{ failed_to_run_vms }} + [ '{{ vms.name }}' ]"
+ when: result is failed
+ ignore_errors: "{{ dr_ignore_error_recover }}"
+ tags:
+ - fail_over
+ - fail_back
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover_engine.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover_engine.yml
new file mode 100644
index 000000000..05e21b56e
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover_engine.yml
@@ -0,0 +1,213 @@
+---
+- name: Recover engine main block
+ block:
+ - name: Obtain SSO token
+ ovirt_auth:
+ url: "{{ vars['dr_sites_' + dr_target_host + '_url'] }}"
+ username: "{{ vars['dr_sites_' + dr_target_host + '_username'] }}"
+ password: "{{ vars['dr_sites_' + dr_target_host + '_password'] }}"
+ ca_file: "{{ vars['dr_sites_' + dr_target_host + '_ca_file'] }}"
+ ignore_errors: false
+
+ - name: Delete previous report log
+ ansible.builtin.file:
+ path: "/tmp/{{ dr_report_file }}"
+ state: absent
+ ignore_errors: true
+
+ - name: Create report file
+ ansible.builtin.file:
+ path: "/tmp/{{ dr_report_file }}"
+ state: touch
+ mode: 0644
+
+ - name: Init entity status list
+ ansible.builtin.set_fact:
+ failed_vm_names: []
+ succeed_vm_names: []
+ failed_template_names: []
+ succeed_template_names: []
+ failed_to_run_vms: []
+ succeed_to_run_vms: []
+ succeed_storage_domains: []
+ failed_storage_domains: []
+
+ # TODO: We should add a validation task that verifies that none of the hosts
+ # in the other site (primary or secondary) can be connected, and also set a
+ # timer that waits at least 180 seconds before the first attach takes place.
+ # We should do that to prevent a Sanlock failure when acquiring the
+ # lockspace. A flag (default: true) should control whether this check runs.
+
+ # TODO: What happens if the master domain fails to be attached? Do we still
+ # want to continue and attach the other storage domains (which would make
+ # another storage domain the master instead)?
+ - name: Add master storage domain to the setup
+ include_tasks: recover/add_domain.yml
+ vars:
+ storage: "{{ item }}"
+ with_items:
+ - "{{ dr_import_storages }}"
+ when: item['dr_' + dr_target_host + '_master_domain']
+
+ - name: Add non master storage domains to the setup
+ include_tasks: recover/add_domain.yml
+ vars:
+ storage: "{{ item }}"
+ with_items:
+ - "{{ dr_import_storages }}"
+ when: not item['dr_' + dr_target_host + '_master_domain']
+
+ # Get all the active storage domains in the setup to register
+ # all the templates/VMs/Disks
+ - name: Fetch active storage domains
+ ovirt_storage_domain_info:
+ pattern: "status=active"
+ auth: "{{ ovirt_auth }}"
+ register: storage_domain_info
+
+ - name: Set initial Maps
+ ansible.builtin.set_fact:
+ dr_cluster_map: "{{ [] }}"
+ dr_affinity_group_map: "{{ [] }}"
+ dr_affinity_label_map: "{{ [] }}"
+ dr_domain_map: "{{ [] }}"
+ dr_role_map: "{{ [] }}"
+ dr_lun_map: "{{ [] }}"
+ dr_network_map: "{{ [] }}"
+
+ - name: Set Cluster Map
+ ansible.builtin.set_fact:
+ dr_cluster_map: "{{ dr_cluster_map + [
+ {
+ 'source_name': item[dr_source_map + '_name'] | default('EMPTY_ELEMENT', true),
+ 'dest_name': item[dr_target_host + '_name'] | default('EMPTY_ELEMENT', true)
+ }
+ ] }}"
+ with_items: "{{ dr_cluster_mappings }}"
+ when: dr_cluster_mappings is not none
+
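+ # The Set * Map tasks above and below all follow the same pattern. For
+ # illustration (an assumed mapping entry, not part of this file): with
+ # dr_source_map == 'secondary' and dr_target_host == 'primary', a
+ # dr_cluster_mappings item such as
+ # {secondary_name: 'DR-cluster', primary_name: 'Prod-cluster'}
+ # is turned into {'source_name': 'DR-cluster', 'dest_name': 'Prod-cluster'}.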
+ - name: Set Affinity Group Map
+ ansible.builtin.set_fact:
+ dr_affinity_group_map: "{{ dr_affinity_group_map + [
+ {
+ 'source_name': item[dr_source_map + '_name'] | default('EMPTY_ELEMENT', true),
+ 'dest_name': item[dr_target_host + '_name'] | default('EMPTY_ELEMENT', true)
+ }
+ ] }}"
+ with_items: "{{ dr_affinity_group_mappings }}"
+ when: dr_affinity_group_mappings is not none
+
+ - name: Set Network Map
+ ansible.builtin.set_fact:
+ dr_network_map: "{{ dr_network_map + [
+ {
+ 'source_network_name': item[dr_source_map + '_network_name'] | default('EMPTY_ELEMENT', true),
+ 'source_profile_name': item[dr_source_map + '_profile_name'] | default('EMPTY_ELEMENT', true),
+ 'target_network_dc': item[dr_target_host + '_network_dc'] | default('EMPTY_ELEMENT', true),
+ 'target_profile_id': item[dr_target_host + '_profile_id'] | default('00000000-0000-0000-0000-000000000000', true)
+ }
+ ] }}"
+ with_items: "{{ dr_network_mappings }}"
+ when: dr_network_mappings is not none
+
+ - name: Set Affinity Label Map
+ ansible.builtin.set_fact:
+ dr_affinity_label_map: "{{ dr_affinity_label_map + [
+ {
+ 'source_name': item[dr_source_map + '_name'] | default('EMPTY_ELEMENT', true),
+ 'dest_name': item[dr_target_host + '_name'] | default('EMPTY_ELEMENT', true)
+ }
+ ] }}"
+ with_items: "{{ dr_affinity_label_mappings }}"
+ when: dr_affinity_label_mappings is not none
+
+ - name: Set aaa extensions Map
+ ansible.builtin.set_fact:
+ dr_domain_map: "{{ dr_domain_map + [
+ {
+ 'source_name': item[dr_source_map + '_name'] | default('EMPTY_ELEMENT', true),
+ 'dest_name': item[dr_target_host + '_name'] | default('EMPTY_ELEMENT', true)
+ }
+ ] }}"
+ with_items: "{{ dr_domain_mappings }}"
+ when: dr_domain_mappings is not none
+
+ - name: Set Role Map
+ ansible.builtin.set_fact:
+ dr_role_map: "{{ dr_role_map + [
+ {
+ 'source_name': item[dr_source_map + '_name'] | default('EMPTY_ELEMENT', true),
+ 'dest_name': item[dr_target_host + '_name'] | default('EMPTY_ELEMENT', true)
+ }
+ ] }}"
+ with_items: "{{ dr_role_mappings }}"
+ when: dr_role_mappings is not none
+
+ - name: Set Lun Map
+ ansible.builtin.set_fact:
+ dr_lun_map: "{{ dr_lun_map + [
+ {
+ 'source_logical_unit_id': item[dr_source_map + '_logical_unit_id'] | default('EMPTY_ELEMENT', true),
+ 'source_storage_type': item[dr_source_map + '_storage_type'] | default('EMPTY_ELEMENT', true),
+ 'dest_logical_unit_id': item[dr_target_host + '_logical_unit_id'] | default('EMPTY_ELEMENT', true),
+ 'dest_storage_type': item[dr_target_host + '_storage_type'] | default('EMPTY_ELEMENT', true),
+ 'dest_logical_unit_address': item[dr_target_host + '_logical_unit_address'] | default('EMPTY_ELEMENT', true),
+ 'dest_logical_unit_port': item[dr_target_host + '_logical_unit_port'] | default('3260' | int, true),
+ 'dest_logical_unit_portal': item[dr_target_host + '_logical_unit_portal'] | default('1', true),
+ 'dest_logical_unit_username': item[dr_target_host + '_logical_unit_username'] | default('', true),
+ 'dest_logical_unit_password': item[dr_target_host + '_logical_unit_password'] | default('', true),
+ 'dest_logical_unit_target': item[dr_target_host + '_logical_unit_target'] | default('[]', true)
+ }
+ ] }}"
+ with_items: "{{ dr_lun_mappings }}"
+ when: dr_lun_mappings is not none
+
+ # First register all the unregistered templates based on the
+ # active storage domains we fetched before.
+ # We register the Templates first since we might have
+ # VMs which are based on them
+ - name: Register templates
+ include_tasks: recover/register_templates.yml
+ vars:
+ storage: "{{ item }}"
+ with_items:
+ - "{{ storage_domain_info.ovirt_storage_domains }}"
+
+ # Register all the unregistered VMs after we registered
+ # all the templates from the active storage domains fetched before.
+ - name: Register VMs
+ include_tasks: recover/register_vms.yml
+ vars:
+ storage: "{{ item }}"
+ with_items:
+ - "{{ storage_domain_info.ovirt_storage_domains }}"
+
+ # Run all the high availability VMs.
+ - name: Run highly available VMs
+ include_tasks: recover/run_vms.yml
+ vars:
+ vms: "{{ item }}"
+ with_items: "{{ unreg_vms }}"
+ when: item.status == 'up' and item.high_availability.enabled | bool
+
+ # Run all the rest of the VMs.
+ - name: Run the rest of the VMs
+ include_tasks: recover/run_vms.yml
+ vars:
+ vms: "{{ item }}"
+ with_items: "{{ unreg_vms }}"
+ when: item.status == 'up' and not item.high_availability.enabled | bool
+
+ # Default value is set in role defaults
+ ignore_errors: "{{ dr_ignore_error_recover }}"
+ tags:
+ - fail_over
+ - fail_back
+ always:
+ - name: Print operation summary
+ include_tasks: recover/print_info.yml
+ - name: Revoke the SSO token
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/run_unregistered_entities.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/run_unregistered_entities.yml
new file mode 100644
index 000000000..cfaf642b1
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/run_unregistered_entities.yml
@@ -0,0 +1,42 @@
+---
+- name: Run unregistered entities main block
+ block:
+ - name: Obtain SSO token
+ ovirt_auth:
+ url: "{{ vars['dr_sites_' + dr_target_host + '_url'] }}"
+ username: "{{ vars['dr_sites_' + dr_target_host + '_username'] }}"
+ password: "{{ vars['dr_sites_' + dr_target_host + '_password'] }}"
+ ca_file: "{{ vars['dr_sites_' + dr_target_host + '_ca_file'] }}"
+
+ - name: Read file that contains running VMs from the previous setup
+ ansible.builtin.set_fact: running_vms_fail_back="{{ lookup('file', dr_running_vms) }}"
+
+ - name: Remove dr_running_vms file after being used
+ ansible.builtin.file:
+ path: "{{ dr_running_vms }}"
+ state: absent
+
+ - name: Run all the (previously running) high availability VMs
+ include_tasks: recover/run_vms.yml
+ vars:
+ vms: "{{ item }}"
+ with_items: "{{ running_vms_fail_back }}"
+ when: item.high_availability.enabled | bool
+
+ - name: Run all the (previously running) non high availability VMs
+ include_tasks: recover/run_vms.yml
+ vars:
+ vms: "{{ item }}"
+ with_items: "{{ running_vms_fail_back }}"
+ when: not item.high_availability.enabled | bool
+
+ # TODO: Remove dr_report_file
+
+ ignore_errors: "{{ dr_ignore_error_clean }}"
+ tags:
+ - fail_back
+ always:
+ - name: Revoke the SSO token
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
diff --git a/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/unregister_entities.yml b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/unregister_entities.yml
new file mode 100644
index 000000000..e7ad22ab2
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/unregister_entities.yml
@@ -0,0 +1,62 @@
+---
+- name: Unregister entities main block
+ block:
+ - name: Obtain SSO token
+ ovirt_auth:
+ url: "{{ vars['dr_sites_' + dr_source_map + '_url'] }}"
+ username: "{{ vars['dr_sites_' + dr_source_map + '_username'] }}"
+ password: "{{ vars['dr_sites_' + dr_source_map + '_password'] }}"
+ ca_file: "{{ vars['dr_sites_' + dr_source_map + '_ca_file'] }}"
+
+ # Get all the running VMs and shut them down
+ - name: Fetch running VMs in the setup
+ ovirt_vm_info:
+ pattern: status = up
+ auth: "{{ ovirt_auth }}"
+ register: vm_info
+
+ - name: Check whether file with running VMs info exists
+ ansible.builtin.stat:
+ path: '{{ dr_running_vms }}'
+ register: stat_result
+
+ - name: Fetch all data of running VMs from file, if it exists
+ ansible.builtin.set_fact: running_vms_fail_back="{{ lookup('file', dr_running_vms) }}"
+ when: stat_result.stat.exists
+ ignore_errors: true
+
+ - name: Init list property for running_vms
+ ansible.builtin.set_fact:
+ res_ovirt_vms="[]"
+
+ - name: Map all running VMs in fact
+ ansible.builtin.set_fact:
+ res_ovirt_vms: "{{ res_ovirt_vms + [
+ {
+ 'id': item.id,
+ 'name': item.name,
+ 'high_availability': item.high_availability
+ }
+ ] }}"
+ with_items: "{{ vm_info.ovirt_vms }}"
+ when: item.id is defined
+
+ - name: Create file to hold running VMs if it does not exist
+ ansible.builtin.file:
+ path: '{{ dr_running_vms }}'
+ state: touch
+ mode: 0644
+ when: not stat_result.stat.exists|bool or running_vms_fail_back is not defined
+
+ - name: Write the running VMs to the file if it does not already contain data
+ ansible.builtin.copy: content="{{ res_ovirt_vms }}" dest={{ dr_running_vms }} mode="preserve"
+ when: running_vms_fail_back is not defined or (running_vms_fail_back is defined and running_vms_fail_back | length == 0)
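+ # Illustrative content of the resulting dr_running_vms file (assumed values):
+ # [{'id': '1234', 'name': 'vm1', 'high_availability': {'enabled': true}}]
+ # run_unregistered_entities.yml reads this list back during fail back to
+ # restart these VMs.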
+
+ ignore_errors: "{{ dr_ignore_error_clean }}"
+ tags:
+ - fail_back
+ always:
+ - name: Revoke the SSO token
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/README.md b/ansible_collections/ovirt/ovirt/roles/engine_setup/README.md
new file mode 100644
index 000000000..78e77cc97
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/README.md
@@ -0,0 +1,169 @@
+oVirt Engine Setup
+==================
+
+Installs the packages required for oVirt Engine deployment, generates an
+answerfile and runs `engine-setup`.
+Optionally, the role updates the oVirt Engine packages.
+
+Role Variables
+--------------
+
+By default, engine_setup uses an answer file specific to the version of oVirt,
+selected by the ``ovirt_engine_setup_version`` parameter. You can provide your own
+answer file via the ``ovirt_engine_setup_answer_file_path`` variable.
+
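+For example, a play can point the role at a custom answer file (a minimal
+sketch; the answer file path is a placeholder):
+
+```yaml
+- name: Setup oVirt with a custom answer file
+  hosts: engine
+  vars:
+    ovirt_engine_setup_answer_file_path: /path/to/custom_answerfile.txt.j2
+  roles:
+    - engine_setup
+  collections:
+    - ovirt.ovirt
+```
+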
+* Common options for role:
+
+| Name | Default value | Description |
+|---------------------------------|-----------------------|-----------------------------------------------------------|
+| ovirt_engine_setup_answer_file_path | UNDEF | Path to custom answerfile for `engine-setup`. |
+| ovirt_engine_setup_use_remote_answer_file | False | If `True`, use the answer file's path on the remote machine. This option should be used if the installation occurs on the remote machine and the answer file is located there as well. |
+| ovirt_engine_setup_update_setup_packages | False | If `True`, setup packages will be updated before `engine-setup` is executed. This makes sense if the Engine has already been installed. |
+| ovirt_engine_setup_perform_upgrade | False | If `True`, this role is used to perform an upgrade. |
+| ovirt_engine_setup_product_type | oVirt | One of ["oVirt", "RHV"], case insensitive. |
+| ovirt_engine_setup_offline | False | If `True`, updates for all packages will be disabled. |
+| ovirt_engine_setup_restore_engine_cleanup | False | If `True`, remove the configuration files and clean the database associated with the Engine. Relevant only when `ovirt_engine_setup_restore_file` is defined. |
+| ovirt_engine_setup_restore_file | UNDEF | Restore the engine from a backup file created with `engine-backup`. |
+| ovirt_engine_setup_restore_scopes | UNDEF | List of restore scopes; the following values are available: ["all", "files", "db", "dwhdb", "cinderlibdb"]. |
+| ovirt_engine_setup_restore_options | {} | Dictionary of engine restore options, appended as "`--key`=`value`" when `value` is not empty, otherwise as "`--key`" only. |
+| ovirt_engine_setup_validate_certs | UNDEF | If `True`, setup will validate the engine certificates when checking the engine health status page. |
+
+* Common options for engine:
+
+| Name | Default value | Description |
+|---------------------------------|-----------------------|-----------------------------------------------------------|
+| ovirt_engine_setup_version | 4.5 | Allowed versions: [4.1, 4.2, 4.3, 4.4, 4.5]. |
+| ovirt_engine_setup_package_list | [] | List of extra packages to be installed on engine apart from `ovirt-engine` package. |
+| ovirt_engine_setup_fqdn | UNDEF | Host fully qualified DNS name of the server. |
+| ovirt_engine_setup_organization | UNDEF | Organization name for certificate. |
+| ovirt_engine_setup_firewall_manager | firewalld | Specify the type of firewall manager to configure on the Engine host; the following values are available: `firewalld`, `iptables`, or an empty value (`null`) to skip firewall configuration. |
+| ovirt_engine_setup_require_rollback | UNDEF | If `True`, setup will require to be able to rollback new packages in case of a failure. If not specified, the default answer from `engine-setup` will be used. Valid for updating/upgrading. |
+| ovirt_engine_setup_admin_password | UNDEF | Password for the automatically created administrative user of the oVirt Engine. |
+| ovirt_engine_setup_wait_running_tasks | False | If `True`, engine-setup will wait for running tasks to finish. Valid for `ovirt_engine_setup_version` >= 4.2. |
+| ovirt_engine_cinderlib_enable | False | If `True`, cinderlib is enabled. Valid for `ovirt_engine_setup_version` >= 4.3. |
+| ovirt_engine_grafana_enable | True | If `True`, Grafana integration will be set up. Valid for `ovirt_engine_setup_version` >= 4.4. |
+| ovirt_engine_setup_skip_renew_pki_confirm | True | If `True`, PKI renewal will be skipped. |
+| ovirt_engine_setup_engine_configs | [] | List of dictionaries with keys `key`, `value` and `version`. engine-config will be called with the parameters "-s `key`=`value`"; when `version` is specified, "--cver=`version`" is appended to the call. An example follows this table. |
+
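+For instance (an illustrative entry), `{key: SpiceProxyDefault, value: 'prot://proxy', version: general}` results in the call `engine-config -s SpiceProxyDefault='prot://proxy' --cver=general`.
+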
+* Engine Database:
+
+| Name | Default value | Description |
+|---------------------------------|-----------------------|-----------------------------------------------------------|
+| ovirt_engine_setup_db_host | localhost | IP address or host name of a PostgreSQL server for Engine database. By default the database will be configured on the same host as the Engine. |
+| ovirt_engine_setup_db_port | 5432 | Engine database port. |
+| ovirt_engine_setup_db_name | engine | Engine database name. |
+| ovirt_engine_setup_db_user | engine | Engine database user. |
+| ovirt_engine_setup_db_password | UNDEF | Engine database password. |
+| ovirt_engine_setup_engine_vacuum_full | False | Used only when upgrading. If `True`, engine database vacuum will be performed before upgrade. |
+
+* Engine Data Warehouse Database:
+
+| Name | Default value | Description |
+|---------------------------------|-----------------------|-----------------------------------------------------------|
+| ovirt_engine_setup_dwh_db_configure | True | If `True`, the DWH database will be configured using the values below. |
+| ovirt_engine_setup_dwh_db_host | localhost | IP address or host name of a PostgreSQL server for DWH database. By default the DWH database will be configured on the same host as the Engine. |
+| ovirt_engine_setup_dwh_db_port | 5432 | DWH database port. |
+| ovirt_engine_setup_dwh_db_name | ovirt_engine_history | DWH database name. |
+| ovirt_engine_setup_dwh_db_user | ovirt_engine_history | DWH database user. |
+| ovirt_engine_setup_dwh_db_password | UNDEF | DWH database password. |
+| ovirt_engine_setup_dwh_vacuum_full | False | Used only when upgrading. If `True`, DWH database vacuum will be performed before upgrade. |
+
+* OVN related options:
+
+| Name | Default value | Description |
+|---------------------------------|-----------------------|-----------------------------------------------------------|
+| ovirt_engine_setup_provider_ovn_configure| True | If `True`, OVN provider will be configured. Valid for `ovirt_engine_setup_version` >= 4.2. |
+| ovirt_engine_setup_provider_ovn_username | admin@internal | Username for OVN. |
+| ovirt_engine_setup_provider_ovn_password | UNDEF | Password for OVN. |
+
+* Apache related options:
+
+| Name | Default value | Description |
+|---------------------------------|-----------------------|-----------------------------------------------------------|
+| ovirt_engine_setup_apache_config_root_redirection | True | If `True`, `engine-setup` will configure the default page in Apache to automatically redirect clients to ovirt-engine default page. |
+| ovirt_engine_setup_apache_config_ssl | True | If `False`, `engine-setup` will not configure Apache SSL settings and administrators will need to configure it manually. |
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+```yaml
+---
+# Example of oVirt setup:
+- name: Setup oVirt
+ hosts: engine
+ vars_files:
+ # Contains encrypted `ovirt_engine_setup_admin_password` variable using ansible-vault
+ - passwords.yml
+ vars:
+ ovirt_engine_setup_version: '4.5'
+ ovirt_engine_setup_organization: 'of.ovirt.engine.com'
+ roles:
+ - engine_setup
+ collections:
+ - ovirt.ovirt
+
+
+# Example of RHV setup:
+- name: Setup RHV
+ hosts: engine
+ vars_files:
+ # Contains encrypted `ovirt_engine_setup_admin_password` variable using ansible-vault
+ - passwords.yml
+ vars:
+ ovirt_engine_setup_version: '4.5'
+ ovirt_engine_setup_organization: 'rhv.redhat.com'
+ ovirt_engine_setup_product_type: 'rhv'
+ roles:
+ - engine_setup
+ collections:
+ - ovirt.ovirt
+
+
+# Example of oVirt setup with engine_configs:
+- name: Setup oVirt
+ hosts: engine
+ vars_files:
+ # Contains encrypted `ovirt_engine_setup_admin_password` variable using ansible-vault
+ - passwords.yml
+ vars:
+ ovirt_engine_setup_version: '4.5'
+ ovirt_engine_setup_organization: 'of.ovirt.engine.com'
+ ovirt_engine_setup_engine_configs:
+ - key: SpiceProxyDefault
+ value: prot://proxy
+ version: general
+
+ roles:
+ - engine_setup
+ collections:
+ - ovirt.ovirt
+
+
+# Example of oVirt engine restore from file with cleanup engine before:
+- name: Restore oVirt engine
+ hosts: engine
+ vars_files:
+ # Contains encrypted `ovirt_engine_setup_admin_password` variable using ansible-vault
+ - passwords.yml
+ vars:
+ ovirt_engine_setup_version: '4.5'
+ ovirt_engine_setup_organization: 'of.ovirt.engine.com'
+ ovirt_engine_setup_restore_engine_cleanup: true
+ ovirt_engine_setup_restore_file: '/path/to/backup.file'
+ ovirt_engine_setup_restore_scopes:
+ - 'files'
+ - 'db'
+ ovirt_engine_setup_restore_options:
+ log: '/path/to/file.log'
+ restore-permissions: ''
+ provision-all-databases: ''
+ roles:
+ - engine_setup
+ collections:
+ - ovirt.ovirt
+```
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/defaults/main.yml b/ansible_collections/ovirt/ovirt/roles/engine_setup/defaults/main.yml
new file mode 100644
index 000000000..bd86c0821
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/defaults/main.yml
@@ -0,0 +1,41 @@
+---
+ovirt_engine_setup_version: '4.5'
+
+ovirt_engine_setup_provider_ovn_configure: true
+ovirt_engine_setup_provider_ovn_username: 'admin@internal'
+
+ovirt_engine_setup_db_host: 'localhost'
+ovirt_engine_setup_db_port: 5432
+ovirt_engine_setup_db_name: 'engine'
+ovirt_engine_setup_db_user: 'engine'
+ovirt_engine_setup_engine_vacuum_full: false
+
+ovirt_engine_setup_dwh_db_configure: true
+ovirt_engine_setup_dwh_db_host: 'localhost'
+ovirt_engine_setup_dwh_db_port: 5432
+ovirt_engine_setup_dwh_db_name: 'ovirt_engine_history'
+ovirt_engine_setup_dwh_db_user: 'ovirt_engine_history'
+ovirt_engine_setup_dwh_vacuum_full: false
+
+ovirt_engine_grafana_enable: true
+
+ovirt_engine_setup_firewall_manager: 'firewalld'
+
+# This option is suggested by the oVirt documentation
+# https://www.ovirt.org/documentation/install-guide/chap-Installing_oVirt/
+ovirt_engine_setup_update_setup_packages: false
+ovirt_engine_setup_offline: false
+
+ovirt_engine_setup_product_type: oVirt
+ovirt_engine_setup_package_list: []
+ovirt_engine_setup_use_remote_answer_file: false
+
+ovirt_engine_setup_perform_upgrade: false
+
+ovirt_engine_apache_config_ssl: true
+ovirt_engine_apache_config_root_redirection: true
+
+ovirt_engine_setup_restore_engine_cleanup: false
+ovirt_engine_setup_restore_options: {}
+
+ovirt_engine_setup_skip_renew_pki_confirm: true
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/examples/engine-deploy.yml b/ansible_collections/ovirt/ovirt/roles/engine_setup/examples/engine-deploy.yml
new file mode 100644
index 000000000..8ed996833
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/examples/engine-deploy.yml
@@ -0,0 +1,18 @@
+---
+- name: Setup ovirt repositories and deploy oVirt Engine
+ hosts: engine
+ vars_files:
+ - passwords.yml
+ vars:
+ ovirt_engine_setup_product_type: 'ovirt'
+ ovirt_engine_setup_version: "4.5"
+ ovirt_engine_setup_organization: "example.com"
+ ovirt_engine_setup_dwh_db_host: "localhost"
+ ovirt_engine_setup_configure_iso_domain: true
+ ovirt_engine_setup_firewall_manager: null
+ ovirt_repositories_ovirt_release_rpm: "http://plain.resources.ovirt.org/pub/yum-repo/ovirt-release44.rpm"
+ roles:
+ - role: repositories
+ - role: engine_setup
+ collections:
+ - ovirt.ovirt
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/examples/engine-upgrade.yml b/ansible_collections/ovirt/ovirt/roles/engine_setup/examples/engine-upgrade.yml
new file mode 100644
index 000000000..6cf61fda2
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/examples/engine-upgrade.yml
@@ -0,0 +1,19 @@
+---
+- name: Setup ovirt repositories, deploy oVirt Engine and then upgrade it
+ hosts: engine
+ vars_files:
+ - passwords.yml
+ vars:
+ ovirt_engine_setup_product_type: "ovirt"
+ ovirt_engine_setup_version: "{{ ovirt_engine_setup_version }}"
+ ovirt_engine_setup_organization: "example.com"
+ ovirt_engine_setup_configure_iso_domain: true
+ ovirt_engine_setup_firewall_manager: null
+ ovirt_repositories_ovirt_release_rpm: "{{ ovirt_repositories_ovirt_release_rpm }}"
+ roles:
+ - role: repositories
+ - role: engine_setup
+ ovirt_engine_setup_update_packages: true
+ ovirt_engine_setup_answer_file_path: "answerfile_{{ ovirt_engine_setup_version }}_upgrade.txt.j2"
+ collections:
+ - ovirt.ovirt
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/examples/passwords.yml b/ansible_collections/ovirt/ovirt/roles/engine_setup/examples/passwords.yml
new file mode 100644
index 000000000..80d6df40f
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/examples/passwords.yml
@@ -0,0 +1,12 @@
+---
+# As an example this file is kept in plaintext. If you want to
+# encrypt this file, please execute the following command:
+#
+# $ ansible-vault encrypt passwords.yml
+#
+# It will ask you for a password, which you must then pass to
+# ansible interactively when executing the playbook.
+#
+# $ ansible-playbook myplaybook.yml --ask-vault-pass
+#
+ovirt_engine_setup_admin_password: 123456
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/engine_setup.yml b/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/engine_setup.yml
new file mode 100644
index 000000000..e85926eff
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/engine_setup.yml
@@ -0,0 +1,121 @@
+---
+- name: Engine setup block
+ block:
+ - name: Set answer file path
+ ansible.builtin.set_fact:
+ answer_file_path: "/tmp/answerfile-{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}.txt"
+
+ - name: Use the default answerfile
+ ansible.builtin.template:
+ src: answerfile_{{ ovirt_engine_setup_version }}_basic.txt.j2
+ dest: "{{ answer_file_path }}"
+ mode: 0600
+ owner: root
+ group: root
+ when: ovirt_engine_setup_answer_file_path is undefined
+ no_log: true
+
+ - name: Copy custom answer file
+ ansible.builtin.template:
+ src: "{{ ovirt_engine_setup_answer_file_path }}"
+ dest: "{{ answer_file_path }}"
+ mode: 0600
+ owner: root
+ group: root
+ when: ovirt_engine_setup_answer_file_path is defined and (
+ ovirt_engine_setup_use_remote_answer_file is not defined or not
+ ovirt_engine_setup_use_remote_answer_file)
+ no_log: true
+
+ - name: Use remote's answer file
+ ansible.builtin.set_fact:
+ answer_file_path: "{{ ovirt_engine_setup_answer_file_path }}"
+ when: ovirt_engine_setup_use_remote_answer_file | bool
+
+ - name: Update setup packages
+ ansible.builtin.yum:
+ name: "ovirt*setup*"
+ update_only: true
+ state: latest
+ when: ovirt_engine_setup_update_setup_packages or ovirt_engine_setup_perform_upgrade
+ tags:
+ - "skip_ansible_lint" # ANSIBLE0006
+
+ - name: Copy yum configuration file
+ ansible.builtin.copy:
+ src: "/etc/yum.conf"
+ dest: "/tmp/yum.conf"
+ owner: root
+ group: root
+ mode: 0644
+ remote_src: true
+
+ - name: Set 'best' to false
+ ansible.builtin.replace:
+ path: "/tmp/yum.conf"
+ regexp: '^best=True'
+ replace: 'best=False'
+ owner: root
+ group: root
+ mode: 0644
+
+ - name: Update all packages
+ ansible.builtin.yum:
+ name: '*'
+ state: latest
+ conf_file: /tmp/yum.conf
+ when: not ovirt_engine_setup_offline | bool
+ tags:
+ - "skip_ansible_lint" # ANSIBLE0010
+
+ - name: Remove temporary yum configuration file
+ ansible.builtin.file:
+ path: "/tmp/yum.conf"
+ state: absent
+ ignore_errors: true
+
+ - name: Set offline parameter if variable is set
+ ansible.builtin.set_fact:
+ offline: "{{ '--offline' if ovirt_engine_setup_offline | bool else '' }}"
+
+ - name: Restore engine from file
+ include_tasks: restore_engine_from_file.yml
+ when: ovirt_engine_setup_restore_file is defined
+
+ - name: Run engine-setup with answerfile
+ command: "engine-setup --accept-defaults --config-append={{ answer_file_path }} {{ offline }}"
+ tags:
+ - skip_ansible_lint
+
+ - name: Make sure `ovirt-engine` service is running
+ ansible.builtin.service:
+ name: ovirt-engine
+ state: started
+
+ - name: Run engine-config
+ ansible.builtin.command: "engine-config -s {{ item.key }}='{{ item.value }}' {% if item.version is defined %} --cver={{ item.version }} {% endif %}"
+ loop: "{{ ovirt_engine_setup_engine_configs | default([]) }}"
+ tags:
+ - skip_ansible_lint
+
+ - name: Restart engine after engine-config
+ ansible.builtin.service:
+ name: ovirt-engine
+ state: restarted
+ when: ovirt_engine_setup_engine_configs is defined
+
+ - name: Check if Engine health page is up
+ ansible.builtin.uri:
+ validate_certs: "{{ ovirt_engine_setup_validate_certs | default(omit) }}"
+ url: "http://localhost/ovirt-engine/services/health"
+ status_code: 200
+ register: health_page
+ retries: 30
+ delay: 10
+ until: health_page is success
+
+ always:
+ - name: Clean temporary files
+ ansible.builtin.file:
+ path: "{{ answer_file_path }}"
+ state: 'absent'
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/install_packages.yml b/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/install_packages.yml
new file mode 100644
index 000000000..c0003ee10
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/install_packages.yml
@@ -0,0 +1,25 @@
+---
+- name: Install oVirt Engine package
+ ansible.builtin.package:
+ name: "ovirt-engine"
+ state: present
+ when: ovirt_engine_setup_product_type | lower == 'ovirt'
+
+- name: Check if rhevm package is installed
+ ansible.builtin.yum:
+ list: "rhevm"
+ when: ovirt_engine_setup_product_type | lower == 'rhv' and ansible_os_family == 'RedHat'
+ register: rhevm_installed
+
+- name: Install RHV package
+ ansible.builtin.package:
+ name: "{{ 'rhevm' if ovirt_engine_setup_version is version('4.2', '<') else 'rhvm' }}"
+ state: present
+ when: ovirt_engine_setup_product_type | lower == 'rhv' and rhevm_installed.results | default([]) | selectattr(
+ 'yumstate', 'match', 'installed') | list | length == 0
+
+- name: Install rest of the packages required for oVirt Engine deployment
+ ansible.builtin.package:
+ name: "{{ item }}"
+ state: present
+ with_items: "{{ ovirt_engine_setup_package_list }}"
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/main.yml b/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/main.yml
new file mode 100644
index 000000000..00f4ddef4
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/main.yml
@@ -0,0 +1,10 @@
+---
+- name: Perform pre-install checks
+ include_tasks: pre_install_checks.yml
+
+- name: Install required packages for oVirt Engine deployment
+ include_tasks: install_packages.yml
+ when: not ovirt_engine_setup_perform_upgrade|bool and not ovirt_engine_setup_offline|bool
+
+- name: Run engine setup
+ include_tasks: engine_setup.yml
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/pre_install_checks.yml b/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/pre_install_checks.yml
new file mode 100644
index 000000000..2f8289a23
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/pre_install_checks.yml
@@ -0,0 +1,12 @@
+---
+- name: Gather facts on installed packages
+ ansible.builtin.package_facts:
+ manager: rpm
+ no_log: true
+
+- name: Fail when firewall manager is not installed
+ ansible.builtin.fail:
+ msg: '{{ ovirt_engine_setup_firewall_manager }} was chosen as a firewall manager but is not installed'
+ when:
+ - ovirt_engine_setup_firewall_manager not in ansible_facts.packages
+ - ovirt_engine_setup_firewall_manager is not none
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/restore_engine_from_file.yml b/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/restore_engine_from_file.yml
new file mode 100644
index 000000000..ecbc0faa7
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/restore_engine_from_file.yml
@@ -0,0 +1,20 @@
+---
+- name: Run engine cleanup command
+ ansible.builtin.command: "engine-cleanup"
+ when: ovirt_engine_setup_restore_engine_cleanup
+
+- name: Add scopes to restore engine command
+ ansible.builtin.set_fact:
+ restore_cmd: "{{ restore_cmd }} --scope={{ item }}"
+ with_items: "{{ ovirt_engine_setup_restore_scopes | default([]) }}"
+
+- name: Add restore file and restore options
+ ansible.builtin.set_fact:
+ restore_cmd: "{{ restore_cmd }} --{{ item.key }}{% if item.value %}={{ item.value }}{% endif %}"
+ with_dict:
+ - file: "{{ ovirt_engine_setup_restore_file }}"
+ - "{{ ovirt_engine_setup_restore_options | default({}) }}"
+
+- name: Run restore engine from backup file
+ ansible.builtin.command: "{{ restore_cmd }}"
+ changed_when: true
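+
+# For illustration (assumed values, not part of the upstream role): with
+# ovirt_engine_setup_restore_file: /var/backup/engine.bck
+# ovirt_engine_setup_restore_scopes: ['files', 'db']
+# ovirt_engine_setup_restore_options: {log: /tmp/restore.log}
+# the tasks above build and run roughly:
+# engine-backup --mode=restore --scope=files --scope=db --file=/var/backup/engine.bck --log=/tmp/restore.log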
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.1_basic.txt.j2 b/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.1_basic.txt.j2
new file mode 100644
index 000000000..adf48ee4a
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.1_basic.txt.j2
@@ -0,0 +1,3 @@
+{% include "./templates/basic_answerfile.txt.j2" %}
+OVESETUP_DB/engineVacuumFull=bool:{{ ovirt_engine_setup_engine_vacuum_full }}
+OVESETUP_DB/dwhVacuumFull=bool:{{ ovirt_engine_setup_dwh_vacuum_full }}
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.1_upgrade.txt.j2 b/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.1_upgrade.txt.j2
new file mode 100644
index 000000000..a9380c830
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.1_upgrade.txt.j2
@@ -0,0 +1 @@
+{% include "./templates/basic_answerfile.txt.j2" %}
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.2_basic.txt.j2 b/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.2_basic.txt.j2
new file mode 100644
index 000000000..b39bf0a6d
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.2_basic.txt.j2
@@ -0,0 +1,14 @@
+{% include "./templates/basic_answerfile.txt.j2" %}
+OVESETUP_DB/engineVacuumFull=bool:{{ ovirt_engine_setup_engine_vacuum_full }}
+OVESETUP_DB/dwhVacuumFull=bool:{{ ovirt_engine_setup_dwh_vacuum_full }}
+{% if ovirt_engine_setup_provider_ovn_configure is defined and ovirt_engine_setup_provider_ovn_configure %}
+OVESETUP_OVN/ovirtProviderOvn=bool:True
+OVESETUP_OVN/ovirtProviderOvnUser=str:{{ ovirt_engine_setup_provider_ovn_username }}
+{% else %}
+OVESETUP_OVN/ovirtProviderOvn=bool:False
+{% endif %}
+{% if ovirt_engine_setup_provider_ovn_password is defined %}
+OVESETUP_OVN/ovirtProviderOvnPassword=str:{{ ovirt_engine_setup_provider_ovn_password }}
+{% else %}
+OVESETUP_OVN/ovirtProviderOvnPassword=none:None
+{% endif %}
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.2_upgrade.txt.j2 b/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.2_upgrade.txt.j2
new file mode 100644
index 000000000..86e6d920b
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.2_upgrade.txt.j2
@@ -0,0 +1,16 @@
+{% include "./templates/basic_answerfile.txt.j2" %}
+{% if ovirt_engine_setup_provider_ovn_configure is defined and ovirt_engine_setup_provider_ovn_configure %}
+OVESETUP_DB/engineVacuumFull=bool:True
+OVESETUP_OVN/ovirtProviderOvn=bool:True
+OVESETUP_OVN/ovirtProviderOvnUser=str:{{ ovirt_engine_setup_provider_ovn_username }}
+{% else %}
+OVESETUP_OVN/ovirtProviderOvn=bool:False
+{% endif %}
+{% if ovirt_engine_setup_provider_ovn_password is defined %}
+OVESETUP_OVN/ovirtProviderOvnPassword=str:{{ ovirt_engine_setup_provider_ovn_password }}
+{% else %}
+OVESETUP_OVN/ovirtProviderOvnPassword=none:None
+{% endif %}
+{% if ovirt_engine_setup_wait_running_tasks is defined %}
+QUESTION/1/OVESETUP_WAIT_RUNNING_TASKS=str:{{ ovirt_engine_setup_wait_running_tasks | ternary('yes','no') }}
+{% endif %}
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.3_basic.txt.j2 b/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.3_basic.txt.j2
new file mode 100644
index 000000000..d3280daaa
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.3_basic.txt.j2
@@ -0,0 +1,11 @@
+{% include "./templates/answerfile_4.2_basic.txt.j2" %}
+{% if ovirt_engine_cinderlib_enable is defined %}
+QUESTION/1/ovirt-cinderlib-enable=str:{{ ovirt_engine_cinderlib_enable | ternary('yes','no') }}
+{% endif %}
+{% if ovirt_engine_apache_config_root_redirection is defined %}
+QUESTION/1/OVESETUP_APACHE_CONFIG_ROOT_REDIRECTION=str:{{ ovirt_engine_apache_config_root_redirection | ternary('yes','no') }}
+{% endif %}
+{% if ovirt_engine_apache_config_ssl is defined %}
+QUESTION/1/OVESETUP_APACHE_CONFIG_SSL=str:{{ ovirt_engine_apache_config_ssl | ternary('automatic','manual') }}
+{% endif %}
+QUESTION/1/OVESETUP_IGNORE_SNAPSHOTS_WITH_OLD_COMPAT_LEVEL=str:yes
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.3_upgrade.txt.j2 b/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.3_upgrade.txt.j2
new file mode 100644
index 000000000..46771fe07
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.3_upgrade.txt.j2
@@ -0,0 +1 @@
+{% include "./templates/answerfile_4.2_upgrade.txt.j2" %}
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.4_basic.txt.j2 b/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.4_basic.txt.j2
new file mode 100644
index 000000000..1ac711274
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.4_basic.txt.j2
@@ -0,0 +1,4 @@
+{% include "./templates/answerfile_4.3_basic.txt.j2" %}
+
+OVESETUP_GRAFANA_CORE/enable=bool:{{ ovirt_engine_grafana_enable }}
+QUESTION/1/OVESETUP_SKIP_RENEW_PKI_CONFIRM=str:{{ ovirt_engine_setup_skip_renew_pki_confirm | ternary('yes','no') }}
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.4_upgrade.txt.j2 b/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.4_upgrade.txt.j2
new file mode 100644
index 000000000..333fc039f
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.4_upgrade.txt.j2
@@ -0,0 +1,2 @@
+{% include "./templates/answerfile_4.3_upgrade.txt.j2" %}
+QUESTION/1/OVESETUP_SKIP_RENEW_PKI_CONFIRM=str:{{ ovirt_engine_setup_skip_renew_pki_confirm | ternary('yes','no') }}
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.5_basic.txt.j2 b/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.5_basic.txt.j2
new file mode 100644
index 000000000..f70502806
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.5_basic.txt.j2
@@ -0,0 +1 @@
+{% include "./templates/answerfile_4.4_basic.txt.j2" %}
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.5_upgrade.txt.j2 b/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.5_upgrade.txt.j2
new file mode 100644
index 000000000..28a992027
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.5_upgrade.txt.j2
@@ -0,0 +1 @@
+{% include "./templates/answerfile_4.4_upgrade.txt.j2" %}
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/basic_answerfile.txt.j2 b/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/basic_answerfile.txt.j2
new file mode 100644
index 000000000..c4667d356
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/basic_answerfile.txt.j2
@@ -0,0 +1,64 @@
+[environment:default]
+{% if ovirt_engine_setup_fqdn is defined %}
+OVESETUP_CONFIG/fqdn=str:{{ ovirt_engine_setup_fqdn }}
+{% endif %}
+{% if ovirt_engine_setup_firewall_manager %}
+OVESETUP_CONFIG/updateFirewall=bool:True
+OVESETUP_CONFIG/firewallManager=str:{{ ovirt_engine_setup_firewall_manager }}
+{% else %}
+OVESETUP_CONFIG/updateFirewall=bool:False
+OVESETUP_CONFIG/firewallManager=none:None
+{% endif %}
+{% if ovirt_engine_setup_require_rollback is defined %}
+OSETUP_RPMDISTRO/requireRollback=bool:{{ovirt_engine_setup_require_rollback}}
+{% else %}
+OSETUP_RPMDISTRO/requireRollback=none:None
+{% endif %}
+OVESETUP_DB/host=str:{{ovirt_engine_setup_db_host}}
+OVESETUP_DB/user=str:{{ovirt_engine_setup_db_user}}
+OVESETUP_SYSTEM/memCheckEnabled=bool:False
+{% if ovirt_engine_setup_db_password is defined %}
+OVESETUP_DB/password=str:{{ovirt_engine_setup_db_password}}
+{% else %}
+OVESETUP_DB/password=none:None
+{% endif %}
+OVESETUP_DB/database=str:{{ovirt_engine_setup_db_name}}
+OVESETUP_DB/port=int:{{ovirt_engine_setup_db_port}}
+{% if ovirt_engine_setup_dwh_db_configure is defined and ovirt_engine_setup_dwh_db_configure %}
+OVESETUP_DWH_DB/secured=bool:False
+OVESETUP_DWH_DB/host=str:{{ovirt_engine_setup_dwh_db_host}}
+OVESETUP_DWH_DB/user=str:{{ovirt_engine_setup_dwh_db_user}}
+OVESETUP_DWH_DB/database=str:{{ovirt_engine_setup_dwh_db_name}}
+OVESETUP_DWH_DB/port=int:{{ovirt_engine_setup_dwh_db_port}}
+{% else %}
+OVESETUP_DWH_DB/secured=none:None
+OVESETUP_DWH_DB/host=none:None
+OVESETUP_DWH_DB/user=none:None
+OVESETUP_DWH_DB/password=none:None
+OVESETUP_DWH_DB/database=none:None
+OVESETUP_DWH_DB/port=none:None
+{% endif %}
+{% if ovirt_engine_setup_dwh_db_password is defined %}
+OVESETUP_DWH_DB/password=str:{{ovirt_engine_setup_dwh_db_password}}
+{% else %}
+OVESETUP_DWH_DB/password=none:None
+{% endif %}
+{% if ovirt_engine_setup_dwh_db_configure is defined and ovirt_engine_setup_dwh_db_configure %}
+OVESETUP_DWH_DB/securedHostValidation=bool:False
+{% else %}
+OVESETUP_DWH_DB/securedHostValidation=none:None
+{% endif %}
+{% if ovirt_engine_setup_organization is defined %}
+OVESETUP_PKI/organization=str:{{ ovirt_engine_setup_organization }}
+{% endif %}
+OVESETUP_CONFIG/adminPassword=str:{{ ovirt_engine_setup_admin_password }}
+{% if ovirt_engine_setup_dwh_db_configure is defined and ovirt_engine_setup_dwh_db_configure %}
+OVESETUP_DWH_CORE/enable=bool:True
+{% else %}
+OVESETUP_DWH_CORE/enable=bool:False
+{% endif %}
+{% if ovirt_engine_setup_dwh_db_configure is defined and ovirt_engine_setup_dwh_db_configure %}
+OVESETUP_DWH_PROVISIONING/postgresProvisioningEnabled=bool:True
+{% else %}
+OVESETUP_DWH_PROVISIONING/postgresProvisioningEnabled=bool:False
+{% endif %}
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/containers-deploy.yml b/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/containers-deploy.yml
new file mode 100644
index 000000000..f40955a8d
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/containers-deploy.yml
@@ -0,0 +1,17 @@
+---
+- name: Bring up docker containers
+ hosts: localhost
+ gather_facts: false
+ roles:
+ - role: provision_docker
+ provision_docker_inventory_group: "{{ groups['engine'] }}"
+
+- name: "Update python because of ovirt-imageio-proxy"
+ hosts: engine
+ tasks:
+ - name: Update python
+ yum:
+ name: python-libs
+ state: latest
+ tags:
+ - skip_ansible_lint # ANSIBLE0010
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/engine-deploy.yml b/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/engine-deploy.yml
new file mode 100644
index 000000000..3a746db10
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/engine-deploy.yml
@@ -0,0 +1,17 @@
+---
+- name: Run ovirt-ansible roles on containerized environments
+ hosts: engine
+ vars_files:
+ - passwords.yml
+ vars:
+ ovirt_engine_setup_version: "{{ ovirt_engine_setup_version }}"
+ ovirt_engine_setup_organization: "example.com"
+ ovirt_engine_setup_dwh_db_configure: true
+ ovirt_engine_setup_dwh_db_host: "localhost"
+ ovirt_engine_setup_firewall_manager: null
+ ovirt_repositories_ovirt_release_rpm: "{{ ovirt_release_rpm }}"
+ roles:
+ - role: repositories
+ - role: engine_setup
+ collections:
+ - ovirt.ovirt
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/engine-upgrade.yml b/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/engine-upgrade.yml
new file mode 100644
index 000000000..87c5f06c2
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/engine-upgrade.yml
@@ -0,0 +1,17 @@
+---
+- name: Run ovirt-ansible roles on containerized environments
+ hosts: engine
+ vars_files:
+ - passwords.yml
+ vars:
+ ovirt_engine_setup_version: "{{ ovirt_engine_setup_version }}"
+ ovirt_engine_setup_organization: "example.com"
+ ovirt_engine_setup_firewall_manager: null
+ ovirt_engine_setup_dwh_db_configure: false
+ ovirt_repositories_ovirt_release_rpm: "{{ ovirt_release_rpm }}"
+ roles:
+ - role: repositories
+ - role: engine_setup
+ ovirt_engine_setup_answer_file_path: "answerfile_{{ ovirt_engine_setup_version }}_upgrade.txt.j2"
+ collections:
+ - ovirt.ovirt
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/inventory b/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/inventory
new file mode 100644
index 000000000..9a315cacb
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/inventory
@@ -0,0 +1,4 @@
+localhost ansible_connection=local ansible_python_interpreter="/usr/bin/env python"
+
+[engine]
+engine_centos7 image="katerinak/c7-systemd-utf8"
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/passwords.yml b/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/passwords.yml
new file mode 100644
index 000000000..80d6df40f
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/passwords.yml
@@ -0,0 +1,12 @@
+---
+# As an example this file is kept in plaintext. If you want to
+# encrypt this file, please execute the following command:
+#
+# $ ansible-vault encrypt passwords.yml
+#
+# It will ask you for a password, which you must then pass to
+# ansible interactively when executing the playbook.
+#
+# $ ansible-playbook myplaybook.yml --ask-vault-pass
+#
+ovirt_engine_setup_admin_password: 123456
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/requirements.yml b/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/requirements.yml
new file mode 100644
index 000000000..159e73f9c
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/requirements.yml
@@ -0,0 +1,4 @@
+---
+- src: chrismeyersfsu.provision_docker
+ name: provision_docker
+- src: oVirt.repositories
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/test-4.2.yml b/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/test-4.2.yml
new file mode 100644
index 000000000..206346346
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/test-4.2.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: containers-deploy.yml
+- import_playbook: engine-deploy.yml
+ vars:
+ ovirt_engine_setup_version: "4.2"
+ ovirt_release_rpm: "http://plain.resources.ovirt.org/pub/yum-repo/ovirt-release42.rpm"
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/test-master.yml b/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/test-master.yml
new file mode 100644
index 000000000..886abf0e8
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/test-master.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: containers-deploy.yml
+- import_playbook: engine-deploy.yml
+ vars:
+ ovirt_engine_setup_version: "4.5"
+ ovirt_release_rpm: "http://plain.resources.ovirt.org/pub/yum-repo/ovirt-release-master.rpm"
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/test-upgrade-4.2-to-master.yml b/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/test-upgrade-4.2-to-master.yml
new file mode 100644
index 000000000..32516116b
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/test-upgrade-4.2-to-master.yml
@@ -0,0 +1,10 @@
+---
+- import_playbook: containers-deploy.yml
+- import_playbook: engine-deploy.yml
+ vars:
+ ovirt_engine_setup_version: "4.2"
+ ovirt_release_rpm: "http://plain.resources.ovirt.org/pub/yum-repo/ovirt-release42.rpm"
+- import_playbook: engine-upgrade.yml
+ vars:
+ ovirt_engine_setup_version: "4.3"
+ ovirt_release_rpm: "http://plain.resources.ovirt.org/pub/yum-repo/ovirt-release-master.rpm"
diff --git a/ansible_collections/ovirt/ovirt/roles/engine_setup/vars/main.yml b/ansible_collections/ovirt/ovirt/roles/engine_setup/vars/main.yml
new file mode 100644
index 000000000..e5e727253
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/engine_setup/vars/main.yml
@@ -0,0 +1,2 @@
+---
+restore_cmd: 'engine-backup --mode=restore'
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/README.md b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/README.md
new file mode 100644
index 000000000..af991eb4c
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/README.md
@@ -0,0 +1,393 @@
+# ovirt-ansible-hosted-engine-setup
+
+Ansible role for deploying oVirt Hosted-Engine
+
+# Requirements
+
+Ansible Core >= 2.12.0 and < 2.13.0
+
+# Prerequisites
+
+* A fully qualified domain name prepared for your Engine and the host. Forward and reverse lookup records must both be set in the DNS.
+* `/var/tmp` has at least 5 GB of free space.
+* Unless you are using Gluster, you must have prepared storage for your Hosted-Engine environment (choose one):
+ * [Prepare NFS Storage](https://ovirt.org/documentation/admin-guide/chap-Storage/#preparing-nfs-storage)
+ * [Prepare ISCSI Storage](https://ovirt.org/documentation/admin-guide/chap-Storage/#preparing-iscsi-storage)
+
+# Role variables
+
+## General Variables
+
+| Name | Default value | Description |
+|---------------------------------|-----------------------|-----------------------------------------------------------|
+| he_bridge_if | eth0 | The network interface the oVirt management bridge will be configured on |
+| he_fqdn | null | The engine FQDN as configured in the DNS |
+| he_mem_size_MB | max | The amount of memory used on the engine VM |
+| he_reserved_memory_MB | 512 | The amount of memory reserved for the host |
+| he_vcpus | max | The number of CPUs used on the engine VM |
+| he_disk_size_GB | 61 | Disk size of the engine VM |
+| he_vm_mac_addr | null | MAC address of the engine VM network interface |
+| he_domain_type | null | Storage domain type; available options: *nfs*, *iscsi*, *glusterfs*, *fc* |
+| he_storage_domain_addr | null | Storage domain IP/DNS address |
+| he_ansible_host_name | localhost | Hostname in use on the first HE host (not necessarily the Ansible controller one) |
+| he_restore_from_file | null | A backup file created with engine-backup, to be restored on the fly |
+| he_pki_renew_on_restore | false | Renew engine PKI on restore if needed |
+| he_enable_keycloak | true | Configure keycloak on the engine if possible |
+| he_cluster | Default | Name of the cluster with hosted-engine hosts |
+| he_cluster_cpu_type | null | Cluster CPU type to be used in the hosted-engine cluster (the same as the HE host or lower) |
+| he_cluster_comp_version | null | Compatibility version of the hosted-engine cluster. Default value is the latest compatibility version |
+| he_data_center | Default | Name of the data center with hosted-engine hosts |
+| he_data_center_comp_version | null | Compatibility version of the hosted-engine data center. Default value is the latest compatibility version |
+| he_host_name | $(hostname -f) | Human-readable name used by the engine for the first host |
+| he_host_address | $(hostname -f) | FQDN or IP address used by the engine for the first host |
+| he_bridge_if | null | Interface used for the management bridge |
+| he_force_ip4 | false | Force resolving engine FQDN to ipv4 only using DNS server |
+| he_force_ip6 | false | Force resolving engine FQDN to ipv6 only using DNS server |
+| he_apply_openscap_profile | false | Apply an OpenSCAP security profile on HE VM |
+| he_openscap_profile_name | stig | OpenSCAP profile name, available options: *stig*, *pci-dss*. Requires `he_apply_openscap_profile` to be `True` |
+| he_enable_fips | false | Enable FIPS on HE VM |
+| he_network_test | dns | The type of network connectivity check performed by ovirt-hosted-engine-ha and ovirt-hosted-engine-setup; available options: *dns*, *ping*, *tcp* or *none* |
+| he_tcp_t_address | null | Hostname to connect to if he_network_test is *tcp* |
+| he_tcp_t_port | null | Port to connect to if he_network_test is *tcp* |
+| he_pause_host | false | Pause the execution to let the user interactively fix host configuration |
+| he_pause_after_failed_add_host | true | Pause the execution if Add Host failed with status non_operational, to let the user interactively fix host configuration |
+| he_pause_after_failed_restore | true | Pause the execution if engine-backup --mode=restore failed, to let the user handle this manually |
+| he_pause_before_engine_setup | false | Pause the execution and allow the user to make configuration changes to the bootstrap engine VM before running `engine-setup` |
+| he_offline_deployment | false | If `True`, updates for all packages will be disabled |
+| he_additional_package_list | [] | List of additional packages to be installed on engine VM apart from ovirt-engine package |
+| he_debug_mode | false | If `True`, HE deployment will execute additional tasks for debug |
+| he_db_password | UNDEF | Engine database password |
+| he_dwh_db_password | UNDEF | DWH database password |
+
+## NFS / Gluster Variables
+
+| Name | Default value | Description |
+|---------------------------------|-----------------------|-----------------------------------------------------------|
+| he_mount_options | '' | NFS mount options |
+| he_storage_domain_path | null | Shared folder path on the NFS server |
+| he_nfs_version | auto | NFS version; available options: *auto*, *v4*, *v3*, *v4_0*, *v4_1*, *v4_2* |
+| he_storage_if | null | The network interface name that is connected to the storage network, assumed to be pre-configured |
+
+
+## ISCSI Variables
+
+| Name | Default value | Description |
+|---------------------------------|-----------------------|-----------------------------------------------------------|
+| he_iscsi_username | null | iSCSI username |
+| he_iscsi_password | null | iSCSI password |
+| he_iscsi_target | null | iSCSI target |
+| he_lun_id | null | LUN ID |
+| he_iscsi_portal_port | null | iSCSI portal port |
+| he_iscsi_portal_addr | null | iSCSI portal address (just for interactive iSCSI discovery; use he_storage_domain_addr for the deployment) |
+| he_iscsi_tpgt | null | iSCSI TPGT |
+| he_discard | false | Discard the whole disk space when removed. More info [here](https://ovirt.org/develop/release-management/features/storage/discard-after-delete/) |
+
+## Static IP Configuration Variables
+
+DHCP configuration is used on the engine VM by default. However, if you would like to use a static IP instead,
+define the following variables:
+
+| Name | Default value | Description |
+|---------------------------------|-----------------------|-----------------------------------------------------------|
+| he_vm_ip_addr | null | Engine VM IP address |
+| he_vm_ip_prefix | null | Engine VM IP prefix |
+| he_dns_addr | null | Engine VM DNS server |
+| he_gateway | null | Engine VM default gateway |
+| he_vm_etc_hosts | false | Add the engine VM IP and FQDN to /etc/hosts on the host |
+
+# Example Playbook
+This is a simple example of deploying Hosted-Engine with an NFS storage domain.
+
+This role can be used to deploy on localhost (the Ansible controller itself) or on a remote host (make sure to set he_ansible_host_name correctly).
+All the playbooks can be found inside the `examples/` folder.
+
+## hosted_engine_deploy_localhost.yml
+
+```yml
+---
+- name: Deploy oVirt hosted engine
+ hosts: localhost
+ connection: local
+ roles:
+ - role: hosted_engine_setup
+ collections:
+ - ovirt.ovirt
+```
+
+## hosted_engine_deploy_remotehost.yml
+
+```yml
+---
+- name: Deploy oVirt hosted engine
+ hosts: host123.localdomain
+ roles:
+ - role: hosted_engine_setup
+ collections:
+ - ovirt.ovirt
+```
+
+## passwords.yml
+
+```yml
+---
+# As an example this file is kept in plaintext. If you want to
+# encrypt this file, please execute the following command:
+#
+# $ ansible-vault encrypt passwords.yml
+#
+# It will ask you for a password, which you must then pass to
+# ansible interactively when executing the playbook.
+#
+# $ ansible-playbook myplaybook.yml --ask-vault-pass
+#
+he_appliance_password: 123456
+he_admin_password: 123456
+```
+
+## Example 1: extra vars for NFS deployment with DHCP - he_deployment.json
+
+```json
+{
+ "he_bridge_if": "eth0",
+ "he_fqdn": "he-engine.example.com",
+ "he_vm_mac_addr": "00:a5:3f:66:ba:12",
+ "he_domain_type": "nfs",
+ "he_storage_domain_addr": "192.168.100.50",
+ "he_storage_domain_path": "/var/nfs_folder"
+}
+```
+
+## Example 2: extra vars for iSCSI deployment with static IP, remote host - he_deployment_remote.json
+
+```json
+{
+ "he_bridge_if": "eth0",
+ "he_fqdn": "he-engine.example.com",
+ "he_vm_ip_addr": "192.168.1.214",
+ "he_vm_ip_prefix": "24",
+ "he_gateway": "192.168.1.1",
+ "he_dns_addr": "192.168.1.1",
+ "he_vm_etc_hosts": true,
+ "he_vm_mac_addr": "00:a5:3f:66:ba:12",
+ "he_domain_type": "iscsi",
+ "he_storage_domain_addr": "192.168.1.125",
+ "he_iscsi_portal_port": "3260",
+ "he_iscsi_tpgt": "1",
+ "he_iscsi_target": "iqn.2017-10.com.redhat.stirabos:he",
+ "he_lun_id": "36589cfc000000e8a909165bdfb47b3d9",
+ "he_mem_size_MB": "4096",
+ "he_ansible_host_name": "host123.localdomain"
+}
+```
+
+### Test iSCSI connectivity and get LUN WWID before deploying
+
+```
+[root@c75he20180820h1 ~]# iscsiadm -m node --targetname iqn.2017-10.com.redhat.stirabos:he -p 192.168.1.125:3260 -l
+[root@c75he20180820h1 ~]# iscsiadm -m session -P3
+iSCSI Transport Class version 2.0-870
+version 6.2.0.874-7
+Target: iqn.2017-10.com.redhat.stirabos:data (non-flash)
+ Current Portal: 192.168.1.125:3260,1
+ Persistent Portal: 192.168.1.125:3260,1
+ **********
+ Interface:
+ **********
+ Iface Name: default
+ Iface Transport: tcp
+ Iface Initiatorname: iqn.1994-05.com.redhat:6a4517b3773a
+ Iface IPaddress: 192.168.1.14
+ Iface HWaddress: <empty>
+ Iface Netdev: <empty>
+ SID: 1
+ iSCSI Connection State: LOGGED IN
+ iSCSI Session State: LOGGED_IN
+ Internal iscsid Session State: NO CHANGE
+ *********
+ Timeouts:
+ *********
+ Recovery Timeout: 5
+ Target Reset Timeout: 30
+ LUN Reset Timeout: 30
+ Abort Timeout: 15
+ *****
+ CHAP:
+ *****
+ username: <empty>
+ password: ********
+ username_in: <empty>
+ password_in: ********
+ ************************
+ Negotiated iSCSI params:
+ ************************
+ HeaderDigest: None
+ DataDigest: None
+ MaxRecvDataSegmentLength: 262144
+ MaxXmitDataSegmentLength: 131072
+ FirstBurstLength: 131072
+ MaxBurstLength: 16776192
+ ImmediateData: Yes
+ InitialR2T: Yes
+ MaxOutstandingR2T: 1
+ ************************
+ Attached SCSI devices:
+ ************************
+ Host Number: 3 State: running
+ scsi3 Channel 00 Id 0 Lun: 2
+ Attached scsi disk sdb State: running
+ scsi3 Channel 00 Id 0 Lun: 3
+ Attached scsi disk sdc State: running
+Target: iqn.2017-10.com.redhat.stirabos:he (non-flash)
+ Current Portal: 192.168.1.125:3260,1
+ Persistent Portal: 192.168.1.125:3260,1
+ **********
+ Interface:
+ **********
+ Iface Name: default
+ Iface Transport: tcp
+ Iface Initiatorname: iqn.1994-05.com.redhat:6a4517b3773a
+ Iface IPaddress: 192.168.1.14
+ Iface HWaddress: <empty>
+ Iface Netdev: <empty>
+ SID: 4
+ iSCSI Connection State: LOGGED IN
+ iSCSI Session State: LOGGED_IN
+ Internal iscsid Session State: NO CHANGE
+ *********
+ Timeouts:
+ *********
+ Recovery Timeout: 5
+ Target Reset Timeout: 30
+ LUN Reset Timeout: 30
+ Abort Timeout: 15
+ *****
+ CHAP:
+ *****
+ username: <empty>
+ password: ********
+ username_in: <empty>
+ password_in: ********
+ ************************
+ Negotiated iSCSI params:
+ ************************
+ HeaderDigest: None
+ DataDigest: None
+ MaxRecvDataSegmentLength: 262144
+ MaxXmitDataSegmentLength: 131072
+ FirstBurstLength: 131072
+ MaxBurstLength: 16776192
+ ImmediateData: Yes
+ InitialR2T: Yes
+ MaxOutstandingR2T: 1
+ ************************
+ Attached SCSI devices:
+ ************************
+ Host Number: 6 State: running
+ scsi6 Channel 00 Id 0 Lun: 0
+ Attached scsi disk sdd State: running
+ scsi6 Channel 00 Id 0 Lun: 1
+ Attached scsi disk sde State: running
+[root@c75he20180820h1 ~]# lsblk /dev/sdd
+NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
+sdd 8:48 0 100G 0 disk
+└─36589cfc000000e8a909165bdfb47b3d9 253:10 0 100G 0 mpath
+[root@c75he20180820h1 ~]# lsblk /dev/sde
+NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
+sde 8:64 0 10G 0 disk
+└─36589cfc000000ab67ee1427370d68436 253:0 0 10G 0 mpath
+[root@c75he20180820h1 ~]# /lib/udev/scsi_id --page=0x83 --whitelisted --device=/dev/sdd
+36589cfc000000e8a909165bdfb47b3d9
+[root@c75he20180820h1 ~]# iscsiadm -m node --targetname iqn.2017-10.com.redhat.stirabos:he -p 192.168.1.125:3260 -u
+Logging out of session [sid: 4, target: iqn.2017-10.com.redhat.stirabos:he, portal: 192.168.1.125,3260]
+Logout of [sid: 4, target: iqn.2017-10.com.redhat.stirabos:he, portal: 192.168.1.125,3260] successful.
+```
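+
+Note that the WWID reported by scsi_id for the chosen LUN (here `36589cfc000000e8a909165bdfb47b3d9`) is exactly the value to pass as `he_lun_id` in the deployment variables.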
+
+# Usage
+1. Check that all the prerequisites and requirements are met.
+2. Encrypt passwords.yml
+```sh
+$ ansible-vault encrypt passwords.yml
+```
+
+3. Execute the playbook
+
+Local deployment:
+```sh
+$ ansible-playbook hosted_engine_deploy.yml --extra-vars='@he_deployment.json' --extra-vars='@passwords.yml' --ask-vault-pass
+```
+
+Deployment over a remote host:
+```sh
+$ ansible-playbook -i host123.localdomain, hosted_engine_deploy.yml --extra-vars='@he_deployment.json' --extra-vars='@passwords.yml' --ask-vault-pass
+```
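+
+The trailing comma after the host name makes `ansible-playbook` treat the argument as an inline inventory list instead of a path to an inventory file.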
+
+Deploy over a remote host from Ansible AWX/Tower
+---
+
+The flow creates a temporary VM with a running engine that is used for configuring and bootstrapping the whole environment.
+The bootstrap engine VM runs on a NATted libvirt network, so at that stage it is not reachable from outside the host it runs on.
+
+When the role dynamically adds the freshly created engine VM to the inventory, it also configures the host to act as an ssh proxy; this works fine when running the playbook directly with ansible-playbook.
+Ansible AWX/Tower, on the other hand, by default relies on PRoot to isolate jobs, so the credentials supplied by AWX/Tower will not flow to the jump host configured with ProxyCommand.
+
+[This can be avoided by disabling job isolation in AWX/Tower](https://docs.ansible.com/ansible-tower/latest/html/administration/tipsandtricks.html#setting-up-a-jump-host-to-use-with-tower)
+
+Please notice that *job isolation* can only be configured system-wide, not just for the HE deployment job, so disabling it is not a recommended practice in production environments.
+
+Deployment time improvements
+---
+
+To significantly reduce the amount of time it takes to deploy a hosted engine __over a remote host__, add the following lines to `/etc/ansible/ansible.cfg` under the `[ssh_connection]` section:
+
+```
+ssh_args = -C -o ControlMaster=auto -o ControlPersist=30m
+control_path_dir = /root/cp
+control_path = %(directory)s/%%h-%%r
+pipelining = True
+```
+
+Make changes in the engine VM during the deployment
+---
+In some cases, a user may want to make adjustments to the engine VM
+during the deployment process. There are two ways to do that:
+
+**Automatic:**
+
+Write Ansible playbooks that will run on the engine VM before or after the engine VM installation.
+
+You can add the playbooks to the following locations:
+
+- ```hooks/enginevm_before_engine_setup```: These will be run before running engine-setup on the engine machine.
+
+- ```hooks/enginevm_after_engine_setup```: These will be run after running engine-setup on the engine machine.
+
+- ```hooks/after_add_host```: These will be run after adding the host to the engine, but before checking whether it is up. You can place playbooks here to customize the host, such as configuring required networks, and then activate it, so that the deployment will find it as "Up" and continue successfully. See examples/required_networks_fix.yml for an example.
+
+These playbooks will be consumed automatically by the role when you execute it.
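+
+As a minimal sketch (the file name and the answer key below are purely illustrative), such a hook is a plain task-list file executed on the bootstrap engine VM; for instance, a `hooks/enginevm_before_engine_setup/` hook could append an extra answer for engine-setup:
+
+```yml
+---
+# Hypothetical hook file: hooks/enginevm_before_engine_setup/01_answerfile.yml
+# Tasks here run on the bootstrap engine VM before engine-setup starts.
+- name: Add a custom line to the engine-setup answer file
+  ansible.builtin.lineinfile:
+    path: /root/ovirt-engine-answers
+    line: "OVESETUP_CONFIG/applicationMode=str:virt"
+```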
+
+**Manual:**
+
+To make manual adjustments set the following variables to `true`:
+- `he_pause_before_engine_setup` - This will pause the deployment **before** running engine-setup, and before restoring, when using `he_restore_from_file`.
+- `he_pause_host` - This will pause the deployment **after** the engine has been set up.
+
+Setting these variables to `true` will create a lock file at /tmp that ends with `_he_setup_lock` on the machine the role was executed on. The deployment will continue after the lock file is deleted, or after 24 hours (if the lock file hasn't been removed).
+
+In order to proceed with the deployment, before deleting the lock file, make sure that the host is in the 'up' state at the engine's URL.
+
+Both the lock file path and the engine's URL will be presented during the role execution.
+
+**On Failure**
+
+If "Add Host" failed and left the host in status "non_operational", by default the deployment will be paused, similarly to "Manual" above, so that the user can try to fix the host to get it to "up" state, before removing the lock file and continuing. If you want the process to fail instead of pausing, set `he_pause_after_failed_add_host` to false.
+
+If `engine-backup --mode=restore` failed, by default the deployment will be paused, to let the user handle this manually. If you want the process to fail instead of pausing, set `he_pause_after_failed_restore` to false.
+
+Demo
+----
+Here is a demo showing a deployment on NFS that configures the engine VM with a static IP.
+[![asciicast](https://asciinema.org/a/205639.png)](https://asciinema.org/a/205639)
+
+# License
+
+Apache License 2.0
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/defaults/main.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/defaults/main.yml
new file mode 100644
index 000000000..02f9f746e
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/defaults/main.yml
@@ -0,0 +1,119 @@
+---
+# Default vars
+# Do not change these variables
+# Changes in this section are NOT supported
+
+he_cmd_lang:
+ LANGUAGE: en_US.UTF-8
+ LANG: en_US.UTF-8
+ LC_MESSAGES: en_US.UTF-8
+ LC_ALL: en_US.UTF-8
+
+he_vm_name: HostedEngine
+he_data_center: Default
+he_cluster: Default
+he_local_vm_dir_path: /var/tmp
+he_local_vm_dir_prefix: localvm
+he_appliance_ova: ''
+he_root_ssh_pubkey: ''
+he_root_ssh_access: 'yes'
+he_apply_openscap_profile: false
+he_openscap_profile_name: stig
+he_enable_fips: false
+he_cdrom: ''
+he_console_type: vnc
+he_video_device: vga
+he_graphic_device: vnc
+he_emulated_machine: pc
+he_minimal_mem_size_MB: 4096
+he_minimal_disk_size_GB: 50
+he_mgmt_network: ovirtmgmt
+he_storage_domain_name: hosted_storage
+he_ansible_host_name: localhost
+he_ipv4_subnet_prefix: "192.168.222"
+he_ipv6_subnet_prefix: fd00:1234:5678:900
+he_webui_forward_port: 6900 # by default already open for VM console
+he_reserved_memory_MB: 512
+he_avail_memory_grace_MB: 200
+
+engine_psql: /usr/share/ovirt-engine/dbscripts/engine-psql.sh
+
+he_host_ip: null
+he_host_name: null
+he_host_address: null
+he_cloud_init_host_name: null
+he_cloud_init_domain_name: null
+
+he_smtp_port: 25
+he_smtp_server: localhost
+he_dest_email: root@localhost
+he_source_email: root@localhost
+
+he_force_ip4: false
+he_force_ip6: false
+
+he_pause_before_engine_setup: false
+he_pause_host: false
+he_pause_after_failed_add_host: true
+he_pause_after_failed_restore: true
+he_debug_mode: false
+
+## Mandatory variables:
+
+he_bridge_if: null
+he_fqdn: null
+he_mem_size_MB: max
+he_vcpus: max
+he_disk_size_GB: 61
+
+he_enable_libgfapi: false
+he_enable_hc_gluster_service: false
+he_vm_mac_addr: null
+he_remove_appliance_rpm: true
+he_pki_renew_on_restore: false
+he_enable_keycloak: true
+
+## Storage domain vars:
+he_domain_type: null # can be: nfs | iscsi | glusterfs | fc
+he_storage_domain_addr: null
+
+## NFS vars:
+## Defaults are null, user should specify if NFS is chosen
+he_mount_options: ''
+he_storage_domain_path: null
+he_nfs_version: auto # can be: auto, v4, v3, v4_0, v4_1 or v4_2
+he_storage_if: null
+
+## ISCSI vars:
+## Defaults are null, user should specify if ISCSI is chosen
+he_iscsi_username: null
+he_iscsi_password: null
+he_iscsi_discover_username: null
+he_iscsi_discover_password: null
+he_iscsi_target: null
+he_lun_id: null
+he_iscsi_portal_port: null
+he_iscsi_portal_addr: null
+he_iscsi_tpgt: null
+he_discard: false
+
+# Define if using STATIC ip configuration
+he_vm_ip_addr: null
+he_vm_ip_prefix: null
+he_dns_addr: null # up to 3 DNS server IPs can be added
+he_vm_etc_hosts: false # user can add lines to /etc/hosts on the engine VM
+he_gateway: null
+he_network_test: 'dns' # can be: 'dns', 'ping', 'tcp' or 'none'
+he_tcp_t_address: null
+he_tcp_t_port: null
+
+# ovirt-hosted-engine-setup variables
+he_just_collect_network_interfaces: false
+he_libvirt_authfile: '/etc/ovirt-hosted-engine/virsh_auth.conf'
+he_offline_deployment: false
+he_additional_package_list: []
+
+# *** Do Not Use On Production Environment ***
+# ********** Used for testing ONLY ***********
+he_requirements_check_enabled: true
+he_memory_requirements_check_enabled: true
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/hosted_engine_deploy_localhost.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/hosted_engine_deploy_localhost.yml
new file mode 100644
index 000000000..53f72801c
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/hosted_engine_deploy_localhost.yml
@@ -0,0 +1,8 @@
+---
+- name: Deploy oVirt hosted engine
+ hosts: localhost
+ connection: local
+ roles:
+ - role: hosted_engine_setup
+ collections:
+ - ovirt.ovirt
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/hosted_engine_deploy_remotehost.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/hosted_engine_deploy_remotehost.yml
new file mode 100644
index 000000000..51aaa0e1e
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/hosted_engine_deploy_remotehost.yml
@@ -0,0 +1,7 @@
+---
+- name: Deploy oVirt hosted engine
+ hosts: host123.localdomain
+ roles:
+ - role: hosted_engine_setup
+ collections:
+ - ovirt.ovirt
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/iscsi_deployment_remote.json b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/iscsi_deployment_remote.json
new file mode 100644
index 000000000..d8df1baf0
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/iscsi_deployment_remote.json
@@ -0,0 +1,18 @@
+{
+ "he_bridge_if": "eth0",
+ "he_fqdn": "he-engine.example.com",
+ "he_vm_ip_addr": "192.168.1.214",
+ "he_vm_ip_prefix": "24",
+ "he_gateway": "192.168.1.1",
+ "he_dns_addr": "192.168.1.1",
+ "he_vm_etc_hosts": true,
+ "he_vm_mac_addr": "00:a5:3f:66:ba:12",
+ "he_domain_type": "iscsi",
+ "he_storage_domain_addr": "192.168.1.125",
+ "he_iscsi_portal_port": "3260",
+ "he_iscsi_tpgt": "1",
+ "he_iscsi_target": "iqn.2017-10.com.redhat.stirabos:he",
+ "he_lun_id": "36589cfc000000e8a909165bdfb47b3d9",
+ "he_mem_size_MB": "4096",
+ "he_ansible_host_name": "host123.localdomain"
+}
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/nfs_deployment.json b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/nfs_deployment.json
new file mode 100644
index 000000000..edd982e75
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/nfs_deployment.json
@@ -0,0 +1,8 @@
+{
+ "he_bridge_if": "eth0",
+ "he_fqdn": "he-engine.example.com",
+ "he_vm_mac_addr": "00:a5:3f:66:ba:12",
+ "he_domain_type": "nfs",
+ "he_storage_domain_addr": "192.168.100.50",
+ "he_storage_domain_path": "/var/nfs_folder"
+}
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/passwords.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/passwords.yml
new file mode 100644
index 000000000..078194a33
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/passwords.yml
@@ -0,0 +1,13 @@
+---
+# As an example this file is kept in plaintext. If you want to
+# encrypt this file, please execute the following command:
+#
+# $ ansible-vault encrypt passwords.yml
+#
+# It will ask you for a password, which you must then pass to
+# ansible interactively when executing the playbook.
+#
+# $ ansible-playbook myplaybook.yml --ask-vault-pass
+#
+he_appliance_password: 123456
+he_admin_password: 123456
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/required_networks_fix.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/required_networks_fix.yml
new file mode 100644
index 000000000..d9fa2c52c
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/required_networks_fix.yml
@@ -0,0 +1,56 @@
+---
+# This is an example for a hook to fix restore-from-file errors
+# due to missing required networks.
+#
+# If you have an existing hosted-engine setup, and the Default cluster
+# has some required network, then if you take a backup and try to restore
+# it with '--restore-from-file', the deployment process cannot know which
+# host nic should be attached to the required network, and so activating
+# the host will fail. This will prompt the user to manually handle the
+# situation via the engine web admin UI.
+#
+# If you already know that beforehand, and want to automate restoration,
+# you can copy the current file, edit it as needed, and place it in:
+# /usr/share/ansible/collections/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_add_host/
+# File name should end with '.yml'.
+#
+# For more details, see (also):
+# https://docs.ansible.com/ansible/latest/collections/ovirt/ovirt/ovirt_host_network_module.html
+# https://docs.ansible.com/ansible/latest/collections/ovirt/ovirt/ovirt_host_module.html
+
+- include_tasks: auth_sso.yml
+
+- name: Wait for the host to be up
+ ovirt_host_info:
+ pattern: name=myhost
+ auth: "{{ ovirt_auth }}"
+ register: host_result_up_check
+ until: >-
+ host_result_up_check is succeeded and
+ host_result_up_check.ovirt_hosts|length >= 1 and
+ (
+ host_result_up_check.ovirt_hosts[0].status == 'up' or
+ host_result_up_check.ovirt_hosts[0].status == 'non_operational'
+ )
+ retries: 120
+ delay: 10
+ ignore_errors: true
+
+- name: Handle non_operational myhost
+ block:
+ - name: Attach interface eth0 on host myhost to network net1
+ ovirt.ovirt.ovirt_host_network:
+ auth: "{{ ovirt_auth }}"
+ name: myhost
+ interface: eth0
+ networks:
+ - name: net1
+ - name: Activate host myhost
+ ovirt.ovirt.ovirt_host:
+ auth: "{{ ovirt_auth }}"
+ name: myhost
+ state: present
+ when: >-
+ host_result_up_check is succeeded and
+ host_result_up_check.ovirt_hosts|length >= 1 and
+ host_result_up_check.ovirt_hosts[0].status == 'non_operational'
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/files/35-allow-ansible-for-vdsm.rules b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/files/35-allow-ansible-for-vdsm.rules
new file mode 100644
index 000000000..b50690cbc
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/files/35-allow-ansible-for-vdsm.rules
@@ -0,0 +1,4 @@
+# Added by hosted-engine-setup for running Ansible's commands as vdsm user
+# For more details see https://bugzilla.redhat.com/1903549
+
+allow perm=any uid=36 : dir=/var/tmp/
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_add_host/README.md b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_add_host/README.md
new file mode 100644
index 000000000..54daacbe3
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_add_host/README.md
@@ -0,0 +1,3 @@
+# USAGE
+
+Place here playbooks to be executed after trying to add the host to the engine.
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_setup/README.md b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_setup/README.md
new file mode 100644
index 000000000..9ef275a46
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_setup/README.md
@@ -0,0 +1,3 @@
+# USAGE
+
+Place here playbooks to be executed after hosted-engine-setup finishes. \ No newline at end of file
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_setup/add_host_storage_domain.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_setup/add_host_storage_domain.yml
new file mode 100644
index 000000000..ad7df40e6
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_setup/add_host_storage_domain.yml
@@ -0,0 +1,53 @@
+---
+- name: Include Host vars
+ ansible.builtin.include_vars: "{{ file_item }}"
+ with_fileglob: "/usr/share/ovirt-hosted-engine-setup/gdeploy-inventory.yml"
+ loop_control:
+ loop_var: file_item
+- name: Show gluster inventory data
+  ansible.builtin.debug:
+    var: gluster
+
+- name: Set Engine public key as authorized key without validating the TLS/SSL certificates
+ connection: ssh
+ authorized_key:
+ user: root
+ state: present
+ key: https://{{ he_fqdn }}/ovirt-engine/services/pki-resource?resource=engine-certificate&format=OPENSSH-PUBKEY
+ validate_certs: false
+ delegate_to: "{{ host }}"
+ with_items: "{{ gluster.hosts }}"
+ loop_control:
+ loop_var: host
+ when: "gluster is defined and 'hosts' in gluster"
+
+- name: Add additional gluster hosts to engine
+ async: 50
+ poll: 0
+ ignore_errors: true
+ ovirt_host:
+ cluster: "{{ he_cluster }}"
+ name: "{{ host }}"
+ address: "{{ host }}"
+ state: present
+ public_key: true
+ auth: "{{ ovirt_auth }}"
+ hosted_engine: deploy
+ with_items: "{{ gluster.hosts }}"
+ loop_control:
+ loop_var: host
+ when: "gluster is defined and 'hosts' in gluster and gluster.hosts | length > 1"
+
+- name: "Add additional glusterfs storage domains"
+ ignore_errors: true
+ ovirt_storage_domain:
+ name: "{{ sd.name }}"
+ host: "{{ he_host_name }}"
+ auth: "{{ ovirt_auth }}"
+ data_center: "{{ datacenter_name }}"
+ glusterfs:
+ address: "{{ he_storage_domain_addr }}"
+ mount_options: "{{ sd.mount_options }}"
+ path: "{{ sd.path }}"
+ with_items: "{{ gluster.vars.storage_domains }}"
+ loop_control:
+ loop_var: sd
+ when: "gluster is defined and 'hosts' in gluster and 'vars' in gluster"
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/enginevm_after_engine_setup/README.md b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/enginevm_after_engine_setup/README.md
new file mode 100644
index 000000000..8fcfda4a3
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/enginevm_after_engine_setup/README.md
@@ -0,0 +1,3 @@
+# USAGE
+
+Place here playbooks to be executed on the engine VM after engine-setup finishes. \ No newline at end of file
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/enginevm_before_engine_setup/README.md b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/enginevm_before_engine_setup/README.md
new file mode 100644
index 000000000..1bd048026
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/enginevm_before_engine_setup/README.md
@@ -0,0 +1,3 @@
+# USAGE
+
+Place here playbooks to be executed on the engine VM before engine-setup starts. \ No newline at end of file
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/add_engine_as_ansible_host.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/add_engine_as_ansible_host.yml
new file mode 100644
index 000000000..8f13b593e
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/add_engine_as_ansible_host.yml
@@ -0,0 +1,25 @@
+---
+- name: Add the engine VM as an ansible host
+ block:
+ - name: Fetch the value of HOST_KEY_CHECKING
+ ansible.builtin.set_fact: host_key_checking="{{ lookup('config', 'HOST_KEY_CHECKING') }}"
+ - name: Get the username running the deploy
+ become: false
+ ansible.builtin.command: whoami
+ register: username_on_host
+ changed_when: false
+ - name: Register the engine FQDN as a host
+ ansible.builtin.add_host:
+ name: "{{ he_fqdn }}"
+ groups: engine
+ ansible_connection: smart
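+      # The bootstrap engine VM sits on a NATted libvirt network; when deploying
+      # on a remote host, proxy SSH through that host so the controller can reach it.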
+ ansible_ssh_extra_args: >-
+ -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {% if he_ansible_host_name != "localhost" %}
+ -o ProxyCommand="ssh -W %h:%p -q
+ {% if not host_key_checking %} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {% endif %}
+ {{ username_on_host.stdout }}@{{ he_ansible_host_name }}" {% endif %}
+ ansible_ssh_pass: "{{ he_appliance_password }}"
+ ansible_host: "{{ he_fqdn_ansible_host }}"
+ ansible_user: root
+ no_log: true
+ ignore_errors: true
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/alter_libvirt_default_net_configuration.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/alter_libvirt_default_net_configuration.yml
new file mode 100644
index 000000000..bcc5913ec
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/alter_libvirt_default_net_configuration.yml
@@ -0,0 +1,33 @@
+---
+- name: Parse libvirt default network configuration
+ ansible.builtin.command: virsh net-dumpxml default
+ changed_when: false
+ register: default_net_xml
+
+- name: Set network_dict from default_net_xml
+ ansible.builtin.set_fact:
+ network_dict: "{{ default_net_xml['stdout'] | ovirt.ovirt.get_network_xml_to_dict }}"
+
+- name: Create he-network-config.xml from network-config.j2 template
+ ansible.builtin.template:
+ src: templates/network-config.j2
+ dest: /tmp/he-network-config.xml
+ mode: 0644
+
+- name: Update libvirt default network configuration, destroy
+ ansible.builtin.command: virsh net-destroy default
+ ignore_errors: true
+ changed_when: false
+
+- name: Update libvirt default network configuration, undefine
+ ansible.builtin.command: virsh net-undefine default
+ ignore_errors: true
+ changed_when: false
+
+- name: Update libvirt default network configuration, define
+ ansible.builtin.command: virsh net-define --file /tmp/he-network-config.xml
+ changed_when: false
+
+- name: Activate default libvirt network
+ ansible.builtin.command: virsh net-start default
+ changed_when: false
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/apply_openscap_profile.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/apply_openscap_profile.yml
new file mode 100644
index 000000000..fc6658f7b
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/apply_openscap_profile.yml
@@ -0,0 +1,37 @@
+---
+- name: Initialize OpenSCAP variables
+ ansible.builtin.set_fact:
+ oscap_dir: "/usr/share/xml/scap/ssg/content"
+ oscap_ver: "{{ appliance_ver if ansible_distribution != 'Fedora' else '' }}"
+- name: Set OpenSCAP datastream path
+ ansible.builtin.set_fact:
+ oscap_datastream: "{{ oscap_dir }}/ssg-{{ appliance_dist | replace('RedHat', 'rhel') | lower }}{{ oscap_ver }}-ds.xml"
+- name: Verify OpenSCAP datastream
+ ansible.builtin.stat:
+ path: "{{ oscap_datastream }}"
+ register: oscap_ds_stat
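+# Pick the profile id from the datastream: list the available profiles, keep
+# the last sorted match for the requested profile name, and extract its id.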
+- name: Set OpenSCAP profile
+ ansible.builtin.shell: >-
+ set -euo pipefail && oscap info --profiles {{ oscap_datastream }} |
+ grep -Ei "({{ he_openscap_profile_name }}:)" | sort | tail -1 | cut -d':' -f1
+ register: oscap_profile
+ ignore_errors: true
+ changed_when: true
+ when: oscap_ds_stat.stat.exists
+- name: Apply OpenSCAP profile
+ ansible.builtin.command: >-
+ oscap xccdf eval --profile {{ oscap_profile.stdout }} --remediate
+ --report /root/openscap-report.html {{ oscap_datastream }}
+ register: oscap_applied
+  # Currently, there is no way to know if the security profile
+  # meets the requirements for the HE VM (see: https://bugzilla.redhat.com/1983476),
+  # therefore the deployment will fail only if there is an error during evaluation,
+  # which means rc = 1
+ failed_when: oscap_applied.rc == 1
+ changed_when: true
+ when: oscap_profile.stdout is defined
+- name: Reset PermitRootLogin for sshd
+ ansible.builtin.lineinfile: dest=/etc/ssh/sshd_config
+ regexp="^\s*PermitRootLogin"
+ line="PermitRootLogin yes"
+ state=present
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/auth_revoke.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/auth_revoke.yml
new file mode 100644
index 000000000..feed19b7a
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/auth_revoke.yml
@@ -0,0 +1,6 @@
+---
+- name: Always revoke the SSO token
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_sso_auth.ansible_facts.ovirt_auth }}"
+ ignore_errors: true
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/auth_sso.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/auth_sso.yml
new file mode 100644
index 000000000..8d5af40d2
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/auth_sso.yml
@@ -0,0 +1,13 @@
+---
+- name: Obtain SSO token using username/password credentials
+ # TODO: remove library/ovirt_auth.py when Ansible 2.5 is out explicitly requiring it
+ environment:
+ OVIRT_URL: https://{{ he_fqdn }}/ovirt-engine/api
+ OVIRT_USERNAME: "{{ he_admin_username }}"
+ OVIRT_PASSWORD: "{{ he_admin_password }}"
+ ovirt_auth:
+ insecure: true
+ register: ovirt_sso_auth
+ until: ovirt_sso_auth is succeeded
+ retries: 50
+ delay: 10
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/01_prepare_routing_rules.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/01_prepare_routing_rules.yml
new file mode 100644
index 000000000..4bd99efe9
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/01_prepare_routing_rules.yml
@@ -0,0 +1,100 @@
+---
+- name: Prepare routing rules
+ block:
+ - name: Check IPv6
+ ansible.builtin.set_fact:
+ ipv6_deployment: >-
+ {{ true if he_host_ip not in target_address_v4.stdout_lines and
+ he_host_ip in target_address_v6.stdout_lines
+ else false }}
+ - include_tasks: ../validate_ip_prefix.yml
+ - include_tasks: ../alter_libvirt_default_net_configuration.yml
+ # all of the next is a workaround for a network issue:
+ # vdsm installation breaks the routing by defining separate
+ # routing table for ovirtmgmt. But we need to enable communication
+ # between virbr0 and ovirtmgmt
+ - name: Start libvirt
+ ansible.builtin.service:
+ name: libvirtd
+ state: started
+ enabled: true
+ - name: Activate default libvirt network
+ ansible.builtin.command: virsh net-autostart default
+ ignore_errors: true
+ changed_when: false
+ - name: Get routing rules, IPv4
+ ansible.builtin.command: ip -j rule
+ environment: "{{ he_cmd_lang }}"
+ register: route_rules_ipv4
+ changed_when: true
+ - name: Get routing rules, IPv6
+ ansible.builtin.command: ip -6 rule
+ environment: "{{ he_cmd_lang }}"
+ register: route_rules_ipv6
+ changed_when: true
+ when: ipv6_deployment|bool
+ - name: Save bridge name
+ ansible.builtin.set_fact:
+ virbr_default: "{{ network_dict['bridge']['name'] }}"
+ - name: Wait for the bridge to appear on the host
+ ansible.builtin.command: ip link show {{ virbr_default }}
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: ip_link_show_bridge
+ until: ip_link_show_bridge.rc == 0
+ retries: 30
+ delay: 3
+ - name: Accept IPv6 Router Advertisements for {{ virbr_default }}
+ ansible.builtin.shell: echo 2 > /proc/sys/net/ipv6/conf/{{ virbr_default }}/accept_ra
+ when: ipv6_deployment|bool
+ - name: Refresh network facts
+ ansible.builtin.setup:
+ tags: ['skip_ansible_lint']
+ - name: Fetch IPv4 CIDR for {{ virbr_default }}
+ ansible.builtin.set_fact:
+ virbr_cidr_ipv4: >-
+ {{ (hostvars[inventory_hostname]['ansible_'+virbr_default]['ipv4']['address']+'/'
+ +hostvars[inventory_hostname]['ansible_'+virbr_default]['ipv4']['netmask']) |ipv4('host/prefix') }}
+ when: not ipv6_deployment|bool
+ - name: Fetch IPv6 CIDR for {{ virbr_default }}
+ ansible.builtin.set_fact:
+ virbr_cidr_ipv6: >-
+ {{ (hostvars[inventory_hostname]['ansible_'+virbr_default]['ipv6'][0]['address']+'/'+
+ hostvars[inventory_hostname]['ansible_'+virbr_default]['ipv6'][0]['prefix']) |
+ ipv6('host/prefix') if 'ipv6' in hostvars[inventory_hostname]['ansible_'+virbr_default] else None }}
+ when: ipv6_deployment|bool
+ - name: Add IPv4 outbound route rules
+ ansible.builtin.command: ip rule add from {{ virbr_cidr_ipv4 }} priority 101 table main
+ environment: "{{ he_cmd_lang }}"
+ register: result
+ when: >-
+ not ipv6_deployment|bool and
+ route_rules_ipv4.stdout | from_json |
+ selectattr('priority', 'equalto', 101) |
+ selectattr('src', 'equalto', virbr_cidr_ipv4 | ipaddr('address') ) |
+ list | length == 0
+ changed_when: true
+ - name: Add IPv4 inbound route rules
+ ansible.builtin.command: ip rule add from all to {{ virbr_cidr_ipv4 }} priority 100 table main
+ environment: "{{ he_cmd_lang }}"
+ register: result
+ changed_when: true
+ when: >-
+ not ipv6_deployment|bool and
+ route_rules_ipv4.stdout | from_json |
+ selectattr('priority', 'equalto', 100) |
+ selectattr('dst', 'equalto', virbr_cidr_ipv4 | ipaddr('address') ) |
+ list | length == 0
+ - name: Add IPv6 outbound route rules
+ ansible.builtin.command: ip -6 rule add from {{ virbr_cidr_ipv6 }} priority 101 table main
+ environment: "{{ he_cmd_lang }}"
+ register: result
+      when: >-
+        ipv6_deployment|bool and
+        "101:\tfrom " + virbr_cidr_ipv6 + " lookup main" not in route_rules_ipv6.stdout
+ changed_when: true
+ - name: Add IPv6 inbound route rules
+ ansible.builtin.command: ip -6 rule add from all to {{ virbr_cidr_ipv6 }} priority 100 table main
+ environment: "{{ he_cmd_lang }}"
+ register: result
+ changed_when: true
+      when: >-
+        ipv6_deployment|bool and
+        "100:\tfrom all to " + virbr_cidr_ipv6 + " lookup main" not in route_rules_ipv6.stdout
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/02_create_local_vm.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/02_create_local_vm.yml
new file mode 100644
index 000000000..3958eca15
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/02_create_local_vm.yml
@@ -0,0 +1,159 @@
+---
+- name: Create hosted engine local vm
+ block:
+ - name: Initial tasks
+ block:
+ - name: Get host unique id
+ ansible.builtin.shell: |
+ if [ -e /etc/vdsm/vdsm.id ];
+ then cat /etc/vdsm/vdsm.id;
+ elif [ -e /proc/device-tree/system-id ];
+ then cat /proc/device-tree/system-id; #ppc64le
+ else dmidecode -s system-uuid;
+ fi;
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: unique_id_out
+ - name: Create directory for local VM
+ ansible.builtin.tempfile:
+ state: directory
+ path: "{{ he_local_vm_dir_path }}"
+ prefix: "{{ he_local_vm_dir_prefix }}"
+ register: otopi_localvm_dir
+ - name: Set local vm dir path
+ ansible.builtin.set_fact:
+ he_local_vm_dir: "{{ otopi_localvm_dir.path }}"
+ - include_tasks: ../install_appliance.yml
+ when: he_appliance_ova is none or he_appliance_ova|length == 0
+ - name: Register appliance PATH
+ ansible.builtin.set_fact:
+ he_appliance_ova_path: "{{ he_appliance_ova }}"
+ when: he_appliance_ova is not none and he_appliance_ova|length > 0
+ - name: Debug var he_appliance_ova_path
+ ansible.builtin.debug:
+ var: he_appliance_ova_path
+ - name: Check available space on local VM directory
+ ansible.builtin.shell: df -k --output=avail "{{ he_local_vm_dir_path }}" | grep -v Avail | cat
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: local_vm_dir_space_out
+ - name: Check appliance size
+ ansible.builtin.shell: zcat "{{ he_appliance_ova_path }}" | wc --bytes
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: appliance_size
+ - name: Ensure we have enough space to extract the appliance
+ ansible.builtin.assert:
+ that:
+ - "local_vm_dir_space_out.stdout_lines[0]|int * 1024 > appliance_size.stdout_lines[0]|int * 1.1"
+ msg: >
+ {{ he_local_vm_dir_path }} doesn't provide enough free space to extract the
+ engine appliance: {{ local_vm_dir_space_out.stdout_lines[0]|int / 1024 | int }} Mb
+ are available while {{ appliance_size.stdout_lines[0]|int / 1024 / 1024 * 1.1 | int }} Mb
+ are required.
+ - name: Extract appliance to local VM directory
+ ansible.builtin.unarchive:
+ remote_src: true
+ src: "{{ he_appliance_ova_path }}"
+ dest: "{{ he_local_vm_dir }}"
+ extra_opts: ['--sparse']
+ - include_tasks: ../get_local_vm_disk_path.yml
+ - name: Get appliance disk size
+ ansible.builtin.command: qemu-img info --output=json {{ local_vm_disk_path }}
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: qemu_img_out
+ - name: Debug var qemu_img_out
+ ansible.builtin.debug:
+ var: qemu_img_out
+ - name: Parse qemu-img output
+ ansible.builtin.set_fact:
+ virtual_size={{ qemu_img_out.stdout|from_json|ovirt.ovirt.json_query('"virtual-size"') }}
+ register: otopi_appliance_disk_size
+ - name: Debug var virtual_size
+ ansible.builtin.debug:
+ var: virtual_size
+ - name: Hash the appliance root password
+ ansible.builtin.set_fact:
+ he_hashed_appliance_password: "{{ he_appliance_password | string | password_hash('sha512') }}"
+ no_log: true
+ - name: Create cloud init user-data and meta-data files
+ ansible.builtin.template:
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ mode: 0640
+ with_items:
+ - {src: templates/user-data.j2, dest: "{{ he_local_vm_dir }}/user-data"}
+ - {src: templates/meta-data.j2, dest: "{{ he_local_vm_dir }}/meta-data"}
+ - {src: templates/network-config-dhcp.j2, dest: "{{ he_local_vm_dir }}/network-config"}
+ - name: Create ISO disk
+ ansible.builtin.command: >-
+ mkisofs -output {{ he_local_vm_dir }}/seed.iso -volid cidata -joliet -rock -input-charset utf-8
+ {{ he_local_vm_dir }}/meta-data {{ he_local_vm_dir }}/user-data
+ {{ he_local_vm_dir }}/network-config
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ - name: Fix local VM directory permission
+ ansible.builtin.file:
+ state: directory
+ path: "{{ he_local_vm_dir }}"
+ owner: vdsm
+ group: qemu
+ recurse: true
+ mode: u=rwX,g=rwX,o=
+ - name: Create local VM
+ ansible.builtin.command: >-
+ virt-install -n {{ he_vm_name }}Local --os-variant rhel8.0 --virt-type kvm --memory {{ he_mem_size_MB }}
+ --vcpus {{ he_vcpus }} --network network=default,mac={{ he_vm_mac_addr }},model=virtio
+ --disk {{ local_vm_disk_path }} --import --disk path={{ he_local_vm_dir }}/seed.iso,device=cdrom
+ --noautoconsole --rng /dev/random --graphics vnc --video vga --sound none --controller usb,model=none
+ --memballoon none --boot hd,menu=off --clock kvmclock_present=yes
+ environment: "{{ he_cmd_lang }}"
+ register: create_local_vm
+ changed_when: true
+ - name: Debug var create_local_vm
+ ansible.builtin.debug:
+ var: create_local_vm
+ - name: Get local VM IP
+ ansible.builtin.shell: virsh -r net-dhcp-leases default | grep -i {{ he_vm_mac_addr }} | awk '{ print $5 }' | cut -f1 -d'/'
+ environment: "{{ he_cmd_lang }}"
+ register: local_vm_ip
+ until: local_vm_ip.stdout_lines|length >= 1
+ retries: 90
+ delay: 10
+ changed_when: true
+ - name: Debug var local_vm_ip
+ ansible.builtin.debug:
+ var: local_vm_ip
+ - name: Remove leftover entries in /etc/hosts for the local VM
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ regexp: "# temporary entry added by hosted-engine-setup for the bootstrap VM$"
+ state: absent
+ - name: Create an entry in /etc/hosts for the local VM
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ line:
+ "{{ local_vm_ip.stdout_lines[0] }} \
+ {{ he_fqdn }} # temporary entry added by hosted-engine-setup for the bootstrap VM"
+ insertbefore: BOF
+ backup: true
+ - name: Wait for SSH to restart on the local VM
+ ansible.builtin.wait_for:
+ host='{{ he_fqdn }}'
+ port=22
+ delay=30
+ timeout=300
+ - name: Set the name for add_host
+ ansible.builtin.set_fact:
+ he_fqdn_ansible_host: "{{ local_vm_ip.stdout_lines[0] }}"
+ - import_tasks: ../add_engine_as_ansible_host.yml
+ rescue:
+ - include_tasks: ../clean_localvm_dir.yml
+ - include_tasks: ../clean_local_storage_pools.yml
+ - name: Notify the user about a failure
+ ansible.builtin.fail:
+ msg: >
+ The system may not be provisioned according to the playbook
+ results: please check the logs for the issue,
+ fix accordingly or re-deploy from scratch.
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/03_engine_initial_tasks.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/03_engine_initial_tasks.yml
new file mode 100644
index 000000000..775acb1d4
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/03_engine_initial_tasks.yml
@@ -0,0 +1,132 @@
+---
+- name: Initial engine tasks
+ block:
+ - name: Wait for the local VM
+ ansible.builtin.wait_for_connection:
+ delay: 5
+ timeout: 3600
+ - name: Add an entry for this host on /etc/hosts on the local VM
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ line: >-
+ {{ hostvars[he_ansible_host_name]['he_host_ip'] }} {{ hostvars[he_ansible_host_name]['he_host_address'] }}
+ - name: Set FQDN
+ ansible.builtin.command: hostnamectl set-hostname {{ he_fqdn }}
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ - name: Force the local VM FQDN to temporary resolve on the natted network address
+ ansible.builtin.lineinfile:
+ path: /etc/hosts
+ line:
+ "{{ hostvars[he_ansible_host_name]['local_vm_ip']['stdout_lines'][0] }} {{ he_fqdn }} # hosted-engine-setup-{{ \
+ hostvars[he_ansible_host_name]['he_local_vm_dir'] }}"
+ - name: Reconfigure IPv6 default gateway
+ ansible.builtin.command: ip -6 route add default via "{{ he_ipv6_subnet_prefix + '::1' }}"
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ when: hostvars[he_ansible_host_name]['ipv6_deployment']|bool
+ - name: Restore sshd reverse DNS lookups
+ ansible.builtin.lineinfile:
+ path: /etc/ssh/sshd_config
+ regexp: '^UseDNS'
+ line: "UseDNS yes"
+  - name: Add admin password to the answerfile
+ ansible.builtin.lineinfile:
+ path: /root/ovirt-engine-answers
+ line: "{{ item }}"
+ no_log: true
+ with_items:
+ - "OVESETUP_CONFIG/adminPassword=str:{{ he_admin_password }}"
+  - name: Add engine DB password to the answerfile
+ ansible.builtin.lineinfile:
+ path: /root/ovirt-engine-answers
+ line: "{{ item }}"
+ no_log: true
+ with_items:
+ - "OVESETUP_DB/password=str:{{ he_db_password }}"
+ when: he_db_password is defined
+  - name: Add DWH DB password to the answerfile
+ ansible.builtin.lineinfile:
+ path: /root/ovirt-engine-answers
+ line: "{{ item }}"
+ no_log: true
+ with_items:
+ - "OVESETUP_DWH_DB/password=str:{{ he_dwh_db_password }}"
+ when: he_dwh_db_password is defined
+ - name: Add keycloak line to answerfile
+ ansible.builtin.lineinfile:
+ path: /root/ovirt-engine-answers
+ line: "{{ item }}"
+ with_items:
+ - "OVESETUP_CONFIG/keycloakEnable=bool:{{ he_enable_keycloak }}"
+ - name: Enable security policy
+ block:
+ - import_tasks: ../get_appliance_dist.yml
+ - name: Apply Security profile
+ block:
+ - name: Import OpenSCAP task
+ import_tasks: ../apply_openscap_profile.yml
+ when: he_apply_openscap_profile|bool
+ - name: Enable FIPS on the engine VM
+ ansible.builtin.command: >-
+ fips-mode-setup --enable
+ changed_when: true
+ when: he_enable_fips|bool
+ - name: Reboot the engine VM to apply security rules
+ ansible.builtin.reboot:
+ reboot_timeout: 1200
+ - name: Check if FIPS mode is enabled
+ block:
+ - name: Check if FIPS mode is enabled
+ ansible.builtin.command: sysctl -n crypto.fips_enabled
+ changed_when: true
+ register: he_fips_enabled
+ - name: Enforce FIPS mode
+ ansible.builtin.fail:
+ msg: "FIPS mode is not enabled as required"
+ when: he_fips_enabled.stdout != "1"
+ when: he_enable_fips|bool
+ when: he_apply_openscap_profile|bool or he_enable_fips|bool
+ - name: Include before engine-setup custom tasks files for the engine VM
+ include_tasks: "{{ before_engine_setup_item }}"
+ with_fileglob: "hooks/enginevm_before_engine_setup/*.yml"
+ loop_control:
+ loop_var: before_engine_setup_item
+ register: include_before_engine_setup_results
+ - name: Pause the execution to allow the user to configure the bootstrap engine VM
+ block:
+ - name: Allow the user to connect to the bootstrap engine VM and change configuration
+ ansible.builtin.debug:
+ msg: >-
+ You can now connect from this host to the bootstrap engine VM using ssh as root
+ and the temporary IP address - {{ hostvars[he_ansible_host_name]['local_vm_ip']['stdout_lines'][0] }}
+ - include_tasks: ../pause_execution.yml
+ when: he_pause_before_engine_setup|bool
+ - name: Restore a backup
+ block:
+ - include_tasks: ../restore_backup.yml
+ when: he_restore_from_file is defined and he_restore_from_file
+ rescue:
+ - name: Sync on engine machine
+ ansible.builtin.command: sync
+ changed_when: true
+ - name: Fetch logs from the engine VM
+ import_tasks: fetch_engine_logs.yml
+ ignore_errors: true
+ delegate_to: "{{ he_ansible_host_name }}"
+ - name: Get local VM dir path
+ ansible.builtin.set_fact:
+ he_local_vm_dir={{ hostvars[he_ansible_host_name]['he_local_vm_dir'] }}
+ - name: Clean bootstrap engine VM
+ import_tasks: ../clean_localvm_dir.yml
+ delegate_to: "{{ he_ansible_host_name }}"
+ - name: Clean local storage pools
+ import_tasks: ../clean_local_storage_pools.yml
+ delegate_to: "{{ he_ansible_host_name }}"
+ - name: Notify the user about a failure
+ ansible.builtin.fail:
+ msg: >
+ There was a failure deploying the engine on the local engine VM.
+ The system may not be provisioned according to the playbook
+ results: please check the logs for the issue,
+ fix accordingly or re-deploy from scratch.
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/04_engine_final_tasks.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/04_engine_final_tasks.yml
new file mode 100644
index 000000000..882d1db7c
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/04_engine_final_tasks.yml
@@ -0,0 +1,90 @@
+---
+- name: Final engine tasks
+ block:
+ - name: Include after engine-setup custom tasks files for the engine VM
+ include_tasks: "{{ after_engine_setup_item }}"
+ with_fileglob: "hooks/enginevm_after_engine_setup/*.yml"
+ loop_control:
+ loop_var: after_engine_setup_item
+ register: include_after_engine_setup_results
+ # After a restart the engine has a 5 minute grace time,
+ # other actions like electing a new SPM host or reconstructing
+ # the master storage domain could require more time
+ - name: Wait for the engine to reach a stable condition
+ ansible.builtin.wait_for: timeout=600
+ when: he_restore_from_file is defined and he_restore_from_file
+ - name: Configure LibgfApi support
+ ansible.builtin.command: engine-config -s LibgfApiSupported=true --cver=4.2
+ environment: "{{ he_cmd_lang }}"
+ register: libgfapi_support_out
+ changed_when: true
+ when: he_enable_libgfapi|bool
+ - name: Save original OvfUpdateIntervalInMinutes
+ ansible.builtin.shell: "engine-config -g OvfUpdateIntervalInMinutes | cut -d' ' -f2 > /root/OvfUpdateIntervalInMinutes.txt"
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ - name: Set OVF update interval to 1 minute
+ ansible.builtin.command: engine-config -s OvfUpdateIntervalInMinutes=1
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ - name: Allow the webadmin UI to be accessed over the first host
+ block:
+ - name: Saving original value
+ ansible.builtin.replace:
+ path: /etc/ovirt-engine/engine.conf.d/11-setup-sso.conf
+ regexp: '^(SSO_ALTERNATE_ENGINE_FQDNS=.*)'
+ replace: '#\1 # pre hosted-engine-setup'
+ - name: Adding new SSO_ALTERNATE_ENGINE_FQDNS line
+ ansible.builtin.lineinfile:
+ path: /etc/ovirt-engine/engine.conf.d/11-setup-sso.conf
+ line: 'SSO_ALTERNATE_ENGINE_FQDNS="{{ he_host_address }}" # hosted-engine-setup'
+ - name: Restart ovirt-engine service for changed OVF Update configuration and LibgfApi support
+ ansible.builtin.systemd:
+ state: restarted
+ name: ovirt-engine
+ register: restart_out
+ - name: Mask cloud-init services to speed up future boot
+ ansible.builtin.systemd:
+ masked: true
+ name: "{{ item }}"
+ with_items:
+ - cloud-init-local
+ - cloud-init
+ - name: Check if keycloak is configured
+ ansible.builtin.command: otopi-config-query query -k OVESETUP_CONFIG/keycloakEnable -f /etc/ovirt-engine-setup.conf
+ register: keycloak_configured_out
+ ignore_errors: true
+ changed_when: false
+ - name: Set admin username
+ ansible.builtin.set_fact:
+ he_admin_username: >-
+ {{ 'admin@ovirt@internalsso'
+ if keycloak_configured_out.rc == 0 and keycloak_configured_out.stdout_lines[0] == 'True'
+ else 'admin@internal'
+ }}
+ register: otopi_he_admin_username
+ rescue:
+ - name: Sync on engine machine
+ ansible.builtin.command: sync
+ changed_when: true
+ - name: Fetch logs from the engine VM
+ import_tasks: fetch_engine_logs.yml
+ ignore_errors: true
+ delegate_to: "{{ he_ansible_host_name }}"
+ - name: Get local VM dir path
+ ansible.builtin.set_fact:
+ he_local_vm_dir={{ hostvars[he_ansible_host_name]['he_local_vm_dir'] }}
+ - name: Clean bootstrap engine VM
+ import_tasks: ../clean_localvm_dir.yml
+ delegate_to: "{{ he_ansible_host_name }}"
+ - name: Clean local storage pools
+ import_tasks: ../clean_local_storage_pools.yml
+ delegate_to: "{{ he_ansible_host_name }}"
+ - name: Notify the user about a failure
+ ansible.builtin.fail:
+ msg:
+ There was a failure deploying the engine on the local engine VM.
+ The system may not be provisioned according to the playbook results,
+ please check the logs for the issue,
+ fix accordingly or re-deploy from scratch.
+...
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/05_add_host.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/05_add_host.yml
new file mode 100644
index 000000000..21b0ef03e
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/05_add_host.yml
@@ -0,0 +1,275 @@
+---
+- name: Add host
+ block:
+ - name: Wait for ovirt-engine service to start
+ ansible.builtin.uri:
+ url: http://{{ he_fqdn }}/ovirt-engine/services/health
+ return_content: true
+ register: engine_status
+ until: "'DB Up!Welcome to Health Status!' in engine_status.content"
+ retries: 30
+ delay: 20
+ - name: Open a port on firewalld
+ ansible.builtin.command: firewall-cmd --zone=public --add-port {{ he_webui_forward_port }}/tcp
+ changed_when: true
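+  # The bootstrap engine VM is only reachable from this host while it runs on
+  # the NATted libvirt network, so forward its HTTPS port to make the temporary
+  # web UI reachable from outside.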
+ - name: Expose engine VM webui over a local port via ssh port forwarding
+ ansible.builtin.command: >-
+ sshpass -e ssh -tt -o ServerAliveInterval=5 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -g -L
+ {{ he_webui_forward_port }}:{{ he_fqdn }}:443 {{ he_fqdn }}
+ environment:
+ "{{ he_cmd_lang | combine( { 'SSHPASS': he_appliance_password } ) }}"
+ changed_when: true
+ async: 86400
+ poll: 0
+ register: sshpf
+ - name: Evaluate temporary bootstrap engine VM URL
+ ansible.builtin.set_fact: bootstrap_engine_url="https://{{ he_host_address }}:{{ he_webui_forward_port }}/ovirt-engine/"
+ - name: Display the temporary bootstrap engine VM URL
+ ansible.builtin.debug:
+ msg: >-
+ The bootstrap engine is temporarily accessible via {{ bootstrap_engine_url }}
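+  # If the management interface is a VLAN device, capture its VLAN ID so it can
+  # later be applied to the management network at the datacenter level.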
+ - name: Detect VLAN ID
+ ansible.builtin.shell: ip -d link show {{ he_bridge_if }} | grep 'vlan ' | grep -Po 'id \K[\d]+' | cat
+ environment: "{{ he_cmd_lang }}"
+ register: vlan_id_out
+ changed_when: true
+ - name: Set Engine public key as authorized key without validating the TLS/SSL certificates
+ authorized_key:
+ user: root
+ state: present
+ key: https://{{ he_fqdn }}/ovirt-engine/services/pki-resource?resource=engine-certificate&format=OPENSSH-PUBKEY
+ validate_certs: false
+ - include_tasks: ../auth_sso.yml
+ - name: Ensure that the target datacenter is present
+ ovirt_datacenter:
+ state: present
+ name: "{{ he_data_center }}"
+ compatibility_version: "{{ he_data_center_comp_version | default(omit) }}"
+ wait: true
+ local: false
+ auth: "{{ ovirt_auth }}"
+ register: dc_result_presence
+ - name: Ensure that the target cluster is present in the target datacenter
+ ovirt_cluster:
+ state: present
+ name: "{{ he_cluster }}"
+ compatibility_version: "{{ he_cluster_comp_version | default(omit) }}"
+ data_center: "{{ he_data_center }}"
+ cpu_type: "{{ he_cluster_cpu_type | default(omit) }}"
+ wait: true
+ auth: "{{ ovirt_auth }}"
+ register: cluster_result_presence
+ - name: Check actual cluster location
+ ansible.builtin.fail:
+ msg: >-
+ A cluster named '{{ he_cluster }}' has been created earlier in a different
+ datacenter and cluster moving is still not supported.
+ You can avoid this specifying a different cluster name;
+ please fix accordingly and try again.
+ when: cluster_result_presence.cluster.data_center.id != dc_result_presence.datacenter.id
+ - name: Enable GlusterFS at cluster level
+ ovirt_cluster:
+ data_center: "{{ he_data_center }}"
+ name: "{{ he_cluster }}"
+ compatibility_version: "{{ he_cluster_comp_version | default(omit) }}"
+ auth: "{{ ovirt_auth }}"
+ virt: true
+ gluster: true
+ fence_skip_if_gluster_bricks_up: true
+ fence_skip_if_gluster_quorum_not_met: true
+ when: he_enable_hc_gluster_service is defined and he_enable_hc_gluster_service
+ - name: Set VLAN ID at datacenter level
+ ovirt_network:
+ data_center: "{{ he_data_center }}"
+ name: "{{ he_mgmt_network }}"
+ vlan_tag: "{{ vlan_id_out.stdout }}"
+ auth: "{{ ovirt_auth }}"
+ when: vlan_id_out.stdout|length > 0
+ - name: Get active list of active firewalld zones
+ ansible.builtin.shell: set -euo pipefail && firewall-cmd --get-active-zones | grep -v "^\s*interfaces"
+ environment: "{{ he_cmd_lang }}"
+ register: active_f_zone
+ changed_when: true
+ - name: Configure libvirt firewalld zone
+ ansible.builtin.command: firewall-cmd --zone=libvirt --permanent --add-service={{ service_item }}
+ with_items:
+ - vdsm
+ - libvirt-tls
+ - ovirt-imageio
+ - ovirt-vmconsole
+ - ssh
+ loop_control:
+ loop_var: service_item
+ when: "'libvirt' in active_f_zone.stdout_lines"
+ - name: Reload firewall-cmd
+ ansible.builtin.command: firewall-cmd --reload
+ changed_when: true
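+  # Fired asynchronously: progress is tracked below by polling the host status
+  # until it reaches 'up' or 'non_operational'.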
+ - name: Add host
+ ovirt_host:
+ cluster: "{{ he_cluster }}"
+ name: "{{ he_host_name }}"
+ state: present
+ public_key: true
+ address: "{{ he_host_address }}"
+ auth: "{{ ovirt_auth }}"
+ reboot_after_installation: false
+ async: 1
+ poll: 0
+ - name: Include after_add_host tasks files
+ include_tasks: "{{ after_add_host_item }}"
+ with_fileglob: "hooks/after_add_host/*.yml"
+ loop_control:
+ loop_var: after_add_host_item
+ register: include_after_add_host_results
+ - name: Pause the execution to let the user interactively reconfigure the host
+ block:
+ - name: Let the user connect to the bootstrap engine VM to manually fix host configuration
+ ansible.builtin.debug:
+ msg: >-
+ You can now connect to {{ bootstrap_engine_url }} and check the status of this host and
+ eventually remediate it, please continue only when the host is listed as 'up'
+ - include_tasks: ../pause_execution.yml
+ when: he_pause_host|bool
+ # refresh the auth token after a long operation to avoid having it expired
+ - include_tasks: ../auth_revoke.yml
+ - include_tasks: ../auth_sso.yml
+ - name: Wait for the host to be up
+ ovirt_host_info:
+ pattern: name={{ he_host_name }}
+ auth: "{{ ovirt_auth }}"
+ register: host_result_up_check
+ until: >-
+ host_result_up_check is succeeded and
+ host_result_up_check.ovirt_hosts|length >= 1 and
+ (
+ host_result_up_check.ovirt_hosts[0].status == 'up' or
+ host_result_up_check.ovirt_hosts[0].status == 'non_operational'
+ )
+ retries: 120
+ delay: 10
+ ignore_errors: true
+ - name: Notify the user about a failure
+ ansible.builtin.fail:
+ msg: >-
+        Host is not up; please check the logs, perhaps also on the engine machine
+ when: host_result_up_check is failed
+
+ - name: Emit error messages about the failure
+ block:
+ - name: Set host_id
+ ansible.builtin.set_fact: host_id={{ host_result_up_check.ovirt_hosts[0].id }}
+ - name: Collect error events from the Engine
+ ovirt_event_info:
+ auth: "{{ ovirt_auth }}"
+ search: "severity>=warning"
+ register: error_events
+
+ - name: Generate the error message from the engine events
+ ansible.builtin.set_fact:
+ error_description: >-
+ {% for event in error_events.ovirt_events | groupby('code') %}
+ {% if 'host' in event[1][0] and 'id' in event[1][0].host and event[1][0].host.id == host_id %}
+ code {{ event[0] }}: {{ event[1][0].description }},
+ {% endif %}
+ {% endfor %}
+ ignore_errors: true
+
+ - name: Notify with error description
+ ansible.builtin.debug:
+ msg: >-
+            The host has been set to non_operational status,
+ deployment errors: {{ error_description }}
+ when: error_description is defined
+
+ - name: Notify with generic error
+ ansible.builtin.debug:
+ msg: >-
+            The host has been set to non_operational status;
+            please check the engine logs for more details.
+ when: error_description is not defined
+ when: >-
+ host_result_up_check is succeeded and
+ host_result_up_check.ovirt_hosts|length >= 1 and
+ host_result_up_check.ovirt_hosts[0].status == 'non_operational'
+
+ - name: Pause the execution to let the user interactively reconfigure the host
+ block:
+ - name: Let the user connect to the bootstrap engine to manually fix host configuration
+ ansible.builtin.debug:
+ msg: >-
+            You can now connect to {{ bootstrap_engine_url }} to check the status of this host
+            and, if needed, remediate it; please continue only once the host is listed as 'up'
+ - include_tasks: ../pause_execution.yml
+ when: >-
+ he_pause_after_failed_add_host|bool and
+ host_result_up_check is succeeded and
+ host_result_up_check.ovirt_hosts|length >= 1 and
+ host_result_up_check.ovirt_hosts[0].status == 'non_operational'
+
+    # Refresh the auth token after a long operation so that it does not expire
+ - include_tasks: ../auth_revoke.yml
+ - include_tasks: ../auth_sso.yml
+ - name: Check if the host is up
+ ovirt_host_info:
+ pattern: name={{ he_host_name }}
+ auth: "{{ ovirt_auth }}"
+ register: host_result_up_check
+ ignore_errors: true
+
+ - name: Handle deployment failure
+ block:
+ - name: Set host_id
+ ansible.builtin.set_fact: host_id={{ host_result_up_check.ovirt_hosts[0].id }}
+ - name: Collect error events from the Engine
+ ovirt_event_info:
+ auth: "{{ ovirt_auth }}"
+ search: "severity>=warning"
+ register: error_events
+
+ - name: Generate the error message from the engine events
+ ansible.builtin.set_fact:
+ error_description: >-
+ {% for event in error_events.ovirt_events | groupby('code') %}
+ {% if event[1][0].host.id == host_id %}
+ code {{ event[0] }}: {{ event[1][0].description }},
+ {% endif %}
+ {% endfor %}
+ ignore_errors: true
+
+ - name: Fail with error description
+ ansible.builtin.fail:
+ msg: >-
+            The host has been set to non_operational status,
+ deployment errors: {{ error_description }}
+ fix accordingly and re-deploy.
+ when: error_description is defined
+
+ - name: Fail with generic error
+ ansible.builtin.fail:
+ msg: >-
+            The host has been set to non_operational status;
+            please check the engine logs for more details,
+            fix accordingly, and re-deploy.
+ when: error_description is not defined
+
+ when: >-
+ host_result_up_check is succeeded and
+ host_result_up_check.ovirt_hosts|length >= 1 and
+ host_result_up_check.ovirt_hosts[0].status == 'non_operational'
+ rescue:
+ - name: Sync on engine machine
+ ansible.builtin.command: sync
+ changed_when: true
+ - name: Fetch logs from the engine VM
+ include_tasks: fetch_engine_logs.yml
+ ignore_errors: true
+ - include_tasks: ../clean_localvm_dir.yml
+ - include_tasks: ../clean_local_storage_pools.yml
+ - name: Notify the user about a failure
+ ansible.builtin.fail:
+ msg: >
+ The system may not be provisioned according to the playbook
+ results: please check the logs for the issue,
+ fix accordingly or re-deploy from scratch.
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/clean_cloud_init_config.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/clean_cloud_init_config.yml
new file mode 100644
index 000000000..bf5aacfed
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/clean_cloud_init_config.yml
@@ -0,0 +1,20 @@
+---
+- name: Remove cloud-user user
+ ansible.builtin.user:
+ name: cloud-user
+ state: absent
+ remove: true
+- name: Remove cloud-init file from /etc/sudoers.d
+ ansible.builtin.file:
+ path: /etc/sudoers.d/90-cloud-init-users
+ state: absent
+- name: Remove cloud-user from /etc/sudoers file
+ ansible.builtin.lineinfile:
+ path: /etc/sudoers
+ state: absent
+ regexp: '^cloud-user'
+- name: Remove cloud-init package
+ ansible.builtin.dnf:
+ name: cloud-init
+ state: absent
+ disablerepo: "*"
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/clean_local_storage_pools.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/clean_local_storage_pools.yml
new file mode 100644
index 000000000..adf6d4196
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/clean_local_storage_pools.yml
@@ -0,0 +1,28 @@
+---
+- name: Clean storage-pool
+ block:
+ - name: Destroy local storage-pool {{ he_local_vm_dir | basename }}
+ ansible.builtin.command: >-
+ virsh -c qemu:///system?authfile={{ he_libvirt_authfile }}
+ pool-destroy {{ he_local_vm_dir | basename }}
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ - name: Undefine local storage-pool {{ he_local_vm_dir | basename }}
+ ansible.builtin.command: >-
+ virsh -c qemu:///system?authfile={{ he_libvirt_authfile }}
+ pool-undefine {{ he_local_vm_dir | basename }}
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ - name: Destroy local storage-pool {{ local_vm_disk_path.split('/')[5] }}
+ ansible.builtin.command: >-
+ virsh -c qemu:///system?authfile={{ he_libvirt_authfile }}
+ pool-destroy {{ local_vm_disk_path.split('/')[5] }}
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ - name: Undefine local storage-pool {{ local_vm_disk_path.split('/')[5] }}
+ ansible.builtin.command: >-
+ virsh -c qemu:///system?authfile={{ he_libvirt_authfile }}
+ pool-undefine {{ local_vm_disk_path.split('/')[5] }}
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ ignore_errors: true
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/clean_localvm_dir.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/clean_localvm_dir.yml
new file mode 100644
index 000000000..f4708366a
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/clean_localvm_dir.yml
@@ -0,0 +1,11 @@
+---
+- name: Remove local vm dir
+ ansible.builtin.file:
+ path: "{{ he_local_vm_dir }}"
+ state: absent
+ register: rm_localvm_dir
+- name: Remove temporary entry in /etc/hosts for the local VM
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ regexp: "# temporary entry added by hosted-engine-setup for the bootstrap VM$"
+ state: absent
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_storage_domain.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_storage_domain.yml
new file mode 100644
index 000000000..5e7c510f0
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_storage_domain.yml
@@ -0,0 +1,191 @@
+---
+- name: Create hosted engine local vm
+ block:
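+    # 'ip -j link show' emits JSON; the until condition below parses it with
+    # from_json, maps the 'operstate' attribute of the result list and waits
+    # until it reads 'UP'.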
+ - name: Wait for the storage interface to be up
+ ansible.builtin.command: ip -j link show '{{ he_storage_if }}'
+ register: storage_if_result_up_check
+ until: >-
+ storage_if_result_up_check.stdout|from_json|map(attribute='operstate')|join('') == 'UP'
+ retries: 120
+ delay: 5
+ delegate_to: "{{ he_ansible_host_name }}"
+ when: (he_domain_type == "glusterfs" or he_domain_type == "nfs") and he_storage_if is not none
+ - name: Check local VM dir stat
+ ansible.builtin.stat:
+ path: "{{ he_local_vm_dir }}"
+ register: local_vm_dir_stat
+ - name: Enforce local VM dir existence
+ ansible.builtin.fail:
+ msg: "Local VM dir '{{ he_local_vm_dir }}' doesn't exist"
+ when: not local_vm_dir_stat.stat.exists
+ - include_tasks: auth_sso.yml
+ - name: Fetch host facts
+ ovirt_host_info:
+ pattern: name={{ he_host_name }}
+ auth: "{{ ovirt_auth }}"
+ register: host_result
+ until: >-
+ host_result and 'ovirt_hosts' in host_result
+ and host_result.ovirt_hosts|length >= 1 and
+ 'up' in host_result.ovirt_hosts[0].status
+ retries: 50
+ delay: 10
+ - name: Fetch cluster ID
+ ansible.builtin.set_fact: cluster_id="{{ host_result.ovirt_hosts[0].cluster.id }}"
+ - name: Fetch cluster facts
+ ovirt_cluster_info:
+ auth: "{{ ovirt_auth }}"
+ register: cluster_facts
+ - name: Fetch Datacenter facts
+ ovirt_datacenter_info:
+ auth: "{{ ovirt_auth }}"
+ register: datacenter_facts
+ - name: Fetch Datacenter ID
+ ansible.builtin.set_fact: >-
+ datacenter_id={{ cluster_facts.ovirt_clusters|ovirt.ovirt.json_query("[?id=='" + cluster_id + "'].data_center.id")|first }}
+ - name: Fetch Datacenter name
+ ansible.builtin.set_fact: >-
+ datacenter_name={{ datacenter_facts.ovirt_datacenters|ovirt.ovirt.json_query("[?id=='" + datacenter_id + "'].name")|first }}
+ - name: Fetch cluster name
+ ansible.builtin.set_fact: >-
+ cluster_name={{ cluster_facts.ovirt_clusters|ovirt.ovirt.json_query("[?id=='" + cluster_id + "'].name")|first }}
+ - name: Fetch cluster version
+ ansible.builtin.set_fact: >-
+ cluster_version={{ cluster_facts.ovirt_clusters|ovirt.ovirt.json_query("[?id=='" + cluster_id + "'].version")|first }}
+ - name: Enforce cluster major version
+ ansible.builtin.fail:
+ msg: "Cluster {{ cluster_name }} major version is {{ cluster_version.major }}, needs to be at least 4"
+ when: cluster_version.major < 4
+ - name: Enforce cluster minor version
+ ansible.builtin.fail:
+ msg: "Cluster {{ cluster_name }} minor version is {{ cluster_version.minor }}, needs to be at least 2"
+ when: cluster_version.minor < 2
+ - name: Set storage_format
+ ansible.builtin.set_fact: >-
+ storage_format={{ 'v4' if cluster_version.minor == 2 else 'v5' }}
+ - name: Add NFS storage domain
+ ovirt_storage_domain:
+ state: unattached
+ name: "{{ he_storage_domain_name }}"
+ host: "{{ he_host_name }}"
+ data_center: "{{ datacenter_name }}"
+ storage_format: "{{ storage_format }}"
+ wait: true
+ nfs:
+ address: "{{ he_storage_domain_addr }}"
+ path: "{{ he_storage_domain_path }}"
+ mount_options: "{{ he_mount_options }}"
+ version: "{{ he_nfs_version }}"
+ auth: "{{ ovirt_auth }}"
+ when: he_domain_type == "nfs"
+ register: otopi_storage_domain_details_nfs
+ - name: Add glusterfs storage domain
+ ovirt_storage_domain:
+ state: unattached
+ name: "{{ he_storage_domain_name }}"
+ host: "{{ he_host_name }}"
+ data_center: "{{ datacenter_name }}"
+ storage_format: "{{ storage_format }}"
+ wait: true
+ glusterfs:
+ address: "{{ he_storage_domain_addr }}"
+ path: "{{ he_storage_domain_path }}"
+ mount_options: "{{ he_mount_options }}"
+ auth: "{{ ovirt_auth }}"
+ when: he_domain_type == "glusterfs"
+ register: otopi_storage_domain_details_gluster
+ - name: Add iSCSI storage domain
+ ovirt_storage_domain:
+ state: unattached
+ name: "{{ he_storage_domain_name }}"
+ host: "{{ he_host_name }}"
+ data_center: "{{ datacenter_name }}"
+ storage_format: "{{ storage_format }}"
+ wait: true
+ discard_after_delete: "{{ he_discard }}"
+        # We send a single iSCSI path but, whether intended or a bug, the
+        # engine implicitly creates the storage domain consuming all the
+        # paths that are already connected on the host (we cannot log out
+        # since there is no logout command in the REST API, see
+        # https://bugzilla.redhat.com/show_bug.cgi?id=1535951 ).
+ iscsi:
+ address: "{{ he_storage_domain_addr.split(',')|first }}"
+ port: "{{ he_iscsi_portal_port.split(',')|first if he_iscsi_portal_port is string else he_iscsi_portal_port }}"
+ target: "{{ he_iscsi_target }}"
+ lun_id: "{{ he_lun_id }}"
+ username: "{{ he_iscsi_username }}"
+ password: "{{ he_iscsi_password }}"
+ auth: "{{ ovirt_auth }}"
+ when: he_domain_type == "iscsi"
+ register: otopi_storage_domain_details_iscsi
+ - name: Add Fibre Channel storage domain
+ ovirt_storage_domain:
+ state: unattached
+ name: "{{ he_storage_domain_name }}"
+ host: "{{ he_host_name }}"
+ data_center: "{{ datacenter_name }}"
+ storage_format: "{{ storage_format }}"
+ wait: true
+ discard_after_delete: "{{ he_discard }}"
+ fcp:
+ lun_id: "{{ he_lun_id }}"
+ auth: "{{ ovirt_auth }}"
+ register: otopi_storage_domain_details_fc
+ when: he_domain_type == "fc"
+ - name: Get storage domain details
+ ovirt_storage_domain_info:
+ pattern: name={{ he_storage_domain_name }}
+ auth: "{{ ovirt_auth }}"
+ register: storage_domain_details
+ - name: Find the appliance OVF
+ ansible.builtin.find:
+ paths: "{{ he_local_vm_dir }}/master"
+ recurse: true
+ patterns: ^.*.(?<!meta).ovf$
+ use_regex: true
+ register: app_ovf
+ - name: Get ovf data
+ ansible.builtin.command: cat "{{ app_ovf.files[0].path }}"
+ register: ovf_data
+ changed_when: false
+ - name: Get disk size from ovf data
+ ansible.builtin.set_fact:
+ disk_size: "{{ ovf_data['stdout'] | ovirt.ovirt.get_ovf_disk_size }}"
+ - name: Get required size
+ ansible.builtin.set_fact:
+ required_size: >-
+ {{ disk_size|int * 1024 * 1024 * 1024 +
+ storage_domain_details.ovirt_storage_domains[0].critical_space_action_blocker|int *
+ 1024 * 1024 * 1024 + 5 * 1024 * 1024 * 1024 }}
+ # +5G: 2xOVF_STORE, lockspace, metadata, configuration
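+      # Illustrative numbers (not from the source): with a 51 GiB appliance
+      # disk and a 5 GiB critical-space threshold, required_size works out to
+      # 51 + 5 + 5 = 61 GiB.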
+ - name: Remove unsuitable storage domain
+ ovirt_storage_domain:
+ host: "{{ he_host_name }}"
+ data_center: "{{ datacenter_name }}"
+ name: "{{ he_storage_domain_name }}"
+ wait: true
+ state: absent
+ destroy: true
+ auth: "{{ ovirt_auth }}"
+ when: storage_domain_details.ovirt_storage_domains[0].available|int < required_size|int
+ register: remove_storage_domain_details
+ - name: Check storage domain free space
+ ansible.builtin.fail:
+ msg: >-
+        Error: the target storage domain contains only
+        {{ storage_domain_details.ovirt_storage_domains[0].available|int / 1024 / 1024 / 1024 }}GiB of
+        available space while a minimum of {{ required_size|int / 1024 / 1024 / 1024 }}GiB is required.
+        If you wish to use the current target storage domain by extending it, make sure it contains nothing
+        before adding it.
+ when: storage_domain_details.ovirt_storage_domains[0].available|int < required_size|int
+ - name: Activate storage domain
+ ovirt_storage_domain:
+ host: "{{ he_host_name }}"
+ data_center: "{{ datacenter_name }}"
+ name: "{{ he_storage_domain_name }}"
+ wait: true
+ state: present
+ auth: "{{ ovirt_auth }}"
+ when: storage_domain_details.ovirt_storage_domains[0].available|int >= required_size|int
+ register: otopi_storage_domain_details
+...
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_target_vm/01_create_target_hosted_engine_vm.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_target_vm/01_create_target_hosted_engine_vm.yml
new file mode 100644
index 000000000..9f916e9ce
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_target_vm/01_create_target_hosted_engine_vm.yml
@@ -0,0 +1,173 @@
+---
+- name: Create target Hosted Engine VM
+ block:
+ - include_tasks: ../auth_sso.yml
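+    # The bootstrap VM is attached to libvirt's 'default' NAT network; grep
+    # its MAC address in the DHCP leases table to discover the IP it obtained.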
+ - name: Get local VM IP
+ ansible.builtin.shell: virsh -r net-dhcp-leases default | grep -i {{ he_vm_mac_addr }} | awk '{ print $5 }' | cut -f1 -d'/'
+ environment: "{{ he_cmd_lang }}"
+ register: local_vm_ip
+ changed_when: true
+ - name: Set the name for add_host
+ ansible.builtin.set_fact:
+ he_fqdn_ansible_host: "{{ local_vm_ip.stdout_lines[0] }}"
+ - import_tasks: ../add_engine_as_ansible_host.yml
+ - name: Fetch host facts
+ ovirt_host_info:
+ pattern: name={{ he_host_name }} status=up
+ auth: "{{ ovirt_auth }}"
+ register: host_result
+ until: host_result is succeeded and host_result.ovirt_hosts|length >= 1
+ retries: 50
+ delay: 10
+ - name: Fetch Cluster ID
+ ansible.builtin.set_fact: cluster_id="{{ host_result.ovirt_hosts[0].cluster.id }}"
+ - name: Fetch Cluster facts
+ ovirt_cluster_info:
+ auth: "{{ ovirt_auth }}"
+ register: cluster_facts
+ - name: Fetch Datacenter facts
+ ovirt_datacenter_info:
+ auth: "{{ ovirt_auth }}"
+ register: datacenter_facts
+ - name: Fetch Cluster name
+ ansible.builtin.set_fact: cluster_name={{ cluster_facts.ovirt_clusters|ovirt.ovirt.json_query("[?id=='" + cluster_id + "'].name")|first }}
+ - name: Fetch Datacenter ID
+ ansible.builtin.set_fact: >-
+ datacenter_id={{ cluster_facts.ovirt_clusters|ovirt.ovirt.json_query("[?id=='" + cluster_id + "'].data_center.id")|first }}
+ - name: Fetch Datacenter name
+ ansible.builtin.set_fact: >-
+ datacenter_name={{ datacenter_facts.ovirt_datacenters|ovirt.ovirt.json_query("[?id=='" + datacenter_id + "'].name")|first }}
+ - name: Parse Cluster details
+ ansible.builtin.set_fact:
+ cluster_cpu: >-
+ {{ cluster_facts.ovirt_clusters|selectattr('id', 'match', '^'+cluster_id+'$')|map(attribute='cpu')|list|first }}
+ cluster_version: >-
+ {{ cluster_facts.ovirt_clusters|selectattr('id', 'match', '^'+cluster_id+'$')|
+ map(attribute='version')|list|first }}
+ - name: Get server CPU list
+ ovirt.ovirt.ovirt_system_option_info:
+ auth: "{{ ovirt_auth }}"
+ name: ServerCPUList
+ version: "{{ cluster_version.major }}.{{ cluster_version.minor }}"
+ register: server_cpu_list
+ - name: Get cluster emulated machine list
+ ovirt.ovirt.ovirt_system_option_info:
+ name: ClusterEmulatedMachines
+ auth: "{{ ovirt_auth }}"
+ version: "{{ cluster_version.major }}.{{ cluster_version.minor }}"
+ register: emulated_machine_list
+ - name: Prepare for parsing server CPU list
+ ansible.builtin.set_fact:
+ server_cpu_dict: {}
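+    # Each ServerCPUList record appears to be a ':'-separated tuple of
+    # 'level:display name:flags:libvirt model'; the next task maps field 1
+    # (display name) to field 3 (libvirt model) for the cluster CPU lookup.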
+ - name: Parse server CPU list
+ ansible.builtin.set_fact:
+ server_cpu_dict: "{{ server_cpu_dict | combine({item.split(':')[1]: item.split(':')[3]}) }}"
+ with_items: >-
+ {{ server_cpu_list['ovirt_system_option']['values'][0]['value'].split('; ')|list|difference(['']) }}
+ - name: Convert CPU model name
+ ansible.builtin.set_fact:
+ cluster_cpu_model: "{{ server_cpu_dict[cluster_cpu.type] }}"
+ - name: Parse emulated_machine
+ ansible.builtin.set_fact:
+ emulated_machine: >-
+ {{ emulated_machine_list['ovirt_system_option']['values'][0]['value'].replace(
+ '[','').replace(']','').split(', ')|first }}
+ - name: Get storage domain details
+ ovirt_storage_domain_info:
+ pattern: name={{ he_storage_domain_name }} and datacenter={{ datacenter_name }}
+ auth: "{{ ovirt_auth }}"
+ register: storage_domain_details
+ - name: Add HE disks
+ ovirt_disk:
+ name: "{{ item.name }}"
+ size: "{{ item.size }}"
+ format: "{{ item.format }}"
+ sparse: "{{ item.sparse }}"
+ description: "{{ item.description }}"
+ content_type: "{{ item.content }}"
+ interface: virtio
+ storage_domain: "{{ he_storage_domain_name }}"
+ wait: true
+ timeout: 600
+ auth: "{{ ovirt_auth }}"
+ with_items:
+ - {
+ name: 'he_virtio_disk',
+ description: 'Hosted-Engine disk',
+ size: "{{ he_disk_size_GB }}GiB",
+ format: 'raw',
+ sparse: "{{ false if he_domain_type == 'fc' or he_domain_type == 'iscsi' else true }}",
+ content: 'hosted_engine'
+ }
+ - {
+ name: 'he_sanlock',
+ description: 'Hosted-Engine sanlock disk',
+ size: '1GiB',
+ format: 'raw',
+ sparse: false,
+ content: 'hosted_engine_sanlock'
+ }
+ - {
+ name: 'HostedEngineConfigurationImage',
+ description: 'Hosted-Engine configuration disk',
+ size: '1GiB',
+ format: 'raw',
+ sparse: false,
+ content: 'hosted_engine_configuration'
+ }
+ - {
+ name: 'he_metadata',
+ description: 'Hosted-Engine metadata disk',
+ size: '128MiB',
+ format: 'raw',
+ sparse: false,
+ content: 'hosted_engine_metadata'
+ }
+ register: add_disks
+ - name: Register disk details
+ ansible.builtin.set_fact:
+ he_virtio_disk_details: "{{ add_disks.results[0] }}"
+ he_sanlock_disk_details: "{{ add_disks.results[1] }}"
+ he_conf_disk_details: "{{ add_disks.results[2] }}"
+ he_metadata_disk_details: "{{ add_disks.results[3] }}"
+ - name: Set VNC graphic protocol
+ ansible.builtin.set_fact:
+ he_graphic_protocols: [vnc]
+ - name: Check if FIPS is enabled
+ ansible.builtin.command: sysctl -n crypto.fips_enabled
+ register: he_fips_enabled
+ changed_when: false
+ - name: Add VM
+ ovirt_vm:
+ state: stopped
+ cluster: "{{ cluster_name }}"
+ name: "{{ he_vm_name }}"
+ description: 'Hosted Engine Virtual Machine'
+ memory: "{{ he_mem_size_MB }}Mib"
+ cpu_cores: "{{ he_vcpus }}"
+ cpu_sockets: 1
+ graphical_console:
+ headless_mode: false
+ protocol: "{{ he_graphic_protocols }}"
+ serial_console: false
+ operating_system: rhel_8x64
+ bios_type: q35_sea_bios
+ type: server
+ high_availability_priority: 1
+ high_availability: false
+ delete_protected: true
+ # timezone: "{{ he_time_zone }}" # TODO: fix with the right parameter syntax
+ disks:
+ - id: "{{ he_virtio_disk_details.disk.id }}"
+ nics:
+ - name: vnet0
+ profile_name: "{{ he_mgmt_network }}"
+ interface: virtio
+ mac_address: "{{ he_vm_mac_addr }}"
+ auth: "{{ ovirt_auth }}"
+ register: he_vm_details
+ - name: Register external local VM uuid
+ ansible.builtin.shell: virsh -r domuuid {{ he_vm_name }}Local | head -1
+ environment: "{{ he_cmd_lang }}"
+ register: external_local_vm_uuid
+ changed_when: true
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_target_vm/02_engine_vm_configuration.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_target_vm/02_engine_vm_configuration.yml
new file mode 100644
index 000000000..849cba789
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_target_vm/02_engine_vm_configuration.yml
@@ -0,0 +1,81 @@
+---
+- name: Engine VM configuration tasks
+ block:
+ - name: Create a temporary directory for ansible as postgres user
+ ansible.builtin.file:
+ path: /var/lib/pgsql/.ansible/tmp
+ state: directory
+ owner: postgres
+ group: postgres
+ mode: 0700
+ - name: Update target VM details at DB level
+ ansible.builtin.command: >-
+ "{{ engine_psql }}" -c
+ "UPDATE vm_static SET {{ item.field }}={{ item.value }} WHERE
+ vm_guid='{{ hostvars[he_ansible_host_name]['he_vm_details']['vm']['id'] }}'"
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: db_vm_update
+ with_items:
+ - {field: 'origin', value: 6}
+ - name: Insert Hosted Engine configuration disk uuid into Engine database
+ ansible.builtin.command: >-
+ "{{ engine_psql }}" -c
+ "UPDATE vdc_options SET option_value=
+ '{{ hostvars[he_ansible_host_name]['he_conf_disk_details']['disk']['id'] }}'
+ WHERE option_name='HostedEngineConfigurationImageGuid' AND version='general'"
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: db_conf_update
+ - name: Fetch host SPM_ID
+ ansible.builtin.command: >-
+ "{{ engine_psql }}" -t -c
+ "SELECT vds_spm_id FROM vds WHERE vds_name='{{ hostvars[he_ansible_host_name]['he_host_name'] }}'"
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: host_spm_id_out
+ - name: Parse host SPM_ID
+ ansible.builtin.set_fact: host_spm_id="{{ host_spm_id_out.stdout_lines|first|trim }}"
+ - name: Restore original DisableFenceAtStartupInSec
+ ansible.builtin.shell: "engine-config -s DisableFenceAtStartupInSec=$(cat /root/DisableFenceAtStartupInSec.txt)"
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ when: he_restore_from_file is defined and he_restore_from_file
+ - name: Remove DisableFenceAtStartupInSec temporary file
+ ansible.builtin.file:
+ path: /root/DisableFenceAtStartupInSec.txt
+ state: absent
+ when: he_restore_from_file is defined and he_restore_from_file
+ - name: Restore original OvfUpdateIntervalInMinutes
+ ansible.builtin.shell: "engine-config -s OvfUpdateIntervalInMinutes=$(cat /root/OvfUpdateIntervalInMinutes.txt)"
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ - name: Remove OvfUpdateIntervalInMinutes temporary file
+ ansible.builtin.file:
+ path: /root/OvfUpdateIntervalInMinutes.txt
+ state: absent
+ changed_when: true
+ - name: Restore original SSO_ALTERNATE_ENGINE_FQDNS
+ block:
+ - name: Removing temporary value
+ ansible.builtin.lineinfile:
+ path: /etc/ovirt-engine/engine.conf.d/11-setup-sso.conf
+ regexp: '^SSO_ALTERNATE_ENGINE_FQDNS=.* # hosted-engine-setup'
+ state: absent
+ - name: Restoring original value
+ ansible.builtin.replace:
+ path: /etc/ovirt-engine/engine.conf.d/11-setup-sso.conf
+ regexp: '^#(SSO_ALTERNATE_ENGINE_FQDNS=.*) # pre hosted-engine-setup'
+ replace: '\1'
+ - name: Remove temporary directory for ansible as postgres user
+ ansible.builtin.file:
+ path: /var/lib/pgsql/.ansible
+ state: absent
+ - name: Configure PermitRootLogin for sshd to its final value
+ ansible.builtin.lineinfile:
+ dest: /etc/ssh/sshd_config
+ regexp: "^\\s*PermitRootLogin"
+ line: "PermitRootLogin {{ he_root_ssh_access }}"
+ state: present
+ - name: Clean cloud-init configuration
+ include_tasks: ../clean_cloud_init_config.yml
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_target_vm/03_hosted_engine_final_tasks.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_target_vm/03_hosted_engine_final_tasks.yml
new file mode 100644
index 000000000..b7641d1d1
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_target_vm/03_hosted_engine_final_tasks.yml
@@ -0,0 +1,501 @@
+---
+- name: Hosted-Engine final tasks
+ block:
+ - name: Choose IPv4, IPv6 or auto
+ import_tasks: ../ipv_switch.yml
+ - name: Trigger hosted engine OVF update and enable the serial console
+ ovirt_vm:
+ id: "{{ he_vm_details.vm.id }}"
+ description: "Hosted engine VM"
+ serial_console: true
+ auth: "{{ ovirt_auth }}"
+ - name: Wait until OVF update finishes
+ ovirt_storage_domain_info:
+ auth: "{{ ovirt_auth }}"
+ fetch_nested: true
+ nested_attributes:
+ - name
+ - image_id
+ - id
+ pattern: "name={{ he_storage_domain_name }}"
+ retries: 12
+ delay: 10
+ register: storage_domain_details
+ until: "storage_domain_details.ovirt_storage_domains[0].disks | selectattr('name', 'match', '^OVF_STORE$') | list"
+ - name: Parse OVF_STORE disk list
+ ansible.builtin.set_fact:
+ ovf_store_disks: >-
+ {{ storage_domain_details.ovirt_storage_domains[0].disks |
+ selectattr('name', 'match', '^OVF_STORE$') | list }}
+ - name: Check OVF_STORE volume status
+ ansible.builtin.command: >-
+ vdsm-client Volume getInfo storagepoolID={{ datacenter_id }}
+ storagedomainID={{ storage_domain_details.ovirt_storage_domains[0].id }}
+ imageID={{ item.id }} volumeID={{ item.image_id }}
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: ovf_store_status
+ retries: 12
+ delay: 10
+ until: >-
+ ovf_store_status.rc == 0 and ovf_store_status.stdout|from_json|ovirt.ovirt.json_query('status') == 'OK' and
+ ovf_store_status.stdout|from_json|ovirt.ovirt.json_query('description')|from_json|ovirt.ovirt.json_query('Updated')
+ with_items: "{{ ovf_store_disks }}"
+ - name: Wait for OVF_STORE disk content
+ ansible.builtin.shell: >-
+ vdsm-client Image prepare storagepoolID={{ datacenter_id }}
+ storagedomainID={{ storage_domain_details.ovirt_storage_domains[0].id }} imageID={{ item.id }}
+ volumeID={{ item.image_id }} | grep path | awk '{ print $2 }' |
+ xargs -I{} sudo -u vdsm dd if={} | tar -tvf - {{ he_vm_details.vm.id }}.ovf
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: ovf_store_content
+ retries: 12
+ delay: 10
+ until: ovf_store_content.rc == 0
+ with_items: "{{ ovf_store_disks }}"
+ args:
+ warn: false
+ - name: Prepare images
+ ansible.builtin.command: >-
+ vdsm-client Image prepare storagepoolID={{ datacenter_id }}
+ storagedomainID={{ storage_domain_details.ovirt_storage_domains[0].id }}
+ imageID={{ item.disk.id }} volumeID={{ item.disk.image_id }}
+ environment: "{{ he_cmd_lang }}"
+ with_items:
+ - "{{ he_virtio_disk_details }}"
+ - "{{ he_conf_disk_details }}"
+ - "{{ he_metadata_disk_details }}"
+ - "{{ he_sanlock_disk_details }}"
+ register: prepareimage_results
+ changed_when: true
+ - name: Fetch Hosted Engine configuration disk path
+ ansible.builtin.set_fact:
+ he_conf_disk_path: >-
+ {{ (prepareimage_results.results|ovirt.ovirt.json_query("[?item.id=='" +
+ he_conf_disk_details.id + "'].stdout")|first|from_json).path }}
+ - name: Fetch Hosted Engine virtio disk path
+ ansible.builtin.set_fact:
+ he_virtio_disk_path: >-
+ {{ (prepareimage_results.results|ovirt.ovirt.json_query("[?item.id=='" +
+ he_virtio_disk_details.id + "'].stdout")|first|from_json).path }}
+ - name: Fetch Hosted Engine virtio metadata path
+ ansible.builtin.set_fact:
+ he_metadata_disk_path: >-
+ {{ (prepareimage_results.results|ovirt.ovirt.json_query("[?item.id=='" +
+ he_metadata_disk_details.id + "'].stdout")|first|from_json).path }}
+ - name: Shutdown local VM
+ ansible.builtin.command: "virsh -c qemu:///system?authfile={{ he_libvirt_authfile }} shutdown {{ he_vm_name }}Local"
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ - name: Wait for local VM shutdown
+ ansible.builtin.command: virsh -r domstate "{{ he_vm_name }}Local"
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: dominfo_out
+ until: dominfo_out.rc == 0 and 'shut off' in dominfo_out.stdout
+ retries: 120
+ delay: 5
+ - name: Undefine local VM
+ ansible.builtin.command: "virsh -c qemu:///system?authfile={{ he_libvirt_authfile }} undefine {{ he_vm_name }}Local"
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ - name: Update libvirt default network configuration, destroy
+ ansible.builtin.command: "virsh -c qemu:///system?authfile={{ he_libvirt_authfile }} net-destroy default"
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ - name: Update libvirt default network configuration, undefine
+ ansible.builtin.command: "virsh -c qemu:///system?authfile={{ he_libvirt_authfile }} net-undefine default"
+ environment: "{{ he_cmd_lang }}"
+ ignore_errors: true
+ changed_when: true
+ - name: Detect ovirt-hosted-engine-ha version
+ ansible.builtin.command: >-
+ /usr/libexec/platform-python -c
+ 'from ovirt_hosted_engine_ha.agent import constants as agentconst; print(agentconst.PACKAGE_VERSION)'
+ environment: "{{ he_cmd_lang }}"
+ register: ha_version_out
+ changed_when: true
+ - name: Set ha_version
+ ansible.builtin.set_fact: ha_version="{{ ha_version_out.stdout_lines|first }}"
+ - name: Create configuration templates
+ ansible.builtin.template:
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ mode: 0644
+ with_items:
+ - {src: templates/vm.conf.j2, dest: "{{ he_local_vm_dir }}/vm.conf"}
+ - {src: templates/broker.conf.j2, dest: "{{ he_local_vm_dir }}/broker.conf"}
+ - {src: templates/version.j2, dest: "{{ he_local_vm_dir }}/version"}
+ - {src: templates/fhanswers.conf.j2, dest: "{{ he_local_vm_dir }}/fhanswers.conf"}
+ - {src: templates/hosted-engine.conf.j2, dest: "{{ he_local_vm_dir }}/hosted-engine.conf"}
+ - name: Create configuration archive
+ ansible.builtin.command: >-
+ tar --record-size=20480 -cvf {{ he_conf_disk_details.disk.image_id }}
+ vm.conf broker.conf version fhanswers.conf hosted-engine.conf
+ environment: "{{ he_cmd_lang }}"
+ args:
+ chdir: "{{ he_local_vm_dir }}"
+ warn: false
+ become: true
+ become_user: vdsm
+ become_method: sudo
+ changed_when: true
+ tags: ['skip_ansible_lint']
+ - name: Create ovirt-hosted-engine-ha run directory
+ ansible.builtin.file:
+ path: /var/run/ovirt-hosted-engine-ha
+ state: directory
+ mode: 0755
+ - name: Copy vm.conf to the right location on host
+ ansible.builtin.copy:
+ remote_src: true
+ src: "{{ he_local_vm_dir }}/vm.conf"
+ dest: "/var/run/ovirt-hosted-engine-ha"
+ owner: 'vdsm'
+ group: 'kvm'
+ mode: 0640
+ - name: Copy hosted-engine.conf to the right location on host
+ ansible.builtin.copy:
+ remote_src: true
+ src: "{{ he_local_vm_dir }}/hosted-engine.conf"
+ dest: "/etc/ovirt-hosted-engine/"
+ owner: 'vdsm'
+ group: 'kvm'
+ mode: 0440
+ - name: Check fapolicyd status
+ ansible.builtin.systemd:
+ name: fapolicyd
+ register: fapolicyd_s
+ - name: Set fapolicyd rules path
+ ansible.builtin.set_fact:
+ fapolicyd_rules_dir: /etc/fapolicyd/rules.d
+ - name: Verify fapolicyd/rules.d directory
+ ansible.builtin.stat:
+ path: "{{ fapolicyd_rules_dir }}"
+ register: fapolicy_rules
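+    # fapolicyd can deny execution of files it does not know about; the rule
+    # shipped as 35-allow-ansible-for-vdsm.rules (assumed intent) temporarily
+    # allows the Ansible helpers to run as the vdsm user and is removed again
+    # after the disk copy.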
+ - name: Add rule to fapolicy
+ block:
+ - name: Add rule to /etc/fapolicyd/rules.d
+ ansible.builtin.copy:
+ src: 35-allow-ansible-for-vdsm.rules
+ dest: "{{ fapolicyd_rules_dir }}"
+ mode: 0644
+ - name: Restart fapolicyd service
+ ansible.builtin.service:
+ name: fapolicyd
+ state: restarted
+ when: fapolicyd_s.status.SubState == 'running' and fapolicy_rules.stat.exists
+ - name: Copy configuration archive to storage
+ ansible.builtin.command: >-
+ dd bs=20480 count=1 oflag=direct if="{{ he_local_vm_dir }}/{{ he_conf_disk_details.disk.image_id }}"
+ of="{{ he_conf_disk_path }}"
+ environment: "{{ he_cmd_lang }}"
+ become: true
+ become_user: vdsm
+ become_method: sudo
+ changed_when: true
+ args:
+ warn: false
+ - name: Initialize metadata volume
+      # Data is written at offset 4KiB*host_id; since oVirt supports up to 250
+      # hosts per datacenter, at most 250*4KiB (about 1MiB) is ever used, so
+      # zeroing the first 1MiB is sufficient.
+ ansible.builtin.command: dd conv=notrunc bs=1M count=1 oflag=direct if=/dev/zero of="{{ he_metadata_disk_path }}"
+ environment: "{{ he_cmd_lang }}"
+ become: true
+ become_user: vdsm
+ become_method: sudo
+ changed_when: true
+ - include_tasks: ../get_local_vm_disk_path.yml
+ - name: Generate DHCP network configuration for the engine VM
+ ansible.builtin.template:
+ src: templates/ifcfg-eth0-dhcp.j2
+ dest: "{{ he_local_vm_dir }}/ifcfg-eth0"
+ owner: root
+ group: root
+ mode: 0644
+ when: he_vm_ip_addr is none
+ - name: Generate static network configuration for the engine VM, IPv4
+ ansible.builtin.template:
+ src: templates/ifcfg-eth0-static.j2
+ dest: "{{ he_local_vm_dir }}/ifcfg-eth0"
+ owner: root
+ group: root
+ mode: 0644
+ when: he_vm_ip_addr is not none and he_vm_ip_addr | ipv4
+ - name: Generate static network configuration for the engine VM, IPv6
+ ansible.builtin.template:
+ src: templates/ifcfg-eth0-static-ipv6.j2
+ dest: "{{ he_local_vm_dir }}/ifcfg-eth0"
+ owner: root
+ group: root
+ mode: 0644
+ when: he_vm_ip_addr is not none and he_vm_ip_addr | ipv6
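+    # guestfish chains commands with ':'; writing it as {{ ":" }} appears
+    # intended to keep the YAML parser from treating the literal colon as a
+    # mapping separator inside the command string.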
+ - name: Inject network configuration with guestfish
+ ansible.builtin.command: >-
+ guestfish -a {{ local_vm_disk_path }} --rw -i copy-in "{{ he_local_vm_dir }}/ifcfg-eth0"
+ /etc/sysconfig/network-scripts {{ ":" }} selinux-relabel /etc/selinux/targeted/contexts/files/file_contexts
+ /etc/sysconfig/network-scripts/ifcfg-eth0 force{{ ":" }}true
+ environment:
+ LIBGUESTFS_BACKEND: direct
+ LANG: en_US.UTF-8
+ LC_MESSAGES: en_US.UTF-8
+ LC_ALL: en_US.UTF-8
+ changed_when: true
+ - name: Extract /etc/hosts from the Hosted Engine VM
+ ansible.builtin.command: virt-copy-out -a {{ local_vm_disk_path }} /etc/hosts "{{ he_local_vm_dir }}"
+ environment:
+ LIBGUESTFS_BACKEND: direct
+ LANG: en_US.UTF-8
+ LC_MESSAGES: en_US.UTF-8
+ LC_ALL: en_US.UTF-8
+ changed_when: true
+ - name: Clean /etc/hosts for the Hosted Engine VM for Engine VM FQDN
+ ansible.builtin.lineinfile:
+ dest: "{{ he_local_vm_dir }}/hosts"
+ regexp: "# hosted-engine-setup-{{ hostvars[he_ansible_host_name]['he_local_vm_dir'] }}$"
+ state: absent
+ - name: Add an entry on /etc/hosts for the Hosted Engine VM for the VM itself
+ ansible.builtin.lineinfile:
+ dest: "{{ he_local_vm_dir }}/hosts"
+ line: "{{ he_vm_ip_addr }} {{ he_fqdn }}"
+ state: present
+ when: he_vm_etc_hosts and he_vm_ip_addr is not none
+ - name: Clean /etc/hosts for the Hosted Engine VM for host address
+ ansible.builtin.lineinfile:
+ dest: "{{ he_local_vm_dir }}/hosts"
+ line: "{{ he_host_ip }} {{ he_host_address }}"
+ state: absent
+ when: not he_vm_etc_hosts
+ - name: Inject /etc/hosts with guestfish
+ ansible.builtin.command: >-
+ guestfish -a {{ local_vm_disk_path }} --rw -i copy-in "{{ he_local_vm_dir }}/hosts"
+ /etc {{ ":" }} selinux-relabel /etc/selinux/targeted/contexts/files/file_contexts
+ /etc/hosts force{{ ":" }}true
+ environment:
+ LIBGUESTFS_BACKEND: direct
+ LANG: en_US.UTF-8
+ LC_MESSAGES: en_US.UTF-8
+ LC_ALL: en_US.UTF-8
+ changed_when: true
+ - name: Copy local VM disk to shared storage
+ ansible.builtin.command: >-
+ qemu-img convert -f qcow2 -O raw -t none -T none {{ local_vm_disk_path }} {{ he_virtio_disk_path }}
+ environment: "{{ he_cmd_lang }}"
+ become: true
+ become_user: vdsm
+ become_method: sudo
+ changed_when: true
+ - name: Verify copy of VM disk
+ ansible.builtin.command: qemu-img compare {{ local_vm_disk_path }} {{ he_virtio_disk_path }}
+ environment: "{{ he_cmd_lang }}"
+ become: true
+ become_user: vdsm
+ become_method: sudo
+ changed_when: true
+ when: he_debug_mode|bool
+ - name: Remove rule from fapolicy
+ block:
+ - name: Remove rule from /etc/fapolicyd/rules.d
+ ansible.builtin.file:
+ path: "{{ fapolicyd_rules_dir }}/35-allow-ansible-for-vdsm.rules"
+ state: absent
+ - name: Restart fapolicyd service
+ ansible.builtin.service:
+ name: fapolicyd
+ state: restarted
+ when: fapolicyd_s.status.SubState == 'running' and fapolicy_rules.stat.exists
+ - name: Remove temporary entry in /etc/hosts for the local VM
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ regexp: "# temporary entry added by hosted-engine-setup for the bootstrap VM$"
+ state: absent
+ - name: Set the name for add_host
+ ansible.builtin.set_fact:
+ he_fqdn_ansible_host: "{{ he_fqdn }}"
+ - import_tasks: ../add_engine_as_ansible_host.yml
+ - name: Start ovirt-ha-broker service on the host
+ ansible.builtin.service:
+ name: ovirt-ha-broker
+ state: started
+ enabled: true
+ - name: Initialize lockspace volume
+ ansible.builtin.command: hosted-engine --reinitialize-lockspace --force
+ environment: "{{ he_cmd_lang }}"
+ register: result
+ until: result.rc == 0
+ ignore_errors: true
+ retries: 5
+ delay: 10
+ changed_when: true
+ - name: Initialize lockspace volume block
+ block:
+ - name: Workaround for ovirt-ha-broker start failures
+        # Ugly workaround for https://bugzilla.redhat.com/1768511;
+        # fix it on the ovirt-ha-broker side and remove this ASAP
+ ansible.builtin.systemd:
+ state: restarted
+ enabled: true
+ name: ovirt-ha-broker
+ - name: Initialize lockspace volume
+ ansible.builtin.command: hosted-engine --reinitialize-lockspace --force
+ environment: "{{ he_cmd_lang }}"
+ register: result2
+ until: result2.rc == 0
+ retries: 5
+ delay: 10
+ changed_when: true
+ - name: Debug var result2
+ ansible.builtin.debug:
+ var: result2
+ when: result.rc != 0
+ - name: Start ovirt-ha-agent service on the host
+ ansible.builtin.service:
+ name: ovirt-ha-agent
+ state: started
+ enabled: true
+ - name: Exit HE maintenance mode
+ ansible.builtin.command: hosted-engine --set-maintenance --mode=none
+ environment: "{{ he_cmd_lang }}"
+ register: mresult
+ until: mresult.rc == 0
+ retries: 3
+ delay: 10
+ changed_when: true
+ - name: Wait for the engine to come up on the target VM
+ block:
+ - name: Check engine VM health
+ ansible.builtin.command: hosted-engine --vm-status --json
+ environment: "{{ he_cmd_lang }}"
+ register: health_result
+ until: >-
+ health_result.rc == 0 and 'health' in health_result.stdout and
+ health_result.stdout|from_json|ovirt.ovirt.json_query('*."engine-status"."health"')|first=="good" and
+ health_result.stdout|from_json|ovirt.ovirt.json_query('*."engine-status"."detail"')|first=="Up"
+ retries: 180
+ delay: 5
+ changed_when: true
+ - name: Debug var health_result
+ ansible.builtin.debug:
+ var: health_result
+ rescue:
+ - name: Check VM status at virt level
+ ansible.builtin.shell: virsh -r list | grep {{ he_vm_name }} | grep running
+ environment: "{{ he_cmd_lang }}"
+ ignore_errors: true
+ changed_when: true
+ register: vm_status_virsh
+ - name: Debug var vm_status_virsh
+ ansible.builtin.debug:
+ var: vm_status_virsh
+ - name: Fail if engine VM is not running
+ ansible.builtin.fail:
+ msg: "Engine VM is not running, please check vdsm logs"
+ when: vm_status_virsh.rc != 0
+ - name: Get target engine VM IP address
+ ansible.builtin.shell: getent {{ ip_key }} {{ he_fqdn }} | cut -d' ' -f1 | uniq
+ environment: "{{ he_cmd_lang }}"
+ register: engine_vm_ip
+ changed_when: true
+ - name: Get VDSM's target engine VM stats
+ ansible.builtin.command: vdsm-client VM getStats vmID={{ he_vm_details.vm.id }}
+ environment: "{{ he_cmd_lang }}"
+ register: engine_vdsm_stats
+ changed_when: true
+ - name: Convert stats to JSON format
+ ansible.builtin.set_fact: json_stats={{ engine_vdsm_stats.stdout|from_json }}
+ - name: Get target engine VM IP address from VDSM stats
+ ansible.builtin.set_fact: engine_vm_ip_vdsm={{ json_stats[0].guestIPs }}
+ - name: Debug var engine_vm_ip_vdsm
+ ansible.builtin.debug:
+ var: engine_vm_ip_vdsm
+ - name: Fail if Engine IP is different from engine's he_fqdn resolved IP
+ ansible.builtin.fail:
+ msg: >-
+ Engine VM IP address is {{ engine_vm_ip_vdsm }} while the engine's he_fqdn {{ he_fqdn }} resolves to
+ {{ engine_vm_ip.stdout_lines[0] }}. If you are using DHCP, check your DHCP reservation configuration
+ when: engine_vm_ip_vdsm != engine_vm_ip.stdout_lines[0]
+      - name: Fail if the engine did not start for any other reason
+ ansible.builtin.fail:
+ msg: The engine failed to start inside the engine VM; please check engine.log.
+ - name: Get target engine VM address
+ ansible.builtin.shell: getent {{ ip_key }} {{ he_fqdn }} | cut -d ' ' -f1 | uniq
+ environment: "{{ he_cmd_lang }}"
+ register: engine_vm_ip
+ when: engine_vm_ip is not defined
+ changed_when: true
+ # Workaround for ovn-central being configured with the address of the bootstrap engine VM.
+ # Keep this aligned with:
+ # https://github.com/oVirt/ovirt-engine/blob/master/packaging/ansible-runner-service-project/project/roles/ovirt-provider-ovn-driver/tasks/main.yml
+ - name: Reconfigure OVN central address
+ ansible.builtin.command: vdsm-tool ovn-config {{ engine_vm_ip.stdout_lines[0] }} {{ he_mgmt_network }} {{ he_host_address }}
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ # Workaround for https://bugzilla.redhat.com/1540107
+ # the engine fails deleting a VM if its status in the engine DB
+ # is not up to date.
+ - include_tasks: ../auth_sso.yml
+ - name: Check for the local bootstrap engine VM
+ ovirt_vm_info:
+ pattern: id="{{ external_local_vm_uuid.stdout_lines|first }}"
+ auth: "{{ ovirt_auth }}"
+ register: local_vm_f
+ - name: Remove the bootstrap local VM
+ block:
+ - name: Make the engine aware that the external VM is stopped
+ ignore_errors: true
+ ovirt_vm:
+ state: stopped
+ id: "{{ external_local_vm_uuid.stdout_lines|first }}"
+ auth: "{{ ovirt_auth }}"
+ register: vmstop_result
+ - name: Debug var vmstop_result
+ ansible.builtin.debug:
+ var: vmstop_result
+        - name: Wait for the local bootstrap engine VM to be seen as down by the engine
+ ovirt_vm_info:
+ pattern: id="{{ external_local_vm_uuid.stdout_lines|first }}"
+ auth: "{{ ovirt_auth }}"
+ register: local_vm_status
+ until: local_vm_status.ovirt_vms[0].status == "down"
+ retries: 24
+ delay: 5
+ - name: Debug var local_vm_status
+ ansible.builtin.debug:
+ var: local_vm_status
+ - name: Remove bootstrap external VM from the engine
+ ovirt_vm:
+ state: absent
+ id: "{{ external_local_vm_uuid.stdout_lines|first }}"
+ auth: "{{ ovirt_auth }}"
+ register: vmremove_result
+ - name: Debug var vmremove_result
+ ansible.builtin.debug:
+ var: vmremove_result
+ when: local_vm_f.ovirt_vms|length > 0
+ - name: Remove ovirt-engine-appliance rpm
+ ansible.builtin.yum:
+ name: ovirt-engine-appliance
+ state: absent
+ register: yum_result
+ until: yum_result is success
+ retries: 10
+ delay: 5
+ when: he_remove_appliance_rpm|bool
+
+ - name: Include custom tasks for after setup customization
+ include_tasks: "{{ after_setup_item }}"
+ with_fileglob: "hooks/after_setup/*.yml"
+ loop_control:
+ loop_var: after_setup_item
+ register: after_setup_results
+ rescue:
+ - name: Fetch logs from the engine VM
+ include_tasks: ../fetch_engine_logs.yml
+ ignore_errors: true
+ - name: Notify the user about a failure
+ ansible.builtin.fail:
+ msg: >
+ The system may not be provisioned according to the playbook
+ results: please check the logs for the issue,
+ fix accordingly or re-deploy from scratch.
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/fc_getdevices.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/fc_getdevices.yml
new file mode 100644
index 000000000..bcf55fbd5
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/fc_getdevices.yml
@@ -0,0 +1,10 @@
+---
+- include_tasks: auth_sso.yml
+- name: Get Fibre Channel LUNs
+ ovirt_host_storage_info:
+ host: "{{ he_host_name }}"
+ fcp:
+ lun_id: -1 # currently it is unused and I use it to turn on FC filtering
+ auth: "{{ ovirt_auth }}"
+ register: otopi_fc_devices
+...
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/fetch_engine_logs.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/fetch_engine_logs.yml
new file mode 100644
index 000000000..d01f53b82
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/fetch_engine_logs.yml
@@ -0,0 +1,41 @@
+---
+- name: Set destination directory path
+ ansible.builtin.set_fact:
+ destdir=/var/log/ovirt-hosted-engine-setup/engine-logs-{{ ansible_date_time.iso8601 }}/
+- name: Create destination directory
+ ansible.builtin.file:
+ state: directory
+ path: "{{ destdir }}"
+ owner: root
+ group: root
+ mode: 0700
+- include_tasks: get_local_vm_disk_path.yml
+- name: Give the VM time to flush dirty buffers
+ ansible.builtin.wait_for:
+ timeout: 10
+ delegate_to: localhost
+ become: false
+- name: Copy engine logs
+ ansible.builtin.command: virt-copy-out -a {{ local_vm_disk_path }} {{ item }} {{ destdir }}
+ environment:
+ LIBGUESTFS_BACKEND: direct
+ LANG: en_US.UTF-8
+ LC_MESSAGES: en_US.UTF-8
+ LC_ALL: en_US.UTF-8
+ ignore_errors: true
+ changed_when: true
+ with_items:
+ - /var/log
+ when: local_vm_disk_path is defined
+- name: Change ownership of copied engine logs
+  # Files owned by the ovirt user/group would be created with uid/gid 108, as
+  # the ovirt user/group do not exist on hosts. OpenSCAP requires files to
+  # have an owner/group, and no other process should automatically have
+  # access, so using 'root' is good enough.
+ ansible.builtin.file:
+ path: "{{ destdir }}"
+ owner: root
+ group: root
+ recurse: true
+ ignore_errors: true
+ changed_when: true
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/fetch_host_ip.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/fetch_host_ip.yml
new file mode 100644
index 000000000..53907369c
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/fetch_host_ip.yml
@@ -0,0 +1,41 @@
+---
+- name: Get full hostname
+ ansible.builtin.command: hostname -f
+ changed_when: true
+ register: host_full_name
+- name: Set hostname variable if not defined
+ ansible.builtin.set_fact:
+ he_host_name: "{{ host_full_name.stdout_lines[0] }}"
+ when: he_host_name is none
+- name: Define host address variable if not defined
+ ansible.builtin.set_fact:
+ he_host_address: "{{ host_full_name.stdout_lines[0] }}"
+ when: he_host_address is none
+- name: Get host IP address
+ block:
+ - name: Choose IPv4, IPv6 or auto
+ import_tasks: ipv_switch.yml
+ - name: Get host address resolution
+ ansible.builtin.shell: getent {{ ip_key }} {{ he_host_address }} | grep STREAM
+ register: hostname_resolution_output
+ changed_when: true
+ ignore_errors: true
+ - name: Check address resolution
+ ansible.builtin.fail:
+ msg: >
+ Unable to resolve address
+ when: hostname_resolution_output.rc != 0
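+    # getent prints 'ADDRESS STREAM hostname' lines; splitting on whitespace
+    # and filtering through ipaddr keeps only valid addresses, and subtracting
+    # the link-local ones leaves the first routable IP of the host.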
+ - name: Parse host address resolution
+ ansible.builtin.set_fact:
+ he_host_ip: "{{
+ (
+ hostname_resolution_output.stdout.split() | ipaddr |
+ difference(hostname_resolution_output.stdout.split() |
+ ipaddr('link-local')
+ )
+ )[0]
+ }}"
+- name: Fail if host's ip is empty
+ ansible.builtin.fail:
+ msg: Host has no IP address
+ when: he_host_ip is none
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/filter_team_devices.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/filter_team_devices.yml
new file mode 100644
index 000000000..cfd0883d6
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/filter_team_devices.yml
@@ -0,0 +1,33 @@
+---
+- name: Collect interface types
+ ansible.builtin.shell: set -euo pipefail && nmcli -g GENERAL.TYPE device show {{ nic }}
+ with_items:
+ - "{{ host_net }}"
+ loop_control:
+ loop_var: nic
+ changed_when: true
+ register: interface_types
+- name: Check for Team devices
+ ansible.builtin.set_fact:
+    is_team: "{{ nic_if.stdout.find('team') != -1 }}"
+ when: nic_if.stdout.find('team') != -1
+ with_items:
+ - "{{ interface_types.results }}"
+ loop_control:
+ loop_var: nic_if
+ register: team_list
+- name: Get list of Team devices
+  ansible.builtin.set_fact:
+    team_if: "{{ team_list.results | reject('skipped') | map(attribute='nic_if.nic') | list }}"
+- name: Filter team devices
+ ansible.builtin.set_fact:
+ team_if_diff: "{{ host_net | difference(team_if) }}"
+- name: Fail if only team devices are available
+ ansible.builtin.fail:
+ msg: >-
+ Only Team devices {{ team_if | join(', ') }} are present.
+ Teaming is unsupported.
+ when: (team_if_diff | length == 0)
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/filter_unsupported_vlan_devices.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/filter_unsupported_vlan_devices.yml
new file mode 100644
index 000000000..49a5a3f7b
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/filter_unsupported_vlan_devices.yml
@@ -0,0 +1,64 @@
+---
+- name: Search VLAN devices
+ ansible.builtin.set_fact:
+ is_vlan: "{{ nic_if.stdout == 'vlan' }}"
+ when: nic_if.stdout.find('vlan') != -1
+ with_items:
+ - "{{ interface_types.results }}"
+ loop_control:
+ loop_var: nic_if
+ register: vlan_list
+- name: Check for base interface of VLAN devices
+ ansible.builtin.command: nmcli -g VLAN.PARENT device show {{ vlan_device.nic_if.nic }}
+ when: vlan_device.ansible_facts is defined and vlan_device.ansible_facts.is_vlan
+ with_items:
+ - "{{ vlan_list.results | reject('skipped') | list }}"
+ loop_control:
+ loop_var: vlan_device
+ register: vlan_base_interfaces
+- name: Get base interface types of VLAN devices
+ ansible.builtin.command: nmcli -g GENERAL.TYPE device show {{ vlan_base_interface.stdout }}
+ when: vlan_base_interface.skipped is undefined and vlan_base_interface.stdout is defined
+ with_items:
+ - "{{ vlan_base_interfaces.results }}"
+ loop_control:
+ loop_var: vlan_base_interface
+ register: vlan_base_types
+- name: Check for bond as base type of VLAN device
+ ansible.builtin.set_fact:
+ bond_parent: "{{ vlan_base_type.vlan_base_interface.stdout }}"
+ when: vlan_base_type.skipped is undefined and vlan_base_type.stdout is defined and vlan_base_type.stdout == "bond"
+ with_items:
+ - "{{ vlan_base_types.results | reject('skipped') | list }}"
+ loop_control:
+ loop_var: vlan_base_type
+ register: vlan_bond_list
+- name: Check if bond base interface of VLAN device is in supported mode
+ ansible.builtin.set_fact:
+ bond_parent_mode: "{{ hostvars[inventory_hostname]['ansible_' + vlan_bond_device.ansible_facts.bond_parent]['mode'] }}"
+ vlan_bond_valid_if: "{{ vlan_bond_device.vlan_base_type.vlan_base_interface.vlan_device.nic_if.nic }}"
+ is_valid_bond_mode: "{{ hostvars[inventory_hostname]['ansible_' + vlan_bond_device.ansible_facts.bond_parent]['mode'] in acceptable_bond_modes }}"
+ with_items: "{{ vlan_bond_list.results | reject('skipped') | list }}"
+ loop_control:
+ loop_var: vlan_bond_device
+ register: vlan_bond_valid_mode_list
+- name: Collect VLAN devices with bad bond mode base interfaces
+ ansible.builtin.set_fact:
+ bbm_vlan: "{{ vlan_bond_item.ansible_facts.vlan_bond_valid_if }}"
+ when: not vlan_bond_item.ansible_facts.is_valid_bond_mode
+ with_items:
+ - "{{ vlan_bond_valid_mode_list.results }}"
+ loop_control:
+ loop_var: vlan_bond_item
+ register: bbm_vlan_list
+- name: Generate invalid VLANs list
+ ansible.builtin.set_fact:
+ bad_vlan_bond_list: "{{ bbm_vlan_list.results | reject('skipped') | map(attribute='vlan_bond_item.ansible_facts.vlan_bond_valid_if') | list }}"
+- name: Create list of unsupported network devices
+ ansible.builtin.set_fact:
+ invalid_net_if: "{{ bad_vlan_bond_list + team_if }}"
+- import_tasks: validate_vlan_name.yml
+- import_tasks: validate_vlan_bond_mode.yml
+- name: Generate list of all unsupported VLAN devices
+ ansible.builtin.set_fact:
+ invalid_vlan_if: "{{ bad_vlan_bond_list + bad_vlan_names_list }}"
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/final_clean.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/final_clean.yml
new file mode 100644
index 000000000..1000d2ed2
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/final_clean.yml
@@ -0,0 +1,11 @@
+---
+- name: Clean temporary resources
+ block:
+ - name: Fetch logs from the engine VM
+ include_tasks: fetch_engine_logs.yml
+ ignore_errors: true
+ - include_tasks: clean_localvm_dir.yml
+ - name: Clean local storage pools
+ include_tasks: clean_local_storage_pools.yml
+ ignore_errors: true
+...
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/full_execution.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/full_execution.yml
new file mode 100644
index 000000000..26e749d1c
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/full_execution.yml
@@ -0,0 +1,72 @@
+---
+- name: Install packages and bootstrap local engine VM
+ block:
+ - name: Install required packages for oVirt Hosted Engine deployment
+ import_tasks: install_packages.yml
+ when: not he_offline_deployment|bool
+
+ - name: System configuration validations
+ include_tasks: "{{ pre_checks_item }}"
+ with_fileglob: "pre_checks/*.yml"
+ loop_control:
+ loop_var: pre_checks_item
+
+ - name: Clean environment before deployment
+ import_tasks: initial_clean.yml
+
+ - name: 01_02 bootstrap local vm tasks
+ block:
+ - name: 01 Bootstrap local VM
+ import_tasks: bootstrap_local_vm/01_prepare_routing_rules.yml
+
+ - name: 02 Bootstrap local VM
+ import_tasks: bootstrap_local_vm/02_create_local_vm.yml
+
+- name: Local engine VM installation - Pre tasks
+ block:
+ - name: 03 Bootstrap local VM
+ import_tasks: bootstrap_local_vm/03_engine_initial_tasks.yml
+ delegate_to: "{{ groups.engine[0] }}"
+
+- name: Engine Setup on local VM
+ block:
+ - name: Engine Setup on local VM
+ vars:
+ ovirt_engine_setup_hostname: "{{ he_fqdn.split('.')[0] }}"
+ ovirt_engine_setup_organization: "{{ he_cloud_init_domain_name }}"
+ ovirt_engine_setup_dwh_db_host: "{{ he_fqdn.split('.')[0] }}"
+ ovirt_engine_setup_firewall_manager: null
+ ovirt_engine_setup_answer_file_path: /root/ovirt-engine-answers
+ ovirt_engine_setup_use_remote_answer_file: true
+ ovirt_engine_setup_offline: "{{ he_offline_deployment }}"
+ ovirt_engine_setup_package_list: "{{ he_additional_package_list }}"
+ ovirt_engine_setup_admin_password: "{{ he_admin_password }}"
+ ansible.builtin.import_role:
+ name: ovirt.ovirt.engine_setup
+ delegate_to: "{{ groups.engine[0] }}"
+
+- name: Local engine VM installation - Post tasks
+ block:
+ - name: 04 Bootstrap local VM
+ import_tasks: bootstrap_local_vm/04_engine_final_tasks.yml
+ delegate_to: "{{ groups.engine[0] }}"
+
+- name: Configure engine VM on a storage domain
+ block:
+ - name: 05 Bootstrap local VM
+ import_tasks: bootstrap_local_vm/05_add_host.yml
+ - name: Create Storage Domain
+ import_tasks: create_storage_domain.yml
+ - name: Create target hosted engine vm
+ import_tasks: create_target_vm/01_create_target_hosted_engine_vm.yml
+
+- name: Configure database settings
+ import_tasks: create_target_vm/02_engine_vm_configuration.yml
+ delegate_to: "{{ groups.engine[0] }}"
+
+- name: Closeup
+ block:
+ - name: Hosted engine final tasks
+ import_tasks: create_target_vm/03_hosted_engine_final_tasks.yml
+ - name: Final clean
+ import_tasks: final_clean.yml
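Note: full_execution.yml is the default path imported by main.yml further
below. A minimal sketch of a playbook driving this role; the hostnames and
the vaulted password variable are placeholders, and the full required
variable set depends on the chosen storage type:

    ---
    - name: Deploy oVirt Hosted Engine
      hosts: host0.example.com
      become: true
      vars:
        he_fqdn: engine.example.com
        he_admin_password: "{{ vault_he_admin_password }}"
        he_offline_deployment: false
      roles:
        - ovirt.ovirt.hosted_engine_setup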
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/get_appliance_dist.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/get_appliance_dist.yml
new file mode 100644
index 000000000..1b17f64cd
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/get_appliance_dist.yml
@@ -0,0 +1,9 @@
+---
+- name: Get appliance distribution
+ ansible.builtin.setup:
+ filter: ansible_distribution*
+ register: ansible_appliance_dist
+- name: Set appliance distribution variables
+ ansible.builtin.set_fact:
+ appliance_dist: "{{ ansible_appliance_dist.ansible_facts.ansible_distribution }}"
+ appliance_ver: "{{ ansible_appliance_dist.ansible_facts.ansible_distribution_major_version }}"
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/get_local_vm_disk_path.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/get_local_vm_disk_path.yml
new file mode 100644
index 000000000..6cf78a41c
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/get_local_vm_disk_path.yml
@@ -0,0 +1,12 @@
+---
+- name: Find the local appliance image
+ ansible.builtin.find:
+ paths: "{{ he_local_vm_dir }}/images"
+ recurse: true
+ patterns: ^.*.(?<!meta)$
+ use_regex: true
+ register: app_img
+- name: Set local_vm_disk_path
+ ansible.builtin.set_fact:
+ local_vm_disk_path={{ app_img.files[0].path }}
+ when: app_img.files|length > 0
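Note: the pattern ^.*.(?<!meta)$ uses a negative lookbehind so that the
.meta companion files under the images tree are skipped and only the disk
image itself matches. An equivalent sketch using find's excludes parameter
instead of a regex (not the role's actual code):

    - name: Find the local appliance image
      ansible.builtin.find:
        paths: "{{ he_local_vm_dir }}/images"
        recurse: true
        excludes: "*.meta"
      register: app_img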
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/initial_clean.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/initial_clean.yml
new file mode 100644
index 000000000..b95aa68d5
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/initial_clean.yml
@@ -0,0 +1,150 @@
+---
+- name: Initial clean
+ tags: he_initial_clean
+ block:
+ - name: Stop libvirt service
+ ansible.builtin.service:
+ name: libvirtd
+ state: stopped
+ enabled: true
+ - name: Drop vdsm config statements
+ ansible.builtin.shell: >-
+ [ -r {{ item }} ] && sed -i
+ '/## beginning of configuration section by
+ vdsm-4.[0-9]\+.[0-9]\+/,/## end of configuration section by vdsm-4.[0-9]\+.[0-9]\+/d' {{ item }} || :
+ environment: "{{ he_cmd_lang }}"
+ args:
+ warn: false
+ with_items:
+ - /etc/libvirt/libvirtd.conf
+ - /etc/libvirt/qemu.conf
+ - /etc/libvirt/qemu-sanlock.conf
+ - /etc/sysconfig/libvirtd
+ tags: ['skip_ansible_lint']
+ - name: Drop VNC encryption config statements
+ ansible.builtin.command: >-
+ sed -i
+ '/## beginning of configuration section for VNC encryption/,/##
+ end of configuration section for VNC encryption\+/d' /etc/libvirt/qemu.conf
+ args:
+ warn: false
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ - name: Check if vdsm's abrt-action-save-package-data config exists
+ ansible.builtin.stat:
+ path: /etc/abrt/abrt-action-save-package-data.conf
+ register: abrt_vdsm_config
+ - name: Check if abrt is installed
+ ansible.builtin.stat:
+ path: /usr/share/abrt/conf.d/abrt-action-save-package-data.conf
+ register: abrt_installed_config
+ - name: Restore initial abrt config files
+ ansible.builtin.copy:
+ remote_src: true
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ mode: preserve
+ with_items:
+ - {
+ src: /usr/share/abrt/conf.d/abrt-action-save-package-data.conf,
+ dest: /etc/abrt/abrt-action-save-package-data.conf
+ }
+ - {
+ src: /usr/share/abrt/conf.d/abrt.conf,
+ dest: /etc/abrt/abrt.conf
+ }
+ - {
+ src: /usr/share/abrt/conf.d/plugins/CCpp.conf,
+ dest: /etc/abrt/plugins/CCpp.conf
+ }
+ - {
+ src: /usr/share/abrt/conf.d/plugins/vmcore.conf,
+ dest: /etc/abrt/plugins/vmcore.conf
+ }
+ when:
+ - abrt_vdsm_config.stat.exists
+ - abrt_installed_config.stat.exists
+ - name: Restart abrtd service
+ ansible.builtin.service:
+ name: abrtd
+ state: restarted
+ when:
+ - abrt_vdsm_config.stat.exists
+ - abrt_installed_config.stat.exists
+ - name: Remove vdsm's abrt config files
+ ansible.builtin.file:
+ state: absent
+ path: "{{ item }}"
+ with_items:
+ - /etc/abrt/abrt-action-save-package-data.conf
+ - /etc/abrt/abrt.conf
+ - /etc/abrt/plugins/CCpp.conf
+ - /etc/abrt/plugins/vmcore.conf
+ when:
+ - abrt_vdsm_config.stat.exists
+ - not abrt_installed_config.stat.exists
+ - name: Drop libvirt sasl2 configuration by vdsm
+ ansible.builtin.command: >-
+ sed -i '/## start vdsm-4.[0-9]\+.[0-9]\+ configuration/,/## end vdsm configuration/d' /etc/sasl2/libvirt.conf
+ environment: "{{ he_cmd_lang }}"
+ args:
+ warn: false
+ tags: ['skip_ansible_lint']
+ - name: Stop and disable services
+ ansible.builtin.service:
+ name: "{{ item }}"
+ state: stopped
+ enabled: false
+ with_items:
+ - ovirt-ha-agent
+ - ovirt-ha-broker
+ - vdsmd
+ - libvirtd-tls.socket
+ - name: Restore initial libvirt default network configuration
+ ansible.builtin.copy:
+ remote_src: true
+ src: /usr/share/libvirt/networks/default.xml
+ dest: /etc/libvirt/qemu/networks/default.xml
+ mode: preserve
+ - name: Start libvirt
+ ansible.builtin.service:
+ name: libvirtd
+ state: started
+ enabled: true
+ - name: Check for leftover local Hosted Engine VM
+ ansible.builtin.shell: virsh list | grep {{ he_vm_name }}Local | cat
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: local_vm_list
+ - name: Destroy leftover local Hosted Engine VM
+ ansible.builtin.command: virsh destroy {{ he_vm_name }}Local
+ environment: "{{ he_cmd_lang }}"
+ ignore_errors: true
+ when: local_vm_list.stdout_lines|length >= 1
+ - name: Check for leftover defined local Hosted Engine VM
+ ansible.builtin.shell: virsh list --all | grep {{ he_vm_name }}Local | cat
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: local_vm_list_all
+ - name: Undefine leftover local engine VM
+ ansible.builtin.command: virsh undefine --managed-save {{ he_vm_name }}Local
+ environment: "{{ he_cmd_lang }}"
+ when: local_vm_list_all.stdout_lines|length >= 1
+ changed_when: true
+ - name: Check for leftover defined Hosted Engine VM
+ ansible.builtin.shell: virsh list --all | grep {{ he_vm_name }} | cat
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: target_vm_list_all
+ - name: Undefine leftover engine VM
+ ansible.builtin.command: virsh undefine --managed-save {{ he_vm_name }}
+ environment: "{{ he_cmd_lang }}"
+ when: target_vm_list_all.stdout_lines|length >= 1
+ changed_when: true
+    - name: Remove any entries for the local VM from the known_hosts file
+ ansible.builtin.known_hosts:
+ name: "{{ he_fqdn }}"
+ state: absent
+ delegate_to: localhost
+ become: false
+...
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/install_appliance.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/install_appliance.yml
new file mode 100644
index 000000000..6f5ce0f90
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/install_appliance.yml
@@ -0,0 +1,33 @@
+---
+- name: Install ovirt-engine-appliance rpm
+ ansible.builtin.yum:
+ name: ovirt-engine-appliance
+ state: present
+ when: not he_offline_deployment|bool
+ register: task_result
+ until: task_result is success
+ retries: 10
+ delay: 2
+- name: Parse appliance configuration for path
+ ansible.builtin.shell: set -euo pipefail && grep path /etc/ovirt-hosted-engine/10-appliance.conf | cut -f2 -d'='
+ environment: "{{ he_cmd_lang }}"
+ register: he_appliance_ova_out
+ changed_when: true
+- name: Parse appliance configuration for sha1sum
+ ansible.builtin.shell: set -euo pipefail && grep sha1sum /etc/ovirt-hosted-engine/10-appliance.conf | cut -f2 -d'='
+ environment: "{{ he_cmd_lang }}"
+ register: he_appliance_ova_sha1
+ changed_when: true
+- name: Get OVA path
+ ansible.builtin.set_fact:
+ he_appliance_ova_path: "{{ he_appliance_ova_out.stdout_lines|first }}"
+ cacheable: true
+- name: Compute sha1sum
+ ansible.builtin.stat:
+ path: "{{ he_appliance_ova_path }}"
+ checksum_algorithm: sha1
+ register: ova_stats
+- name: Compare sha1sum
+ ansible.builtin.fail:
+ msg: "{{ he_appliance_ova_path }} is corrupted (sha1sum)"
+ when: he_appliance_ova_sha1.stdout_lines|first != ova_stats.stat.checksum
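Note: the two grep/cut tasks above imply that 10-appliance.conf is a plain
key=value file; a hypothetical illustration (both values invented):

    path=/usr/share/ovirt-engine-appliance/ovirt-engine-appliance.ova
    sha1sum=da39a3ee5e6b4b0d3255bfef95601890afd80709

The stat task then recomputes the sha1 of the OVA at that path, and the play
fails if it differs from the packaged checksum.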
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/install_packages.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/install_packages.yml
new file mode 100644
index 000000000..44993f1d9
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/install_packages.yml
@@ -0,0 +1,9 @@
+---
+- name: Install oVirt Hosted Engine packages
+ ansible.builtin.package:
+ name: "ovirt-hosted-engine-setup"
+ state: present
+ register: task_result
+ until: task_result is success
+ retries: 10
+ delay: 2
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/ipv_switch.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/ipv_switch.yml
new file mode 100644
index 000000000..99dc551a2
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/ipv_switch.yml
@@ -0,0 +1,11 @@
+---
+- name: Choose IPv4, IPv6 or auto
+ block:
+ - name: Fail if he_force_ip4 and he_force_ip6 are set at the same time
+ ansible.builtin.fail:
+ msg: he_force_ip4 and he_force_ip6 cannot be used at the same time
+ when: he_force_ip4 and he_force_ip6
+ - name: Prepare getent key
+ ansible.builtin.set_fact:
+ ip_key: "{{ 'ahostsv4' if he_force_ip4 else 'ahostsv6' if he_force_ip6 else 'ahosts' }}"
+ when: ip_key is not defined
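Note: ip_key selects the getent database used for name resolution throughout
the role: ahostsv4 when IPv4 is forced, ahostsv6 when IPv6 is forced, and
ahosts (both families) otherwise; see the getent calls in
pre_checks/002_validate_hostname_tasks.yml further below.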
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/iscsi_discover.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/iscsi_discover.yml
new file mode 100644
index 000000000..1363d3be6
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/iscsi_discover.yml
@@ -0,0 +1,25 @@
+---
+- include_tasks: auth_sso.yml
+- name: Fetch host facts
+ ovirt_host_info:
+ pattern: name={{ he_host_name }}
+ auth: "{{ ovirt_auth }}"
+ register: host_result
+ until: host_result is succeeded and host_result.ovirt_hosts|length >= 1
+ retries: 50
+ delay: 10
+- name: iSCSI discover
+ ovirt_host:
+ auth: "{{ ovirt_auth }}"
+ state: iscsidiscover
+ name: "{{ he_host_name }}"
+ iscsi:
+ address: "{{ he_iscsi_portal_addr }}"
+ port: "{{ he_iscsi_portal_port }}"
+ username: "{{ he_iscsi_discover_username | default(omit) }}"
+ password: "{{ he_iscsi_discover_password | default(omit) }}"
+ register: otopi_iscsi_targets
+# TODO: perform an iSCSI logout when viable, see:
+# https://bugzilla.redhat.com/show_bug.cgi?id=1535951
+# https://github.com/ansible/ansible/issues/35039
+...
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/iscsi_getdevices.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/iscsi_getdevices.yml
new file mode 100644
index 000000000..293fc4ba5
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/iscsi_getdevices.yml
@@ -0,0 +1,34 @@
+---
+- include_tasks: auth_sso.yml
+- name: iSCSI login
+ ovirt_host:
+ name: "{{ he_host_name }}"
+ state: iscsilogin
+ timeout: 30
+ iscsi:
+ username: "{{ he_iscsi_username }}"
+ password: "{{ he_iscsi_password }}"
+ address: "{{ item.0 }}"
+ port: "{{ item.1 }}"
+ target: "{{ he_iscsi_target }}"
+ portal: "{{ he_iscsi_tpgt | default(omit) }}"
+ auth: "{{ ovirt_auth }}"
+ no_log: true
+ ignore_errors: true
+ # TODO: avoid the with_together loop once
+ # https://github.com/ansible/ansible/issues/32640 got properly fixed
+ with_together:
+ - "{{ he_iscsi_portal_addr.split(',') }}"
+ - "{{ he_iscsi_portal_port.split(',') if he_iscsi_portal_port is string else he_iscsi_portal_port }}"
+- name: Get iSCSI LUNs
+ ovirt_host_storage_info:
+ host: "{{ he_host_name }}"
+ iscsi:
+ username: "{{ he_iscsi_username }}"
+ password: "{{ he_iscsi_password }}"
+ address: "{{ he_iscsi_portal_addr.split(',')|first }}"
+ port: "{{ he_iscsi_portal_port.split(',')|first if he_iscsi_portal_port is string else he_iscsi_portal_port }}"
+ target: "{{ he_iscsi_target }}"
+ auth: "{{ ovirt_auth }}"
+ register: otopi_iscsi_devices
+...
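Note: with_together pairs each portal address with the port at the same
index, so multiple portals are expressed as comma-separated strings.
Hypothetical values illustrating the pairing:

    he_iscsi_portal_addr: "10.35.1.10,10.35.1.11"
    he_iscsi_portal_port: "3260,3260"

The login task then iterates over (10.35.1.10, 3260) and (10.35.1.11, 3260).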
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/main.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/main.yml
new file mode 100644
index 000000000..b6b920900
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/main.yml
@@ -0,0 +1,19 @@
+---
+- name: Deploy hosted-engine
+ import_tasks: full_execution.yml
+ tags: always
+
+- name: Execute just a specific set of steps
+ include_tasks: partial_execution.yml
+ tags:
+ - initial_clean
+ - final_clean
+ - bootstrap_local_vm
+ - create_storage_domain
+ - create_target_vm
+ - iscsi_discover
+ - iscsi_getdevices
+ - fc_getdevices
+ - get_network_interfaces
+ - validate_hostnames
+ - never
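Note: the 'never' tag makes these tasks run only when one of their companion
tags is requested explicitly on the command line, e.g.
ansible-playbook deploy.yml --tags create_storage_domain (playbook name
hypothetical); without an explicit tag selection, only the full deployment
imported above is executed.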
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/partial_execution.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/partial_execution.yml
new file mode 100644
index 000000000..3f7ba5ba6
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/partial_execution.yml
@@ -0,0 +1,155 @@
+---
+- name: Force facts gathering
+ ansible.builtin.setup:
+ tags:
+ - initial_clean
+ - final_clean
+ - bootstrap_local_vm
+ - create_storage_domain
+ - create_target_vm
+ - iscsi_discover
+ - iscsi_getdevices
+ - fc_getdevices
+ - get_network_interfaces
+ - validate_hostnames
+ - never
+
+
+- name: Initial validations and cleanups
+ block:
+ - name: Install required packages for oVirt Hosted Engine deployment
+ import_tasks: install_packages.yml
+ when: not he_offline_deployment|bool
+
+ - name: System configuration validations
+ include_tasks: "{{ pre_checks_item }}"
+ with_fileglob: "pre_checks/*.yml"
+ loop_control:
+ loop_var: pre_checks_item
+
+ - name: Clean environment before deployment
+ import_tasks: initial_clean.yml
+ tags: [initial_clean, bootstrap_local_vm, never]
+
+
+- name: Bootstrap local engine VM
+ block:
+ - name: Bootstrap local engine VM
+ block:
+ - name: 01 Bootstrap local VM
+ import_tasks: bootstrap_local_vm/01_prepare_routing_rules.yml
+
+ - name: 02 Bootstrap local VM
+ import_tasks: bootstrap_local_vm/02_create_local_vm.yml
+
+ - name: Local engine VM installation - Pre tasks
+ block:
+ - name: 03 Bootstrap local VM
+ import_tasks: bootstrap_local_vm/03_engine_initial_tasks.yml
+ delegate_to: "{{ groups.engine[0] }}"
+
+ - name: Engine Setup on local VM
+ block:
+ - name: Run engine-setup
+ vars:
+ ovirt_engine_setup_hostname: "{{ he_fqdn.split('.')[0] }}"
+ ovirt_engine_setup_organization: "{{ he_cloud_init_domain_name }}"
+ ovirt_engine_setup_dwh_db_host: "{{ he_fqdn.split('.')[0] }}"
+ ovirt_engine_setup_firewall_manager: null
+ ovirt_engine_setup_answer_file_path: /root/ovirt-engine-answers
+ ovirt_engine_setup_use_remote_answer_file: true
+ ovirt_engine_setup_offline: "{{ he_offline_deployment }}"
+ ovirt_engine_setup_package_list: "{{ he_additional_package_list }}"
+ ovirt_engine_setup_admin_password: "{{ he_admin_password }}"
+ ansible.builtin.import_role:
+ name: ovirt.ovirt.engine_setup
+ delegate_to: "{{ groups.engine[0] }}"
+ rescue:
+ - name: Sync on engine machine
+ ansible.builtin.command: sync
+ changed_when: true
+ delegate_to: "{{ groups.engine[0] }}"
+ - name: Fetch logs from the engine VM
+ import_tasks: fetch_engine_logs.yml
+ ignore_errors: true
+ delegate_to: "{{ he_ansible_host_name }}"
+ - name: Notify the user about a failure
+ ansible.builtin.fail:
+ msg: >
+ There was a failure deploying the engine on the local engine VM.
+ The system may not be provisioned according to the playbook
+ results: please check the logs for the issue,
+ fix accordingly or re-deploy from scratch.
+
+ - name: Local engine VM installation - Post tasks
+ block:
+ - name: 04 Bootstrap local VM
+ import_tasks: bootstrap_local_vm/04_engine_final_tasks.yml
+ delegate_to: "{{ groups.engine[0] }}"
+
+ - name: Add first HE host
+ block:
+ - name: 05 Bootstrap local VM
+ import_tasks: bootstrap_local_vm/05_add_host.yml
+ tags: [bootstrap_local_vm, never]
+
+
+- name: Create hosted-engine storage domain
+ block:
+ - name: Create Storage Domain
+ import_tasks: create_storage_domain.yml
+ tags: [create_storage_domain, never]
+
+
+- name: Create and configure target VM
+ block:
+ - name: Fetch host IP address
+ import_tasks: fetch_host_ip.yml
+
+ - name: Create target hosted engine vm
+ import_tasks: create_target_vm/01_create_target_hosted_engine_vm.yml
+
+ - name: Configure database settings
+ import_tasks: create_target_vm/02_engine_vm_configuration.yml
+ delegate_to: "{{ groups.engine[0] }}"
+ tags: [create_target_vm, never]
+
+
+- name: Hosted engine final tasks
+ import_tasks: create_target_vm/03_hosted_engine_final_tasks.yml
+ tags: [create_target_vm, never]
+
+- name: Sync on engine machine
+ import_tasks: sync_on_engine_machine.yml
+ changed_when: true
+ ignore_errors: true
+ tags: [final_clean, never]
+
+- name: Final clean
+ import_tasks: final_clean.yml
+ tags: [final_clean, never]
+
+
+- name: Validate network interface
+ import_tasks: "pre_checks/001_validate_network_interfaces.yml"
+ tags: [get_network_interfaces, never]
+
+
+- name: Validate hostnames
+ import_tasks: "pre_checks/002_validate_hostname_tasks.yml"
+ tags: [validate_hostnames, never]
+
+
+- name: Get FC devices
+ import_tasks: "fc_getdevices.yml"
+ tags: [fc_getdevices, never]
+
+
+- name: iSCSI discover
+ import_tasks: "iscsi_discover.yml"
+ tags: [iscsi_discover, never]
+
+
+- name: Get iSCSI devices
+ import_tasks: "iscsi_getdevices.yml"
+ tags: [iscsi_getdevices, never]
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pause_execution.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pause_execution.yml
new file mode 100644
index 000000000..b767f3a92
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pause_execution.yml
@@ -0,0 +1,13 @@
+---
+- name: Create temporary lock file
+ ansible.builtin.tempfile:
+ state: file
+ suffix: _he_setup_lock
+ delegate_to: localhost
+ register: he_setup_lock_file
+- name: Pause execution until {{ he_setup_lock_file.path }} is removed; delete it once ready to proceed
+ ansible.builtin.wait_for:
+ path: "{{ he_setup_lock_file.path }}"
+ state: absent
+ timeout: 86400 # 24 hours
+ delegate_to: localhost
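Note: this is a simple lock-file pause: tempfile creates a uniquely named
*_he_setup_lock file on the controller, and wait_for then blocks for up to
24 hours until the user deletes that file to resume the deployment (the
actual path is reported in the task name at runtime).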
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/001_validate_network_interfaces.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/001_validate_network_interfaces.yml
new file mode 100644
index 000000000..d71306300
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/001_validate_network_interfaces.yml
@@ -0,0 +1,92 @@
+---
+- name: Network interfaces
+ block:
+ - name: Detecting interface on existing management bridge
+ ansible.builtin.set_fact:
+ bridge_interface="{{ hostvars[inventory_hostname]['ansible_' + bridge_name ]['interfaces']|first }}"
+ when: "'ansible_' + bridge_name in hostvars[inventory_hostname]"
+ with_items:
+ - 'ovirtmgmt'
+ - 'rhevm'
+ loop_control:
+ loop_var: bridge_name
+ - name: Set variable for supported bond modes
+ ansible.builtin.set_fact:
+ acceptable_bond_modes: ['active-backup', 'balance-xor', 'broadcast', '802.3ad']
+ - name: Get all active network interfaces
+ ansible.builtin.set_fact:
+ otopi_net_host="{{ hostvars[inventory_hostname]['ansible_' + iface_item]['device'] }}"
+ type="{{ hostvars[inventory_hostname]['ansible_' + iface_item]['type'] }}"
+ bond_valid_name="{{ iface_item | regex_search('(^bond[0-9]+)') }}"
+ when: (
+ (
+ iface_item != 'lo'
+ ) and (
+ bridge_interface is not defined
+ ) and (
+ 'active' in hostvars[inventory_hostname]['ansible_' + iface_item] and
+ hostvars[inventory_hostname]['ansible_' + iface_item]['active']
+ ) and (
+ hostvars[inventory_hostname]['ansible_' + iface_item]['type'] != 'bridge'
+ ) and (
+ hostvars[inventory_hostname]['ansible_' + iface_item]['ipv4'] is defined or
+ hostvars[inventory_hostname]['ansible_' + iface_item]['ipv6'] is defined
+ ) and (
+ (
+ hostvars[inventory_hostname]['ansible_' + iface_item]['type'] != 'bonding'
+ ) or (
+ (
+ hostvars[inventory_hostname]['ansible_' + iface_item]['type'] == 'bonding'
+ ) and (
+ hostvars[inventory_hostname]['ansible_' + iface_item]['slaves'][0] is defined
+ ) and (
+ hostvars[inventory_hostname]['ansible_' + iface_item]['mode'] in acceptable_bond_modes
+ )
+ )
+ )
+ )
+ with_items:
+ - "{{ ansible_interfaces | map('replace', '-','_') | list }}"
+ loop_control:
+ loop_var: iface_item
+ register: valid_network_interfaces
+ - name: Filter bonds with bad naming
+ ansible.builtin.set_fact:
+ net_iface="{{ bond_item }}"
+ when: >-
+ not 'skipped' in bond_item and ((bond_item['ansible_facts']['type'] == 'ether') or
+ ( (bond_item['ansible_facts']['type'] == 'bonding') and bond_item['ansible_facts']['bond_valid_name'] ))
+ with_items:
+ - "{{ valid_network_interfaces['results'] }}"
+ loop_control:
+ loop_var: bond_item
+ register: bb_filtered_list
+ - name: Generate output list
+ ansible.builtin.set_fact:
+ host_net: >-
+ {{ [bridge_interface] if bridge_interface is defined else bb_filtered_list.results |
+ reject('skipped') | map(attribute='bond_item.ansible_facts.otopi_net_host') | list }}
+ - import_tasks: ../filter_team_devices.yml
+ - import_tasks: ../filter_unsupported_vlan_devices.yml
+ - name: Generate list of all unsupported network devices
+ ansible.builtin.set_fact:
+ invalid_net_if: "{{ invalid_vlan_if + team_if }}"
+ - name: Filter unsupported interface types
+ ansible.builtin.set_fact:
+ otopi_host_net: "{{ host_net | difference(invalid_net_if) }}"
+ register: otopi_host_net
+    - name: Fail if only unsupported devices are available
+ ansible.builtin.fail:
+ msg: >-
+ Only unsupported devices {{ invalid_net_if | join(', ') }} are present.
+ Teaming and bond modes: Round Robin, TLB, ALB are unsupported.
+ Supported VLAN naming convention is: VLAN_PARENT.VLAN_ID
+ The following bond modes are supported: {{ acceptable_bond_modes }}
+ when: (otopi_host_net.ansible_facts.otopi_host_net | length == 0)
+ - name: Validate selected bridge interface if management bridge does not exist
+ ansible.builtin.fail:
+ msg: The selected network interface is not valid
+ when:
+ he_bridge_if not in otopi_host_net.ansible_facts.otopi_host_net and bridge_interface is not defined and
+ not he_just_collect_network_interfaces
+...
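Note: per the naming check referenced above, a VLAN device is accepted only
when named VLAN_PARENT.VLAN_ID, e.g. eth0.100, or bond0.42 on a bond in a
supported mode, while a name such as vlan100 is rejected (example interface
names hypothetical); the check itself lives in validate_vlan_name.yml
further below.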
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/002_validate_hostname_tasks.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/002_validate_hostname_tasks.yml
new file mode 100644
index 000000000..776720a83
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/002_validate_hostname_tasks.yml
@@ -0,0 +1,122 @@
+---
+- name: Choose IPv4, IPv6 or auto
+ import_tasks: ipv_switch.yml
+- name: Define he_host_address and he_host_ip
+ import_tasks: fetch_host_ip.yml
+ when: he_host_ip is none or he_host_address is none
+
+- name: Validate host hostname
+ block:
+ - name: Avoid localhost
+ ansible.builtin.fail:
+ msg: >
+ localhost is not a valid address
+ when: he_host_address in ['localhost', 'localhost.localdomain']
+ - name: Ensure host address resolves locally
+ ansible.builtin.fail:
+ msg: >
+          The address proposed for this host does not resolve locally
+ when: he_host_ip not in ansible_all_ipv4_addresses | union(ansible_all_ipv6_addresses)
+ - name: Ensure the resolved address resolves on the selected interface
+ block:
+ - name: Get target address from selected interface (IPv4)
+ ansible.builtin.shell: >-
+ ip addr show
+ {{ he_mgmt_network
+ if 'ansible_' + he_mgmt_network.replace('-','_') in hostvars[inventory_hostname]
+ else he_bridge_if }} |
+ grep 'inet ' |
+ cut -d' ' -f6 |
+ cut -d'/' -f1
+ register: target_address_v4
+ changed_when: true
+ - name: Debug var target_address_v4
+ ansible.builtin.debug:
+ var: target_address_v4
+ - name: Get target address from selected interface (IPv6)
+ ansible.builtin.shell: >-
+ ip addr show
+ {{ he_mgmt_network
+ if 'ansible_' + he_mgmt_network.replace('-','_') in hostvars[inventory_hostname]
+ else he_bridge_if }} |
+ grep 'inet6 ' |
+ cut -d' ' -f6 |
+ cut -d'/' -f1
+ register: target_address_v6
+ changed_when: true
+ - name: Debug var target_address_v6
+ ansible.builtin.debug:
+ var: target_address_v6
+ - name: Check the resolved address resolves on the selected interface
+ ansible.builtin.fail:
+ msg: >
+ The resolved address doesn't resolve
+ on the selected interface
+ when: >-
+ he_host_ip not in target_address_v4.stdout_lines and
+ he_host_ip not in target_address_v6.stdout_lines
+ - name: Check for alias
+ ansible.builtin.shell: getent {{ ip_key }} {{ he_host_address }} | cut -d' ' -f1 | uniq
+ register: hostname_res_count_output
+ changed_when: true
+ ignore_errors: true
+ - name: Debug var hostname_res_count_output
+ ansible.builtin.debug:
+ var: hostname_res_count_output
+ - name: Filter resolved address list
+ ansible.builtin.set_fact:
+ hostname_res_count_output_filtered: >-
+ {{ hostname_res_count_output.stdout_lines |
+ difference(target_address_v6.stdout_lines) |
+ difference(target_address_v4.stdout_lines) }}
+ - name: Ensure the resolved address resolves only on the selected interface
+ ansible.builtin.fail:
+ msg: >
+ hostname '{{ he_host_address }}' doesn't uniquely match the interface
+ '{{ he_bridge_if }}' selected for the management bridge;
+          it also matches the interface with IP {{ hostname_res_count_output.stdout_lines |
+          difference([he_host_ip,]) }}.
+          Please make sure that the hostname obtained from
+          the management network interface resolves
+          only there.
+ when: hostname_res_count_output_filtered|length > 0
+ when: he_bridge_if is defined and he_bridge_if is not none and he_mgmt_network is defined
+ when: he_host_address is defined and he_host_address is not none
+- name: Validate engine he_fqdn
+ block:
+ - name: Avoid localhost
+ ansible.builtin.fail:
+ msg: >
+ localhost is not a valid he_fqdn for the engine VM
+ when: he_fqdn in ['localhost', 'localhost.localdomain']
+ - name: Get engine FQDN resolution
+ ansible.builtin.shell: getent {{ ip_key }} {{ he_fqdn }} | grep STREAM
+ environment: "{{ he_cmd_lang }}"
+ register: fqdn_resolution_output
+ changed_when: true
+ ignore_errors: true
+ - name: Check engine he_fqdn resolution
+ ansible.builtin.fail:
+ msg: >
+ Unable to resolve address
+ when: fqdn_resolution_output.rc != 0
+ - name: Parse engine he_fqdn resolution
+ ansible.builtin.set_fact:
+ r_fqdn_address: "{{ fqdn_resolution_output.stdout.split()[0] }}"
+ - name: Ensure engine he_fqdn doesn't resolve locally
+ ansible.builtin.fail:
+ msg: >
+ The he_fqdn proposed for the engine VM resolves on this host
+ when: r_fqdn_address in ansible_all_ipv4_addresses | union(ansible_all_ipv6_addresses)
+ - name: Check http/https proxy
+ ansible.builtin.fail:
+ msg: >
+ Your system is configured to use a proxy, please
+ add an exception for {{ url }} with no_proxy directive.
+ when: url is ovirt.ovirt.proxied
+ loop_control:
+ loop_var: url
+ with_items:
+ - "http://{{ he_fqdn }}/"
+ - "https://{{ he_fqdn }}/"
+ when: he_fqdn is defined and he_fqdn is not none
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/define_variables.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/define_variables.yml
new file mode 100644
index 000000000..4467b1b64
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/define_variables.yml
@@ -0,0 +1,52 @@
+---
+- name: Define Variables
+ block:
+ - name: Define he_cloud_init_domain_name
+ block:
+ - name: Get domain name
+ ansible.builtin.command: hostname -d
+ changed_when: true
+ register: host_domain_name
+ - name: Set he_cloud_init_domain_name
+ ansible.builtin.set_fact:
+ he_cloud_init_domain_name: "{{ host_domain_name.stdout_lines[0] if host_domain_name.stdout_lines else '' }}"
+ when: he_cloud_init_domain_name is not defined
+ - name: Define he_cloud_init_host_name
+ ansible.builtin.set_fact:
+ he_cloud_init_host_name: "{{ he_fqdn }}"
+ - name: Define he_vm_uuid
+ block:
+ - name: Get uuid
+ ansible.builtin.command: uuidgen
+ changed_when: true
+ register: uuid
+ - name: Set he_vm_uuid
+ ansible.builtin.set_fact:
+ he_vm_uuid: "{{ uuid.stdout }}"
+ - name: Define he_nic_uuid
+ block:
+ - name: Get uuid
+ ansible.builtin.command: uuidgen
+ changed_when: true
+ register: uuid
+ - name: Set he_nic_uuid
+ ansible.builtin.set_fact:
+ he_nic_uuid: "{{ uuid.stdout }}"
+ - name: Define he_cdrom_uuid
+ block:
+ - name: Get uuid
+ ansible.builtin.command: uuidgen
+ changed_when: true
+ register: uuid
+ - name: Set he_cdrom_uuid
+ ansible.builtin.set_fact:
+ he_cdrom_uuid: "{{ uuid.stdout }}"
+ - name: Define Timezone
+ block:
+ - name: get timezone
+ ansible.builtin.shell: timedatectl | grep "Time zone" | awk '{print $3}'
+ changed_when: true
+ register: timezone
+ - name: Set he_time_zone
+ ansible.builtin.set_fact:
+ he_time_zone: "{{ timezone.stdout }}"
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_data_center_name.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_data_center_name.yml
new file mode 100644
index 000000000..1c242b845
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_data_center_name.yml
@@ -0,0 +1,15 @@
+---
+- name: Validate Data Center name format
+  block:
+    - name: Fail if Data Center name format is incorrect
+      ansible.builtin.fail:
+        msg: >-
+          Invalid Data Center name format. Data Center name may only contain letters, numbers, '-', or '_'.
+          Got {{ he_data_center }}
+      when: not he_data_center | regex_search( "^[a-zA-Z0-9_-]+$" )
+    - name: Validate Cluster name
+      ansible.builtin.fail:
+        msg: >-
+          Cluster name cannot be 'Default'. This is a reserved name for the default Data Center. Please choose
+          another name for the cluster.
+      when: he_data_center != "Default" and he_cluster == "Default"
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_firewalld.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_firewalld.yml
new file mode 100644
index 000000000..3480b2afe
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_firewalld.yml
@@ -0,0 +1,14 @@
+---
+- name: Check firewalld status
+ block:
+ - name: Check firewalld status
+ ansible.builtin.systemd:
+ name: firewalld
+ register: firewalld_s
+ - name: Enforce firewalld status
+ ansible.builtin.fail:
+ msg: >
+ firewalld is required to be enabled and active in order
+ to correctly deploy hosted-engine.
+ Please check, fix accordingly and re-deploy.
+ when: firewalld_s.status.SubState != 'running' or firewalld_s.status.LoadState == 'masked'
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_gateway.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_gateway.yml
new file mode 100644
index 000000000..3083db2a0
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_gateway.yml
@@ -0,0 +1,25 @@
+---
+- name: Define default gateway
+ block:
+ - name: Get default gateway IPv4
+ ansible.builtin.shell: ip r | grep default | awk '{print $3}'
+ changed_when: true
+ register: get_gateway_4
+ when: he_default_gateway_4 is not defined or he_default_gateway_4 is none or not he_default_gateway_4
+ - name: Get default gateway IPv6
+ ansible.builtin.shell: ip -6 r | grep default | awk '{print $3}'
+ changed_when: true
+ register: get_gateway_6
+ when: he_default_gateway_6 is not defined or he_default_gateway_6 is none or not he_default_gateway_6
+ - name: Set he_gateway
+ ansible.builtin.set_fact:
+ he_gateway: >-
+ {{ get_gateway_4.stdout_lines[0] if get_gateway_4.stdout_lines else
+ get_gateway_6.stdout_lines[0] if get_gateway_6.stdout_lines else
+ ''
+ }}
+ when: he_gateway is not defined or he_gateway is none or not he_gateway|trim
+- name: Fail if there is no gateway
+ ansible.builtin.fail:
+ msg: "No default gateway is defined"
+ when: he_gateway is none or not he_gateway|trim
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_mac_address.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_mac_address.yml
new file mode 100644
index 000000000..e5b7d77e0
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_mac_address.yml
@@ -0,0 +1,15 @@
+---
+- name: Define Engine VM MAC address
+ block:
+ - name: Generate unicast MAC address
+ ansible.builtin.shell: od -An -N6 -tx1 /dev/urandom | sed -e 's/^ *//' -e 's/ */:/g' -e 's/:$//' -e 's/^\(.\)[13579bdf]/\10/'
+ changed_when: true
+ register: mac_address
+ - name: Set he_vm_mac_addr
+ ansible.builtin.set_fact:
+ he_vm_mac_addr: >-
+ {{ mac_address.stdout if he_vm_mac_addr is not defined or he_vm_mac_addr is none else he_vm_mac_addr }}
+ - name: Fail if MAC address structure is incorrect
+ ansible.builtin.fail:
+ msg: "Invalid unicast MAC address format. Got {{ he_vm_mac_addr }}"
+ when: not he_vm_mac_addr | regex_search( "^[a-fA-F0-9][02468aAcCeE](:[a-fA-F0-9]{2}){5}$" )
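Note: the generator reads six random bytes and forces the second hex digit
to be even; since the least-significant bit of the first octet is the
multicast bit, an even second digit guarantees a unicast address, which the
regex then re-checks. For example (hypothetical addresses),
0e:12:34:56:78:9a passes while 0f:12:34:56:78:9a is rejected.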
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_memory_size.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_memory_size.yml
new file mode 100644
index 000000000..0b867deb8
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_memory_size.yml
@@ -0,0 +1,44 @@
+---
+- name: Get available memory amount
+ block:
+ - name: Get free memory
+ ansible.builtin.shell: free -m | grep Mem | awk '{print $4}'
+ changed_when: true
+ register: free_mem
+ - name: Get cached memory
+ ansible.builtin.shell: free -m | grep Mem | awk '{print $6}'
+ changed_when: true
+ register: cached_mem
+ - name: Set Max memory
+ ansible.builtin.set_fact:
+ max_mem: "{{ free_mem.stdout|int + cached_mem.stdout|int - he_reserved_memory_MB + he_avail_memory_grace_MB }}"
+- name: Set he_mem_size_MB to the maximum available if not defined
+ ansible.builtin.set_fact:
+ he_mem_size_MB: "{{ he_mem_size_MB if he_mem_size_MB != 'max' else max_mem }}"
+- name: Fail if available memory is less than the minimal requirement
+  ansible.builtin.fail:
+    msg: >-
+      Available memory ({{ max_mem }}MB) is less than the minimal requirement ({{ he_minimal_mem_size_MB }}MB).
+      Be aware that {{ he_reserved_memory_MB }}MB is reserved for the host and cannot be allocated to the
+      engine VM.
+ when: >-
+ he_requirements_check_enabled and he_memory_requirements_check_enabled and max_mem|int < he_minimal_mem_size_MB|int
+- name: Fail if user chose less memory than the minimal requirement
+ ansible.builtin.fail:
+ msg: "Memory size must be at least {{ he_minimal_mem_size_MB }}MB, while you selected only {{ he_mem_size_MB }}MB"
+ when: >-
+ he_requirements_check_enabled and
+ he_memory_requirements_check_enabled and he_minimal_mem_size_MB|int > he_mem_size_MB|int
+- name: Fail if user chose more memory than is available
+  ansible.builtin.fail:
+    msg: >-
+      Not enough memory: {{ he_mem_size_MB }}MB requested, while only {{ max_mem }}MB are available on the host.
+      Be aware that {{ he_reserved_memory_MB }}MB is reserved for the host and cannot be allocated to the
+      engine VM.
+ when: >-
+ he_requirements_check_enabled and
+ he_memory_requirements_check_enabled and he_mem_size_MB|int > max_mem|int
+- name: Fail if he_disk_size_GB is smaller than the minimal requirement
+ ansible.builtin.fail:
+ msg: "Disk size too small: ({{ he_disk_size_GB }}GB), disk size must be at least {{ he_minimal_disk_size_GB }}GB"
+ when: he_requirements_check_enabled and he_disk_size_GB|int < he_minimal_disk_size_GB|int
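Note: as a worked example with hypothetical numbers, 15000MB free and 3000MB
cached with he_reserved_memory_MB=512 and he_avail_memory_grace_MB=200 give
max_mem = 15000 + 3000 - 512 + 200 = 17688MB, which is then compared against
he_minimal_mem_size_MB and the requested he_mem_size_MB.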
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_network_test.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_network_test.yml
new file mode 100644
index 000000000..2bc7b9a94
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_network_test.yml
@@ -0,0 +1,37 @@
+---
+- name: Validate network connectivity check configuration
+ block:
+ - name: Fail if he_network_test is not valid
+ ansible.builtin.fail:
+ msg: "Invalid he_network_test defined"
+ changed_when: true
+ when: he_network_test not in ['dns', 'ping', 'tcp', 'none']
+ - name: Validate TCP network connectivity check parameters
+ block:
+ - name: Debug var he_tcp_t_address
+ ansible.builtin.debug:
+ var: he_tcp_t_address
+ - name: Fail if he_tcp_t_address is not defined
+ ansible.builtin.fail:
+ msg: "No he_tcp_t_address is defined"
+ changed_when: true
+ when:
+ ( he_tcp_t_address is undefined ) or
+ ( he_tcp_t_address is none ) or
+ ( he_tcp_t_address|trim|length == 0 )
+ - name: Debug var he_tcp_t_port
+ ansible.builtin.debug:
+ var: he_tcp_t_port
+ - name: Fail if he_tcp_t_port is not defined
+ ansible.builtin.fail:
+ msg: "No he_tcp_t_port is defined"
+ changed_when: true
+ when:
+ ( he_tcp_t_port is undefined ) or
+ ( he_tcp_t_port is none )
+ - name: Fail if he_tcp_t_port is no integer
+ ansible.builtin.fail:
+ msg: "he_tcp_t_port has to be integer"
+ changed_when: true
+ when: not he_tcp_t_port|int
+ when: he_network_test == 'tcp'
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_services_status.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_services_status.yml
new file mode 100644
index 000000000..45be40ee8
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_services_status.yml
@@ -0,0 +1,17 @@
+---
+- name: Populate service facts
+ ansible.builtin.systemd:
+ name: "{{ service_item }}"
+ register: checked_services
+ with_items:
+ - firewalld
+ loop_control:
+ loop_var: service_item
+- name: Fail if the service is masked or not running
+ ansible.builtin.fail:
+ msg: "{{ service.name }} is masked or not running"
+ when: service.status.SubState != 'running' or service.status.LoadState == 'masked'
+ with_items: "{{ checked_services.results }}"
+ loop_control:
+ label: "{{ service.name }}"
+ loop_var: service
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_vcpus_count.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_vcpus_count.yml
new file mode 100644
index 000000000..e84ec0a16
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_vcpus_count.yml
@@ -0,0 +1,17 @@
+---
+- name: Define he_maxvcpus
+ block:
+ - name: get max cpus
+ ansible.builtin.command: grep -c ^processor /proc/cpuinfo
+ changed_when: true
+ register: max_cpus
+ - name: Set he_maxvcpus
+ ansible.builtin.set_fact:
+ he_maxvcpus: "{{ max_cpus.stdout }}"
+- name: Set he_vcpus to maximum amount if not defined
+ ansible.builtin.set_fact:
+ he_vcpus: "{{ he_vcpus if he_vcpus != 'max' else he_maxvcpus }}"
+- name: Check number of chosen CPUs
+ ansible.builtin.fail:
+    msg: "Invalid number of CPUs specified: {{ he_vcpus }}, while only {{ he_maxvcpus }} are available on the host"
+ when: he_maxvcpus|int < he_vcpus|int
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/restore_backup.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/restore_backup.yml
new file mode 100644
index 000000000..80f963113
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/restore_backup.yml
@@ -0,0 +1,87 @@
+---
+- name: Copy the backup file to the engine VM for restore
+ ansible.builtin.copy:
+ src: "{{ he_restore_from_file }}"
+ dest: /root/engine_backup
+ owner: root
+ group: root
+ mode: 0600
+- name: Run engine-backup
+ ansible.builtin.shell: >-
+ engine-backup --mode=restore
+ --log=/var/log/ovirt-engine/setup/restore-backup-$(date -u +%Y%m%d%H%M%S).log
+ --file=/root/engine_backup --provision-all-databases --restore-permissions
+ environment: "{{ he_cmd_lang }}"
+ register: engine_restore_out
+ ignore_errors: true
+ changed_when: true
+- name: Pause the execution to let the user interactively handle restore failures
+ block:
+ - name: Let the user connect to the bootstrap engine VM to manually handle restore failures
+ ansible.builtin.debug:
+ msg: >-
+ engine-backup --mode=restore failed:
+
+ {{ engine_restore_out.stderr }}
+
+ You can now connect from this host to the bootstrap engine VM using ssh as root
+ and the temporary IP address -
+ {{ hostvars[he_ansible_host_name]['local_vm_ip']['stdout_lines'][0] }} -
+ and fix this issue. Please continue only after the backup is restored.
+
+ To retry the command that failed, you can run, on the bootstrap engine VM:
+
+ engine-backup --mode=restore --file=/root/engine_backup --provision-all-databases
+ --restore-permissions
+ - include_tasks: pause_execution.yml
+ when: engine_restore_out.rc != 0
+- name: Remove backup file
+ ansible.builtin.file:
+ state: absent
+ path: /root/engine_backup
+- name: Remove previous hosted-engine VM
+ ansible.builtin.command: >-
+ "{{ engine_psql }}" -c "SELECT deletevm(vm_guid) FROM (SELECT vm_guid FROM vms WHERE origin=6) t"
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: db_remove_old_enginevm
+- name: Update host used to redeploy
+ include_tasks: restore_host_redeploy.yml
+ loop:
+ - { vds_type: 'vds_name', input: "{{ he_host_name }}" }
+ - { vds_type: 'vds_unique_id', input: "{{ hostvars[he_ansible_host_name]['unique_id_out']['stdout_lines']|first }}" }
+- name: Rename previous HE storage domain to avoid name conflicts
+ ansible.builtin.command: >-
+ "{{ engine_psql }}" -c
+ "UPDATE storage_domain_static SET
+ storage_name='{{ he_storage_domain_name }}_old_{{ ansible_date_time.iso8601_basic_short }}' WHERE
+ storage_name='{{ he_storage_domain_name }}'"
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: db_rename_he_sd
+- name: Save original DisableFenceAtStartupInSec
+ ansible.builtin.shell: >-
+ set -euo pipefail && engine-config -g DisableFenceAtStartupInSec |
+ cut -d' ' -f2 > /root/DisableFenceAtStartupInSec.txt
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+- name: Update DisableFenceAtStartupInSec to prevent host fencing during the recovery
+ ansible.builtin.command: "engine-config -s DisableFenceAtStartupInSec=86400"
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+- name: Add lines to engine-setup answerfile for PKI renewal
+ ansible.builtin.lineinfile:
+ path: /root/ovirt-engine-answers
+ line: "{{ item }}"
+ with_items:
+ - "OVESETUP_PKI/renew=bool:{{ he_pki_renew_on_restore }}"
+ - "QUESTION/1/OVESETUP_SKIP_RENEW_PKI_CONFIRM=str:yes"
+- name: Remove version lock from the engine
+ ansible.builtin.file:
+ state: absent
+ path: /etc/yum/pluginconf.d/versionlock.list
+- name: Recreate empty versionlock file
+ ansible.builtin.file:
+ state: touch
+ path: /etc/yum/pluginconf.d/versionlock.list
+ mode: 0644
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/restore_host_redeploy.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/restore_host_redeploy.yml
new file mode 100644
index 000000000..7dd0752e1
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/restore_host_redeploy.yml
@@ -0,0 +1,29 @@
+# Used by restore_backup.yml
+---
+- name: Update dynamic data for VMs on the host used to redeploy
+ ansible.builtin.command: >-
+ "{{ engine_psql }}" -c
+ "UPDATE vm_dynamic SET run_on_vds = NULL, status=0 /* Down */ WHERE run_on_vds IN
+ (SELECT vds_id FROM vds WHERE
+ upper({{ item.vds_type }})=upper('{{ item.input }}'))"
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: db_update_host_vms
+- name: Update dynamic data for VMs migrating to the host used to redeploy
+ ansible.builtin.command: >-
+ "{{ engine_psql }}" -c
+ "UPDATE vm_dynamic SET migrating_to_vds = NULL, status=0 /* Down */ WHERE migrating_to_vds IN
+ (SELECT vds_id FROM vds WHERE
+ upper({{ item.vds_type }})=upper('{{ item.input }}'))"
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: db_update_host_migrating_vms
+- name: Remove host used to redeploy
+ ansible.builtin.command: >-
+ "{{ engine_psql }}" -c
+ "SELECT deletevds(vds_id) FROM
+ (SELECT vds_id FROM vds WHERE
+ upper({{ item.vds_type }})=upper('{{ item.input }}')) t"
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: db_remove_he_host
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/search_available_network_subnet.yaml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/search_available_network_subnet.yaml
new file mode 100644
index 000000000..cf672cd4b
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/search_available_network_subnet.yaml
@@ -0,0 +1,57 @@
+---
+- name: Search for an available IPv4 subnet
+  block:
+    - name: Define 3rd chunk
+      ansible.builtin.set_fact:
+        chunk: 0
+      when: chunk is not defined
+    - name: Set 3rd chunk
+      ansible.builtin.set_fact:
+        chunk: "{{ chunk|int + 1 }}"
+    - name: Get ip route
+      ansible.builtin.shell: ip route get 192.168.{{ chunk }}.1 | grep "via" | cat
+      register: result
+      changed_when: false
+    - name: Fail if can't find an available subnet
+      ansible.builtin.fail:
+        msg: >-
+          Cannot find an available subnet for the internal Libvirt network.
+          Please set it to an unused subnet by adding the variable 'he_ipv4_subnet_prefix'
+          to the variables file (e.g. he_ipv4_subnet_prefix: '123.123.123').
+      when: result.stdout.find("via") == -1 and chunk|int > 253
+    - name: Set new IPv4 subnet prefix
+      ansible.builtin.set_fact:
+        he_ipv4_subnet_prefix: "192.168.{{ chunk }}"
+      when: result.stdout.find("via") != -1
+    - name: Search again with another prefix
+      include_tasks: search_available_network_subnet.yaml
+      when: result.stdout.find("via") == -1
+  when: not ipv6_deployment|bool
+- name: Search for an available IPv6 subnet
+  block:
+    - name: Define 3rd chunk
+      ansible.builtin.set_fact:
+        chunk: 1000
+      when: chunk is not defined
+    - name: Set 3rd chunk
+      ansible.builtin.set_fact:
+        chunk: "{{ chunk|int + 45 }}"  # 200 tries
+    - name: Get ip route
+      ansible.builtin.shell: ip -6 route get fd00:1234:{{ chunk }}:900::1 | grep "via" | cat
+      register: result
+      changed_when: false
+    - name: Fail if can't find an available subnet
+      ansible.builtin.fail:
+        msg: >-
+          Cannot find an available subnet for the internal Libvirt network.
+          Please set it to an unused subnet by adding the variable 'he_ipv6_subnet_prefix'
+          to the variables file (e.g. he_ipv6_subnet_prefix: 'fd00:9876:5432:900').
+      when: result.stdout.find("via") == -1 and chunk|int > 9900
+    - name: Set new IPv6 subnet prefix
+      ansible.builtin.set_fact:
+        he_ipv6_subnet_prefix: "fd00:1234:{{ chunk }}:900"
+      when: result.stdout.find("via") != -1
+    - name: Search again with another prefix
+      include_tasks: search_available_network_subnet.yaml
+      when: result.stdout.find("via") == -1
+  when: ipv6_deployment|bool
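Note: the probe relies on ip route get: when the output contains "via", the
test address is only reachable through a gateway, meaning no local interface
already sits on that subnet, so the prefix is taken as free; otherwise the
file re-includes itself with the next chunk. IPv4 walks the 192.168.x.0/24
range one subnet at a time; IPv6 starts at fd00:1234:1045:900::1 and steps
the third group by 45, giving roughly 200 attempts before failing.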
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/sync_on_engine_machine.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/sync_on_engine_machine.yml
new file mode 100644
index 000000000..80e389407
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/sync_on_engine_machine.yml
@@ -0,0 +1,12 @@
+---
+- name: Set the name for add_host
+ ansible.builtin.set_fact:
+ he_fqdn_ansible_host: "{{ local_vm_ip.stdout_lines[0] }}"
+- name: Register the engine VM as an ansible host
+ import_tasks: add_engine_as_ansible_host.yml
+- name: Sync on engine machine
+ ansible.builtin.command: sync
+ changed_when: true
+ ignore_errors: true
+ ignore_unreachable: true
+ delegate_to: "{{ groups.engine[0] }}"
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/validate_ip_prefix.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/validate_ip_prefix.yml
new file mode 100644
index 000000000..eb72b33bd
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/validate_ip_prefix.yml
@@ -0,0 +1,29 @@
+---
+- name: Validate IP prefix
+ block:
+ - name: IPv4 Validation
+ block:
+ - name: Get IPv4 route
+ ansible.builtin.command: ip route get {{ he_ipv4_subnet_prefix + ".1" }}
+ register: ip_route_result
+ changed_when: false
+ - name: Debug var ip_route_result
+ ansible.builtin.debug:
+ var: ip_route_result
+ - name: Check if route exists
+ include_tasks: search_available_network_subnet.yaml
+ when: ip_route_result.stdout.find("via") == -1
+ when: not ipv6_deployment|bool
+ - name: IPv6 Validation
+ block:
+ - name: Get IPv6 route
+ ansible.builtin.command: ip route get {{ he_ipv6_subnet_prefix + "::1" }}
+ register: ip_route_result
+ changed_when: false
+ - name: Debug var ip_route_result
+ ansible.builtin.debug:
+ var: ip_route_result
+ - name: Check if route exists
+ include_tasks: search_available_network_subnet.yaml
+ when: ip_route_result.stdout.find("via") == -1
+ when: ipv6_deployment|bool
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/validate_vlan_bond_mode.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/validate_vlan_bond_mode.yml
new file mode 100644
index 000000000..9cd80a691
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/validate_vlan_bond_mode.yml
@@ -0,0 +1,56 @@
+---
+- name: Check for base interface of VLAN devices
+ ansible.builtin.command: nmcli -g VLAN.PARENT device show {{ vlan_device.nic_if.nic }}
+ when: vlan_device.ansible_facts is defined and vlan_device.ansible_facts.is_vlan
+ with_items:
+ - "{{ vlan_list.results | reject('skipped') | list }}"
+ loop_control:
+ loop_var: vlan_device
+ register: vlan_base_interfaces
+- name: Get base interface types of VLAN devices
+ ansible.builtin.command: nmcli -g GENERAL.TYPE device show {{ vlan_base_interface.stdout }}
+ when: vlan_base_interface.skipped is undefined and vlan_base_interface.stdout is defined
+ with_items:
+ - "{{ vlan_base_interfaces.results }}"
+ loop_control:
+ loop_var: vlan_base_interface
+ register: vlan_base_types
+- name: Check for bond as base type of VLAN device
+ ansible.builtin.set_fact:
+ bond_parent: "{{ vlan_base_type.vlan_base_interface.stdout }}"
+ when: vlan_base_type.skipped is undefined and vlan_base_type.stdout is defined and vlan_base_type.stdout == "bond"
+ with_items:
+ - "{{ vlan_base_types.results | reject('skipped') | list }}"
+ loop_control:
+ loop_var: vlan_base_type
+ register: vlan_bond_list
+- name: Check if bond base interface of VLAN device is in supported mode
+ ansible.builtin.set_fact:
+ bond_parent_mode: "{{ hostvars[inventory_hostname]['ansible_' + vlan_bond_device.ansible_facts.bond_parent]['mode'] }}"
+ vlan_bond_invalid_if: "{{ vlan_bond_device.vlan_base_type.vlan_base_interface.vlan_device.nic_if.nic }}"
+ is_valid_bond_mode: "{{ hostvars[inventory_hostname]['ansible_' + vlan_bond_device.ansible_facts.bond_parent]['mode'] in acceptable_bond_modes }}"
+ with_items: "{{ vlan_bond_list.results | reject('skipped') | list }}"
+ loop_control:
+ loop_var: vlan_bond_device
+ register: vlan_bond_valid_mode_list
+- name: Check VLAN devices with bad bond mode base interfaces
+ ansible.builtin.set_fact:
+ bbm_vlan: "{{ vlan_bond_item.ansible_facts.vlan_bond_invalid_if }}"
+ when: not vlan_bond_item.ansible_facts.is_valid_bond_mode
+ with_items:
+ - "{{ vlan_bond_valid_mode_list.results }}"
+ loop_control:
+ loop_var: vlan_bond_item
+ register: bbm_vlan_list
+- name: Collect unsupported VLAN bonds
+ ansible.builtin.set_fact:
+ bad_vlan_bond_list: "{{ bbm_vlan_list.results | reject('skipped') | map(attribute='vlan_bond_item.ansible_facts.vlan_bond_invalid_if') | list }}"
+- name: Filter VLAN devices with invalid bond mode base interface
+ ansible.builtin.set_fact:
+ bad_vlan_bond_if_diff: "{{ host_net | difference(bad_vlan_bond_list) }}"
+- name: Fail if only VLAN devices with bad bond mode are available
+ ansible.builtin.fail:
+ msg: >-
+ Only VLAN devices {{ bad_vlan_bond_list | join(', ') }} with invalid bond mode are present.
+ Bond modes: Round Robin, TLB, ALB are unsupported.
+ when: (bad_vlan_bond_if_diff | length == 0)
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/validate_vlan_name.yml b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/validate_vlan_name.yml
new file mode 100644
index 000000000..3472fdbd4
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/validate_vlan_name.yml
@@ -0,0 +1,30 @@
+---
+- name: Collect VLAN devices naming convention pattern
+ ansible.builtin.shell: set -eo pipefail && nmcli -g VLAN.PARENT,VLAN.ID device show {{ vlan_if.nic_if.nic }} | paste -sd '.'
+ with_items:
+ - "{{ vlan_list.results | reject('skipped') | list }}"
+ loop_control:
+ loop_var: vlan_if
+ changed_when: true
+ register: vlan_pattern_list
+- name: Check VLAN devices with bad naming
+ ansible.builtin.set_fact:
+ vlan_invalid_pattern: "{{ vlan_pattern.vlan_if.nic_if.nic }}"
+ when: vlan_pattern.vlan_if.nic_if.nic != vlan_pattern.stdout
+ with_items:
+ - "{{ vlan_pattern_list.results }}"
+ loop_control:
+ loop_var: vlan_pattern
+ register: bn_vlan_filtered
+- name: Collect VLAN devices with invalid naming convention
+ ansible.builtin.set_fact:
+ bad_vlan_names_list: "{{ bn_vlan_filtered.results | reject('skipped') | map(attribute='vlan_pattern.vlan_if.nic_if.nic') | list }}"
+- name: Filter VLAN devices with invalid naming convention
+ ansible.builtin.set_fact:
+ bad_vlan_names_if_diff: "{{ host_net | difference(bad_vlan_names_list) }}"
+- name: Fail if only VLAN devices with invalid naming convention are available
+ ansible.builtin.fail:
+ msg: >-
+ Only VLAN devices {{ bad_vlan_names_list | join(', ') }} with invalid naming convention are present.
+ Supported VLAN naming convention is: VLAN_PARENT.VLAN_ID
+ when: (bad_vlan_names_if_diff | length == 0)
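Note: the check asks NetworkManager for the parent and VLAN ID of each
device and reassembles them: nmcli -g VLAN.PARENT,VLAN.ID prints the two
values on separate lines, paste -sd '.' joins them as PARENT.ID, and the
device name must equal that string, so eth0.100 (parent eth0, ID 100) passes
while a renamed device would not (example names hypothetical).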
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/broker.conf.j2 b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/broker.conf.j2
new file mode 100644
index 000000000..e71df8bbf
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/broker.conf.j2
@@ -0,0 +1,8 @@
+[email]
+smtp-server = {{ he_smtp_server }}
+smtp-port = {{ he_smtp_port }}
+source-email = {{ he_source_email }}
+destination-emails = {{ he_dest_email }}
+
+[notify]
+state_transition = maintenance|start|stop|migrate|up|down
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/fhanswers.conf.j2 b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/fhanswers.conf.j2
new file mode 100644
index 000000000..5b5b11337
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/fhanswers.conf.j2
@@ -0,0 +1,66 @@
+[environment:default]
+OVEHOSTED_CORE/screenProceed=bool:True
+OVEHOSTED_CORE/deployProceed=bool:True
+OVEHOSTED_CORE/confirmSettings=bool:True
+OVEHOSTED_NETWORK/fqdn=str:{{ he_fqdn }}
+OVEHOSTED_NETWORK/bridgeName=str:{{ he_mgmt_network }}
+OVEHOSTED_NETWORK/firewallManager=str:iptables
+OVEHOSTED_NETWORK/gateway=str:{{ he_gateway }}
+OVEHOSTED_ENGINE/clusterName=str:{{ he_cluster }}
+{# TODO: FIX #}
+OVEHOSTED_STORAGE/storageDatacenterName=str:hosted_datacenter
+OVEHOSTED_STORAGE/domainType=str:{{ he_domain_type }}
+{# TODO: FIX #}
+OVEHOSTED_STORAGE/connectionUUID=str:e29cf818-5ee5-46e1-85c1-8aeefa33e95d
+OVEHOSTED_STORAGE/LunID={{ 'str' if he_lun_id else 'none' }}:{{ he_lun_id if he_lun_id else 'None' }}
+OVEHOSTED_STORAGE/imgSizeGB=str:{{ he_disk_size_GB }}
+OVEHOSTED_STORAGE/mntOptions={{ 'str' if he_mount_options else 'none' }}:{{ he_mount_options if he_mount_options else 'None' }}
+OVEHOSTED_STORAGE/iSCSIPortalIPAddress={{ 'str' if he_iscsi_portal_addr else 'none' }}:{{ he_iscsi_portal_addr if he_iscsi_portal_addr else 'None' }}
+OVEHOSTED_STORAGE/metadataVolumeUUID=str:{{ he_metadata_disk_details.disk.image_id }}
+OVEHOSTED_STORAGE/sdUUID=str:{{ storage_domain_details.ovirt_storage_domains[0].id }}
+OVEHOSTED_STORAGE/iSCSITargetName={{ 'str' if he_iscsi_target else 'none' }}:{{ he_iscsi_target if he_iscsi_target else 'None' }}
+OVEHOSTED_STORAGE/metadataImageUUID=str:{{ he_metadata_disk_details.disk.id }}
+OVEHOSTED_STORAGE/lockspaceVolumeUUID=str:{{ he_sanlock_disk_details.disk.image_id }}
+OVEHOSTED_STORAGE/iSCSIPortalPort={{ 'str' if he_iscsi_portal_port else 'none' }}:{{ he_iscsi_portal_port if he_iscsi_portal_port else 'None' }}
+OVEHOSTED_STORAGE/imgUUID=str:{{ he_virtio_disk_details.disk.id }}
+OVEHOSTED_STORAGE/confImageUUID=str:{{ he_conf_disk_details.disk.id }}
+OVEHOSTED_STORAGE/spUUID=str:00000000-0000-0000-0000-000000000000
+OVEHOSTED_STORAGE/lockspaceImageUUID=str:{{ he_sanlock_disk_details.disk.id }}
+{# TODO: FIX #}
+OVEHOSTED_ENGINE/enableHcGlusterService=none:None
+OVEHOSTED_STORAGE/storageDomainName=str:{{ he_storage_domain_name }}
+OVEHOSTED_STORAGE/iSCSIPortal={{ 'str' if he_iscsi_tpgt else 'none' }}:{{ he_iscsi_tpgt if he_iscsi_tpgt else 'None' }}
+OVEHOSTED_STORAGE/volUUID=str:{{ he_virtio_disk_details.disk.image_id }}
+{# TODO: FIX #}
+OVEHOSTED_STORAGE/vgUUID=none:None
+OVEHOSTED_STORAGE/confVolUUID=str:{{ he_conf_disk_details.disk.image_id }}
+{% if he_domain_type=="nfs" or he_domain_type=="glusterfs" %}
+OVEHOSTED_STORAGE/storageDomainConnection=str:{{ he_storage_domain_addr }}:{{ he_storage_domain_path }}
+{% else %}
+OVEHOSTED_STORAGE/storageDomainConnection=str:{{ he_storage_domain_addr }}
+{% endif %}
+OVEHOSTED_STORAGE/iSCSIPortalUser={{ 'str' if he_iscsi_username else 'none' }}:{{ he_iscsi_username if he_iscsi_username else 'None' }}
+{# TODO: fix it #}
+OVEHOSTED_VDSM/consoleType=str:vnc
+OVEHOSTED_VM/vmMemSizeMB=int:{{ he_mem_size_MB }}
+OVEHOSTED_VM/vmUUID=str:{{ he_vm_details.vm.id }}
+OVEHOSTED_VM/vmMACAddr=str:{{ he_vm_mac_addr }}
+OVEHOSTED_VM/emulatedMachine=str:{{ he_emulated_machine }}
+OVEHOSTED_VM/vmVCpus=str:{{ he_vcpus }}
+OVEHOSTED_VM/ovfArchive=str:{{ he_appliance_ova }}
+OVEHOSTED_VM/vmCDRom=none:None
+OVEHOSTED_VM/automateVMShutdown=bool:True
+OVEHOSTED_VM/cloudInitISO=str:generate
+OVEHOSTED_VM/cloudinitInstanceDomainName={{ 'str' if he_cloud_init_domain_name else 'none' }}:{{ he_cloud_init_domain_name if he_cloud_init_domain_name else 'None' }}
+OVEHOSTED_VM/cloudinitInstanceHostName={{ 'str' if he_cloud_init_host_name else 'none' }}:{{ he_cloud_init_host_name if he_cloud_init_host_name else 'None' }}
+OVEHOSTED_VM/rootSshPubkey={{ 'str' if he_root_ssh_pubkey else 'none' }}:{{ he_root_ssh_pubkey if he_root_ssh_pubkey else 'None' }}
+OVEHOSTED_VM/cloudinitExecuteEngineSetup=bool:True
+OVEHOSTED_VM/cloudinitVMStaticCIDR={{ 'str' if he_vm_ip_addr is not none else 'none' }}:{{ he_vm_ip_addr if he_vm_ip_addr is not none else 'None' }}
+OVEHOSTED_VM/cloudinitVMTZ={{ 'str' if he_time_zone else 'none' }}:{{ he_time_zone if he_time_zone else 'None' }}
+OVEHOSTED_VM/rootSshAccess=str:yes
+OVEHOSTED_VM/cloudinitVMETCHOSTS=bool:{{ he_vm_etc_hosts }}
+OVEHOSTED_VM/cloudinitVMDNS={{ 'str' if he_dns_addr else 'none' }}:{{ he_dns_addr if he_dns_addr else 'None' }}
+OVEHOSTED_NOTIF/smtpPort=str:{{ he_smtp_port }}
+OVEHOSTED_NOTIF/smtpServer=str:{{ he_smtp_server }}
+OVEHOSTED_NOTIF/sourceEmail=str:{{ he_source_email }}
+OVEHOSTED_NOTIF/destEmail=str:{{ he_dest_email }}
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/hosted-engine.conf.j2 b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/hosted-engine.conf.j2
new file mode 100644
index 000000000..c5693caeb
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/hosted-engine.conf.j2
@@ -0,0 +1,42 @@
+fqdn={{ he_fqdn }}
+vm_disk_id={{ he_virtio_disk_details.disk.id }}
+vm_disk_vol_id={{ he_virtio_disk_details.disk.image_id }}
+vmid={{ he_vm_details.vm.id }}
+{% if he_domain_type=="nfs" or he_domain_type=="glusterfs" %}
+storage={{ he_storage_domain_addr }}:{{ he_storage_domain_path }}
+{% else %}
+storage={{ he_storage_domain_addr }}
+{% endif %}
+nfs_version={{ he_nfs_version }}
+mnt_options={{ he_mount_options }}
+conf=/var/run/ovirt-hosted-engine-ha/vm.conf
+host_id={{ host_spm_id }}
+console=vnc
+domainType={{ he_domain_type }}
+{# spUUID={{ datacenter_id }} #}
+{# To avoid triggering #}
+{# 3.5 -> 3.6 upgrade code #}
+spUUID=00000000-0000-0000-0000-000000000000
+sdUUID={{ storage_domain_details.ovirt_storage_domains[0].id }}
+{# TODO: fix it #}
+connectionUUID=e29cf818-5ee5-46e1-85c1-8aeefa33e95d
+vdsm_use_ssl=true
+gateway={{ he_gateway }}
+bridge={{ he_mgmt_network }}
+network_test={{ he_network_test }}
+tcp_t_address={{ he_tcp_t_address }}
+tcp_t_port={{ he_tcp_t_port }}
+metadata_volume_UUID={{ he_metadata_disk_details.disk.image_id }}
+metadata_image_UUID={{ he_metadata_disk_details.disk.id }}
+lockspace_volume_UUID={{ he_sanlock_disk_details.disk.image_id }}
+lockspace_image_UUID={{ he_sanlock_disk_details.disk.id }}
+conf_volume_UUID={{ he_conf_disk_details.disk.image_id }}
+conf_image_UUID={{ he_conf_disk_details.disk.id }}
+{# TODO: get OVF_STORE volume uid from the engine at deploy time #}
+
+# The following are used only for iSCSI storage
+iqn={{ he_iscsi_target }}
+portal={{ he_iscsi_tpgt }}
+user={{ he_iscsi_username }}
+password={{ he_iscsi_password }}
+port={{ he_iscsi_portal_port }}
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/ifcfg-eth0-dhcp.j2 b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/ifcfg-eth0-dhcp.j2
new file mode 100644
index 000000000..544737abe
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/ifcfg-eth0-dhcp.j2
@@ -0,0 +1,12 @@
+# generated by ovirt-hosted-engine-setup
+BOOTPROTO=dhcp
+DEVICE=eth0
+HWADDR="{{ he_vm_mac_addr }}"
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+ZONE=public
+DEFROUTE=yes
+IPV4_FAILURE_FATAL=no
+IPV6INIT=no
+NM_CONTROLLED=yes
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/ifcfg-eth0-static-ipv6.j2 b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/ifcfg-eth0-static-ipv6.j2
new file mode 100644
index 000000000..d4081ac2a
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/ifcfg-eth0-static-ipv6.j2
@@ -0,0 +1,25 @@
+# generated by ovirt-hosted-engine-setup
+BOOTPROTO=none
+DEVICE=eth0
+HWADDR="{{ he_vm_mac_addr }}"
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+ZONE=public
+IPV6INIT=yes
+IPV6_AUTOCONF=no
+IPV6ADDR={{ he_vm_ip_addr }}/{{ he_vm_ip_prefix }}
+IPV6_DEFAULTGW={{ he_gateway }}
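+{# he_dns_addr may be a comma-separated string or a list; normalize it to a list. #}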
+{% if he_dns_addr is string %}
+{% set DNS_ADDR_LIST = he_dns_addr.split(',') %}
+{% elif he_dns_addr is iterable %}
+{% set DNS_ADDR_LIST = he_dns_addr %}
+{% else %}
+{% set DNS_ADDR_LIST = [] %}
+{% endif %}
+{% for d in DNS_ADDR_LIST %}
+DNS{{loop.index}}={{ d }}
+{% endfor %}
+DEFROUTE=yes
+IPV4_FAILURE_FATAL=no
+NM_CONTROLLED=yes
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/ifcfg-eth0-static.j2 b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/ifcfg-eth0-static.j2
new file mode 100644
index 000000000..2f61d2620
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/ifcfg-eth0-static.j2
@@ -0,0 +1,25 @@
+# generated by ovirt-hosted-engine-setup
+BOOTPROTO=none
+DEVICE=eth0
+HWADDR="{{ he_vm_mac_addr }}"
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+ZONE=public
+IPADDR={{ he_vm_ip_addr }}
+PREFIX={{ he_vm_ip_prefix }}
+GATEWAY={{ he_gateway }}
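+{# he_dns_addr may be a comma-separated string or a list; normalize it to a list. #}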
+{% if he_dns_addr is string %}
+{% set DNS_ADDR_LIST = he_dns_addr.split(',') %}
+{% elif he_dns_addr is iterable %}
+{% set DNS_ADDR_LIST = he_dns_addr %}
+{% else %}
+{% set DNS_ADDR_LIST = [] %}
+{% endif %}
+{% for d in DNS_ADDR_LIST %}
+DNS{{loop.index}}={{ d }}
+{% endfor %}
+DEFROUTE=yes
+IPV4_FAILURE_FATAL=no
+IPV6INIT=no
+NM_CONTROLLED=yes
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/meta-data.j2 b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/meta-data.j2
new file mode 100644
index 000000000..3e34dccef
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/meta-data.j2
@@ -0,0 +1,2 @@
+instance-id: {{ he_vm_uuid }}
+local-hostname: {{ he_fqdn }}
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/network-config-dhcp.j2 b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/network-config-dhcp.j2
new file mode 100644
index 000000000..1da9f5d26
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/network-config-dhcp.j2
@@ -0,0 +1,11 @@
+version: 1
+config:
+ - type: physical
+ name: eth0
+ mac_address: "{{ he_vm_mac_addr|lower }}"
+ subnets:
+{% if ipv6_deployment %}
+ - type: dhcp6
+{% else %}
+ - type: dhcp
+{% endif %}
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/network-config.j2 b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/network-config.j2
new file mode 100644
index 000000000..d25c2a6f8
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/network-config.j2
@@ -0,0 +1,24 @@
+<network>
+ <name>default</name>
+ <uuid>{{ network_dict['uuid'] }}</uuid>
+ <forward mode='nat'>
+ <nat ipv6='yes'>
+ <port start='1024' end='65535'/>
+ </nat>
+ </forward>
+ <bridge name='{{ network_dict['bridge']['name'] }}' stp='{{ network_dict['bridge']['stp'] }}' delay='{{ network_dict['bridge']['delay'] }}'/>
+{% if not he_force_ip4 %}
+ <ip family='ipv6' address='{{ he_ipv6_subnet_prefix + '::1' }}' prefix='64'>
+ <dhcp>
+ <range start='{{ he_ipv6_subnet_prefix + '::10' }}' end='{{ he_ipv6_subnet_prefix + '::ff' }}'/>
+ </dhcp>
+ </ip>
+{% endif %}
+{% if not he_force_ip6 %}
+ <ip address='{{ he_ipv4_subnet_prefix + '.1' }}' netmask='255.255.255.0'>
+ <dhcp>
+ <range start='{{ he_ipv4_subnet_prefix + '.2' }}' end='{{ he_ipv4_subnet_prefix + '.254' }}'/>
+ </dhcp>
+ </ip>
+{% endif %}
+</network>
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/user-data.j2 b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/user-data.j2
new file mode 100644
index 000000000..36e15313f
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/user-data.j2
@@ -0,0 +1,20 @@
+#cloud-config
+# vim: syntax=yaml
+disable_root: false
+{% if he_root_ssh_pubkey is not none and he_root_ssh_pubkey|length > 1 %}
+ssh_authorized_keys:
+ - {{ he_root_ssh_pubkey }}
+{% endif %}
+ssh_pwauth: True
+chpasswd:
+ list: |
+ root:{{ he_hashed_appliance_password }}
+ expire: False
+{% if he_time_zone is defined %}
+timezone: {{ he_time_zone }}
+{% endif %}
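+# Make sure sshd permits root login and disable reverse DNS lookups for incoming connections.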
+bootcmd:
+ - if grep -Gq "^\s*PermitRootLogin" /etc/ssh/sshd_config; then sed -re "s/^\s*(PermitRootLogin)\s+(yes|no|without-password)/\1 yes/" -i.$(date -u +%Y%m%d%H%M%S) /etc/ssh/sshd_config; else echo "PermitRootLogin yes" >> /etc/ssh/sshd_config; fi
+ - if grep -Gq "^\s*UseDNS" /etc/ssh/sshd_config; then sed -re "s/^\s*(UseDNS)\s+(yes|no)/\1 no/" -i.$(date -u +%Y%m%d%H%M%S) /etc/ssh/sshd_config; else echo "UseDNS no" >> /etc/ssh/sshd_config; fi
+runcmd:
+ - systemctl restart sshd &
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/version.j2 b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/version.j2
new file mode 100644
index 000000000..8e64b180c
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/version.j2
@@ -0,0 +1 @@
+{{ ha_version }}
diff --git a/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/vm.conf.j2 b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/vm.conf.j2
new file mode 100644
index 000000000..fecb13306
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/vm.conf.j2
@@ -0,0 +1,16 @@
+vmId={{ he_vm_details.vm.id }}
+memSize={{ he_mem_size_MB }}
+display={{ he_console_type }}
+devices={index:2,iface:ide,address:{ controller:0, target:0,unit:0, bus:1, type:drive},specParams:{},readonly:true,deviceId:{{ he_cdrom_uuid }},path:{{ he_cdrom }},device:cdrom,shared:false,type:disk}
+devices={index:0,iface:virtio,format:raw,poolID:00000000-0000-0000-0000-000000000000,volumeID:{{ he_virtio_disk_details.disk.image_id }},imageID:{{ he_virtio_disk_details.disk.id }},specParams:{},readonly:false,domainID:{{ storage_domain_details.ovirt_storage_domains[0].id }},optional:false,deviceId:{{ he_virtio_disk_details.disk.image_id }},address:{bus:0x00, slot:0x06, domain:0x0000, type:pci, function:0x0},device:disk,shared:exclusive,propagateErrors:off,type:disk,bootOrder:1}
+devices={device:scsi,model:virtio-scsi,type:controller}
+devices={nicModel:pv,macAddr:{{ he_vm_mac_addr }},linkActive:true,network:{{ he_mgmt_network }},specParams:{},deviceId:{{ he_nic_uuid }},address:{bus:0x00, slot:0x03, domain:0x0000, type:pci, function:0x0},device:bridge,type:interface}
+devices={device:console,type:console}
+devices={device:{{ he_video_device }},alias:video0,type:video}
+devices={device:{{ he_graphic_device }},type:graphics}
+vmName={{ he_vm_name }}
+smp={{ he_vcpus }}
+maxVCpus={{ he_maxvcpus }}
+cpuType={{ cluster_cpu_model }}
+emulatedMachine={{ he_emulated_machine }}
+devices={device:virtio,specParams:{source:urandom},model:virtio,type:rng}
diff --git a/ansible_collections/ovirt/ovirt/roles/image_template/README.md b/ansible_collections/ovirt/ovirt/roles/image_template/README.md
new file mode 100644
index 000000000..0a339363a
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/image_template/README.md
@@ -0,0 +1,158 @@
+oVirt Image Template
+====================
+
+The `image_template` role creates a template from an external image. Currently the disk can be an image in a Glance external provider or a QCOW2 image.
+
+Requirements
+------------
+
+ * oVirt has to be 4.1 or higher and [ovirt-imageio] must be installed and running.
+ * CA certificate of oVirt engine. The path to CA certificate must be specified in the `ovirt_ca` variable.
+ * The `file` utility (used to validate the downloaded image).
+
+Limitations
+-----------
+
+ * We do not support Ansible Check Mode (Dry Run), because this role uses a few modules (such as the command module)
+ which do not support it. Once all modules used by this role support it, we will support it too.
+
+Role Variables
+--------------
+
+| Name | Default value | |
+|--------------------|-----------------------|----------------------------|
+| qcow_url | UNDEF (mandatory if glance is not used) | The URL of the QCOW2 image. You can specify a local file with the prefix 'file://'. |
+| qcow_url_client_cert | UNDEF | Path to client certificate if needed for retrieving QCOW from authenticated site. |
+| qcow_url_client_key | UNDEF | Path to client key if needed for retrieving QCOW from authenticated site. |
+| image_path | /tmp/ | Path where the QCOW2 image will be downloaded to. If it is a directory, the base name of the URL on the remote server will be used. |
+| image_checksum | UNDEF | If a checksum is defined, the digest of the destination file will be calculated after it is downloaded to ensure its integrity and verify that the transfer completed successfully. Format: <algorithm>:<checksum>, e.g. checksum="sha256:D98291AC[...]B6DC7B97". |
+| image_cache_download | true | When set to false, image_path will be deleted at the start and end of execution. |
+| template_cluster | Default | Name of the cluster where the template must be created. |
+| template_io_threads| UNDEF | Number of IO threads used by template. 0 means IO threading disabled. (Added in ansible 2.7)|
+| template_name | mytemplate | Name of the template. |
+| template_memory | 2GiB | Amount of memory assigned to the template. |
+| template_memory_guaranteed | UNDEF | Amount of minimal guaranteed memory of the Virtual Machine |
+| template_memory_max | UNDEF | Upper bound of virtual machine memory up to which memory hot-plug can be performed. |
+| template_cpu | 1 | Number of CPUs assigned to the template. |
+| template_disk_storage | UNDEF | Name of the data storage domain where the disk must be created. If not specified, the data storage domain is selected automatically. |
+| template_disks | [] | List of dictionaries specifying the additional template disks. See below for more detailed description. |
+| template_disk_size | 10GiB | The size of the template disk. |
+| template_disk_name | UNDEF | The name of template disk. |
+| template_disk_format | UNDEF | Format of the template disk. |
+| template_disk_interface | virtio | Interface of the template disk. (Choices: virtio, ide, virtio_scsi) |
+| template_seal | true | 'Sealing' erases all machine-specific configurations from a filesystem. Not supported on Windows. Set this to 'false' for Windows. |
+| template_timeout | 600 | Amount of time to wait for the template to be created/imported. |
+| template_type | UNDEF | The type of the template: desktop, server or high_performance (for qcow2 based templates only) |
+| template_nics | {name: nic1, profile_name: ovirtmgmt, interface: virtio} | List of dictionaries that specify the NICs of template. |
+| template_operating_system | UNDEF | Operating system of the template like: other, rhel_7x64, debian_7, see others in ovirt_template module. |
+| template_bios_type | UNDEF | Set bios type, necessary for some operating systems and secure boot. If no value is passed, default value is set from cluster. |
+| glance_image_provider | UNDEF (mandatory if qcow_url is not used) | Name of the glance image provider. |
+| glance_image | UNDEF (mandatory if qcow_url is not used) | The name of the disk in the glance provider to be imported as a template. |
+| template_prerequisites_tasks | UNDEF | Works only with a qcow image. Specify a path to an Ansible tasks file to be executed on the virtual machine before creating a template from it. Note that the qcow image must contain a guest agent which reports the IP address. |
+
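+As an illustration, a minimal set of download and verification variables might look like this (the URL, checksum, and certificate paths below are placeholders):
+
+```yaml
+qcow_url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
+image_checksum: "sha256:D98291AC[...]B6DC7B97"
+qcow_url_client_cert: /etc/pki/tls/certs/client.pem
+qcow_url_client_key: /etc/pki/tls/private/client.key
+```
+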
+Each dictionary in the `template_disks` list can contain the following attributes (see the example after the table):
+
+| Name | Default value | |
+|--------------------|----------------|----------------------------------------------|
+| name | UNDEF (Required) | The name of the additional disk. |
+| size | UNDEF (Required) | The size of the additional disk. |
+| storage_domain | UNDEF | The name of storage domain where disk should be created. If no value is passed, value is set by <i>template_disk_storage</i>. |
+| interface | UNDEF | The interface of the disk. If no value is passed, value is set by <i>template_disk_interface</i>. |
+| format | UNDEF | Specify format of the disk. If no value is passed, value is set by <i>template_disk_format</i>. <ul><li>cow - If set, the disk will by created as sparse disk, so space will be allocated for the volume as needed. This format is also known as thin provisioned disks</li><li>raw - If set, disk space will be allocated right away. This format is also known as preallocated disks.</li></ul> |
+| bootable | UNDEF | True if the disk should be bootable. |
+
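+For example, two additional template disks could be described as follows (the names, sizes, and storage domain are illustrative):
+
+```yaml
+template_disks:
+  - name: data_disk
+    size: 20GiB
+    format: cow
+    bootable: false
+  - name: logs_disk
+    size: 5GiB
+    storage_domain: mydata
+```
+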
+Example Playbook
+----------------
+
+```yaml
+---
+- name: Create a template from qcow
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ engine_fqdn: ovirt-engine.example.com
+ engine_user: admin@internal
+ engine_password: 123456
+ engine_cafile: /etc/pki/ovirt-engine/ca.pem
+
+ qcow_url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
+ #qcow_url: file:///tmp/CentOS-7-x86_64-GenericCloud.qcow2
+ template_cluster: production
+ template_name: centos7_template
+ template_memory: 4GiB
+ template_cpu: 2
+ template_disk_size: 10GiB
+ template_disk_storage: mydata
+
+ roles:
+ - image_template
+ collections:
+ - ovirt.ovirt
+
+
+- name: Create a template from a disk stored in glance
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ engine_fqdn: ovirt-engine.example.com
+ engine_user: admin@internal
+ engine_password: 123456
+ engine_cafile: /etc/pki/ovirt-engine/ca.pem
+
+ glance_image_provider: qe-infra-glance
+ glance_image: rhel7.4_ovirt4.2_guest_disk
+ template_cluster: production
+ template_name: centos7_template
+ template_memory: 4GiB
+ template_cpu: 2
+ template_disk_size: 10GiB
+ template_disk_storage: mydata
+
+ roles:
+ - image_template
+ collections:
+ - ovirt.ovirt
+
+- name: Create a template from qcow2.xz
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ pre_tasks:
+ - name: Download qcow2.xz file
+ get_url:
+ url: "https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2.xz"
+ dest: /tmp
+ register: downloaded_file
+
+ - name: Extract downloaded QCOW image
+ command: "unxz --keep --force {{ downloaded_file.dest }}"
+
+ - name: Set qcow_url to extracted file
+ set_fact:
+ qcow_url: "file://{{ (downloaded_file.dest | splitext)[0] }}"
+ vars:
+ engine_fqdn: ovirt-engine.example.com
+ engine_user: admin@internal
+ engine_password: 123456
+ engine_cafile: /etc/pki/ovirt-engine/ca.pem
+
+ template_cluster: production
+ template_name: centos7_template
+ template_memory: 4GiB
+ template_cpu: 2
+ template_disk_size: 10GiB
+ template_disk_storage: mydata
+
+ roles:
+ - image_template
+ collections:
+ - ovirt.ovirt
+```
+
+[![asciicast](https://asciinema.org/a/111478.png)](https://asciinema.org/a/111478)
+
+[ovirt-imageio]: http://www.ovirt.org/develop/release-management/features/storage/image-upload/
diff --git a/ansible_collections/ovirt/ovirt/roles/image_template/defaults/main.yml b/ansible_collections/ovirt/ovirt/roles/image_template/defaults/main.yml
new file mode 100644
index 000000000..db3f22343
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/image_template/defaults/main.yml
@@ -0,0 +1,21 @@
+---
+image_path: /tmp
+image_cache_download: true
+image_download_timeout: 180
+template_cluster: Default
+template_name: mytemplate
+template_memory: 2GiB
+template_cpu: 1
+template_disk_size: 10GiB
+template_operating_system: rhel_7x64
+template_timeout: 600
+template_disks: []
+template_disk_interface: virtio
+template_nics:
+ - name: nic1
+ profile_name: ovirtmgmt
+ interface: virtio
+template_seal: true
+
+disk_resize_timeout: 60
+disk_storage_domain: null
diff --git a/ansible_collections/ovirt/ovirt/roles/image_template/examples/ovirt_image_template.yml b/ansible_collections/ovirt/ovirt/roles/image_template/examples/ovirt_image_template.yml
new file mode 100644
index 000000000..0629157a0
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/image_template/examples/ovirt_image_template.yml
@@ -0,0 +1,27 @@
+---
+- name: oVirt image template
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars_files:
+ # Contains the `engine_password` variable encrypted using ansible-vault
+ - passwords.yml
+
+ vars:
+ engine_fqdn: ovirt-engine.example.com
+ engine_user: admin@internal
+ engine_cafile: /etc/pki/ovirt-engine/ca.pem
+ qcow_url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
+ # qcow_url: file:///tmp/CentOS-7-x86_64-GenericCloud.qcow2
+ template_cluster: production
+ template_name: centos7_template
+ template_memory: 2GiB
+ template_cpu: 2
+ template_disk_size: 10GiB
+ template_disk_storage: nfs
+
+ roles:
+ - image_template
+ collections:
+ - ovirt.ovirt
diff --git a/ansible_collections/ovirt/ovirt/roles/image_template/examples/passwords.yml b/ansible_collections/ovirt/ovirt/roles/image_template/examples/passwords.yml
new file mode 100644
index 000000000..92c7613c9
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/image_template/examples/passwords.yml
@@ -0,0 +1,12 @@
+---
+# As an example this file is kept in plaintext. If you want to
+# encrypt this file, please execute the following command:
+#
+# $ ansible-vault encrypt passwords.yml
+#
+# It will ask you for a password, which you must then pass to
+# ansible interactively when executing the playbook.
+#
+# $ ansible-playbook myplaybook.yml --ask-vault-pass
+#
+engine_password: 123456
diff --git a/ansible_collections/ovirt/ovirt/roles/image_template/tasks/empty.yml b/ansible_collections/ovirt/ovirt/roles/image_template/tasks/empty.yml
new file mode 100644
index 000000000..157d95ab0
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/image_template/tasks/empty.yml
@@ -0,0 +1,8 @@
+---
+# Placeholder Ansible tasks file, to avoid WARNINGS in the play
+# when the user doesn't specify template_prerequisites_tasks.
+# Once Ansible supports delegate_to with include_tasks,
+# this file won't be needed.
+- name: Placeholder
+ ansible.builtin.debug:
+ msg: ""
diff --git a/ansible_collections/ovirt/ovirt/roles/image_template/tasks/glance_image.yml b/ansible_collections/ovirt/ovirt/roles/image_template/tasks/glance_image.yml
new file mode 100644
index 000000000..129094d03
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/image_template/tasks/glance_image.yml
@@ -0,0 +1,62 @@
+---
+- name: Glance image
+ block:
+ - name: Check mandatory parameters
+ ansible.builtin.fail:
+ msg: "one of the mandatory parameters glance_image_provider or glance_image is not defined"
+ when: "glance_image_provider is undefined or glance_image is undefined"
+
+ - name: Login to oVirt
+ ovirt_auth:
+ url: "{{ engine_url | default(lookup('env','OVIRT_URL')) | default(omit) }}"
+ hostname: "{{ engine_fqdn | default(lookup('env','OVIRT_HOSTNAME')) | default(omit) }}"
+ username: "{{ engine_user | default(lookup('env','OVIRT_USERNAME')) | default(omit) }}"
+ password: "{{ engine_password | default(lookup('env','OVIRT_PASSWORD')) | default(omit) }}"
+ ca_file: "{{ engine_cafile | default(lookup('env','OVIRT_CAFILE')) | default(omit) }}"
+ insecure: "{{ engine_insecure | default(true) }}"
+ when: ovirt_auth is undefined or not ovirt_auth
+ register: loggedin
+ tags:
+ - always
+
+ - name: Fetch storages
+ ovirt_storage_domain_info:
+ auth: "{{ ovirt_auth }}"
+ register: sd_info
+ tags:
+ - ovirt-template-image
+
+ - name: Find data domain
+ ansible.builtin.set_fact:
+ disk_storage_domain: "{{ sd_info.ovirt_storage_domains | ovirt.ovirt.json_query(the_query) | list | first }}"
+ vars:
+ the_query: "[?type=='data']"
+ tags:
+ - ovirt-template-image
+
+ - name: Import templates from glance
+ ovirt_template:
+ auth: "{{ ovirt_auth }}"
+ state: imported
+ name: "{{ template_name }}"
+ template_image_disk_name: "{{ template_disk_name | default(omit) }}"
+ image_provider: "{{ glance_image_provider }}"
+ image_disk: "{{ glance_image }}"
+ io_threads: "{{ template_io_threads | default(omit) }}"
+ storage_domain: "{{ template_disk_storage | default(disk_storage_domain.name) }}"
+ cluster: "{{ template_cluster }}"
+ operating_system: "{{ template_operating_system | default(omit) }}"
+ memory: "{{ template_memory | default(omit) }}"
+ memory_guaranteed: "{{ template_memory_guaranteed | default(omit) }}"
+ memory_max: "{{ template_memory_max | default(omit) }}"
+ timeout: "{{ template_timeout }}"
+ bios_type: "{{ template_bios_type | default(omit) }}"
+
+ always:
+ - name: Logout from oVirt
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
+ when: not loggedin.skipped | default(false)
+ tags:
+ - always
diff --git a/ansible_collections/ovirt/ovirt/roles/image_template/tasks/main.yml b/ansible_collections/ovirt/ovirt/roles/image_template/tasks/main.yml
new file mode 100644
index 000000000..2729a7f56
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/image_template/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+- name: Fail on invalid parameters
+ ansible.builtin.fail:
+ msg: "You must specify either qcow_url or glance_image, not both"
+ when: "glance_image is defined and qcow_url is defined"
+
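+# Dispatch to glance_image.yml or qcow2_image.yml depending on which variable is defined.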
+- name: Image upload
+ include_tasks: "{{ (glance_image is defined) | ternary('glance', 'qcow2') }}_image.yml"
diff --git a/ansible_collections/ovirt/ovirt/roles/image_template/tasks/qcow2_image.yml b/ansible_collections/ovirt/ovirt/roles/image_template/tasks/qcow2_image.yml
new file mode 100644
index 000000000..a1c465f96
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/image_template/tasks/qcow2_image.yml
@@ -0,0 +1,259 @@
+---
+- name: Check mandatory parameters
+ ansible.builtin.fail:
+ msg: "mandatory parameter 'qcow_url' is not defined"
+ when: "qcow_url is undefined"
+
+- name: Check if {{ image_path }} is a directory
+ ansible.builtin.stat:
+ path: "{{ image_path }}"
+ register: image_path_st
+
+- name: Download the qcow image
+ ansible.builtin.get_url:
+ url: "{{ qcow_url }}"
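+ # If image_path is a directory, download to <image_path>/<URL basename>; collapse any accidental double slashes.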
+ dest: "{{ image_path_st.stat.isdir is defined and image_path_st.stat.isdir | ternary(image_path~'/'~qcow_url.rpartition('/')[-1], image_path) | regex_replace('//', '/') }}" # noqa yaml[line-length]
+ force: "{{ not image_cache_download }}"
+ checksum: "{{ image_checksum | default(omit) }}"
+ timeout: "{{ image_download_timeout }}"
+ client_cert: "{{ qcow_url_client_cert | default(omit) }}"
+ client_key: "{{ qcow_url_client_key | default(omit) }}"
+ mode: "0644"
+ register: downloaded_file
+ tags:
+ - ovirt-template-image
+
+- name: Check file type
+ ansible.builtin.command: "/usr/bin/file {{ downloaded_file.dest | quote }}"
+ changed_when: false
+ register: filetype
+ tags:
+ - ovirt-template-image
+
+- name: Fail if image is not qcow
+ ansible.builtin.fail:
+ msg: "The downloaded file is not valid QCOW file."
+ when: '"QCOW" not in filetype.stdout'
+ tags:
+ - ovirt-template-image
+
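+# `file` reports the image like "QEMU QCOW2 Image (v3), 8589934592 bytes";
+# whitespace-split field 5 is the virtual size in bytes, rounded up to whole GiB below.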
+- name: Calculate image size in GiB
+ ansible.builtin.set_fact:
+ qcow2_size: "{{ (filetype.stdout_lines[0].split()[5] | int / 2**30) | round(0, 'ceil') | int }}GiB"
+
+- name: Main block
+ block:
+ - name: Login to oVirt
+ ovirt_auth:
+ url: "{{ engine_url | default(lookup('env','OVIRT_URL')) | default(omit) }}"
+ hostname: "{{ engine_fqdn | default(lookup('env','OVIRT_HOSTNAME')) | default(omit) }}"
+ username: "{{ engine_user | default(lookup('env','OVIRT_USERNAME')) | default(omit) }}"
+ password: "{{ engine_password | default(lookup('env','OVIRT_PASSWORD')) | default(omit) }}"
+ ca_file: "{{ engine_cafile | default(lookup('env','OVIRT_CAFILE')) | default(omit) }}"
+ insecure: "{{ engine_insecure | default(true) }}"
+ when: ovirt_auth is undefined or not ovirt_auth
+ register: loggedin
+ tags:
+ - always
+
+ - name: Fetch the datacenter name
+ ovirt_datacenter_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "Clusters.name = {{ template_cluster }}"
+ register: dc_info
+
+ - name: Fetch storages
+ ovirt_storage_domain_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "datacenter={{ dc_info.ovirt_datacenters[0].name }}"
+ register: sd_info
+ when: template_disk_storage is undefined
+ tags:
+ - ovirt-template-image
+
+ - name: Find data domain
+ ansible.builtin.set_fact:
+ disk_storage_domain: "{{ sd_info.ovirt_storage_domains | ovirt.ovirt.json_query(the_query) | list | first }}"
+ when: template_disk_storage is undefined
+ vars:
+ the_query: "[?type=='data']"
+ tags:
+ - ovirt-template-image
+
+ - name: Check if template already exists
+ ovirt_template_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "name={{ template_name }} and datacenter={{ dc_info.ovirt_datacenters[0].name }}"
+ register: template_info
+ tags:
+ - ovirt-template-image
+
+ - name: Deploy the qcow image to oVirt engine
+ ovirt_disk:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ template_disk_name | default(template_name) }}"
+ size: "{{ qcow2_size }}"
+ format: "{{ template_disk_format | default(omit) }}"
+ image_path: "{{ downloaded_file.dest }}"
+ storage_domain: "{{ template_disk_storage | default(disk_storage_domain.name) }}"
+ force: "{{ template_info.ovirt_templates | length == 0 }}"
+ register: ovirt_disk
+ when: template_info.ovirt_templates | length == 0
+ tags:
+ - ovirt-template-image
+
+ - name: Wait until the qcow image is unlocked by the oVirt engine
+ ovirt_disk_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "id={{ ovirt_disk.id }}"
+ register: disk_info
+ until: ((ovirt_disk is defined) and (ovirt_disk.disk.status != "locked")) or ((disk_info is defined) and (disk_info.ovirt_disks[0].status != "locked"))
+ retries: 20
+ delay: 3
+ when: template_info.ovirt_templates | length == 0
+ tags:
+ - ovirt-template-image
+
+ - name: Create vm block
+ block:
+ - name: Generate SSH keys
+ ansible.builtin.command: "ssh-keygen -t rsa -f {{ tmp_private_key_file }} -N ''"
+ args:
+ creates: "{{ tmp_private_key_file }}"
+ when: template_prerequisites_tasks is defined
+ delegate_to: localhost
+
+ - name: Create vm
+ ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ vm_name }}"
+ state: "{{ 'running' if template_prerequisites_tasks is defined else 'stopped' }}"
+ cluster: "{{ template_cluster }}"
+ io_threads: "{{ template_io_threads | default(omit) }}"
+ memory: "{{ template_memory | default(omit) }}"
+ memory_guaranteed: "{{ template_memory_guaranteed | default(omit) }}"
+ memory_max: "{{ template_memory_max | default(omit) }}"
+ cpu_cores: "{{ template_cpu }}"
+ operating_system: "{{ template_operating_system }}"
+ type: "{{ template_type | default(omit) }}"
+ bios_type: "{{ template_bios_type | default(omit) }}"
+ cloud_init: "{{ {'user_name': 'root', 'authorized_ssh_keys': lookup('file', tmp_private_key_file~'.pub') } if template_prerequisites_tasks is defined else omit }}" # noqa yaml[line-length]
+ disks:
+ - id: "{{ disk_info.ovirt_disks[0].id }}"
+ bootable: true
+ interface: "{{ template_disk_interface }}"
+ nics: "{{ template_nics }}"
+
+ - name: Manage disks
+ ovirt_disk:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ item.name | default(omit) }}"
+ size: "{{ item.size | default(omit) }}"
+ interface: "{{ item.interface | default(template_disk_interface) | default(omit) }}"
+ vm_name: "{{ vm_name }}"
+ format: "{{ item.format | default(template_disk_format) | default(omit) }}"
+ storage_domain: "{{ item.storage_domain | default(template_disk_storage) | default(omit) }}"
+ bootable: "{{ item.bootable | default(omit) }}"
+ wait: true
+ loop: "{{ template_disks }}"
+
+ - name: Wait for IPv4 block
+ block:
+ - name: Wait for VMs IPv4
+ ovirt_vm_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "name={{ vm_name }}"
+ fetch_nested: true
+ nested_attributes: ips
+ register: vm_info
+ until: "vm_info.ovirt_vms | ovirt.ovirt.ovirtvmipv4 | length > 0"
+ retries: 10
+ delay: 5
+
+ - name: Set IP of the VM
+ ansible.builtin.set_fact:
+ vm_ip: "{{ vm_info.ovirt_vms | ovirt.ovirt.ovirtvmipv4 }}"
+
+ - name: Include prerequisites tasks for VM
+ import_tasks: "{{ template_prerequisites_tasks if template_prerequisites_tasks is defined else 'empty.yml' }}"
+ delegate_to: "{{ vm_ip }}"
+ vars:
+ ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+ ansible_user: root
+ ansible_ssh_private_key_file: "{{ tmp_private_key_file | default(omit) }}"
+
+ - name: Remove SSH keys
+ ansible.builtin.file:
+ state: absent
+ path: "{{ item }}"
+ delegate_to: localhost
+ with_items:
+ - "{{ tmp_private_key_file }}"
+ - "{{ tmp_private_key_file }}.pub"
+
+ - name: Stop vm
+ ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ vm_name }}"
+ state: stopped
+
+ when: template_prerequisites_tasks is defined
+
+ when: template_info.ovirt_templates | length == 0
+
+ - name: Resize disk block
+ block:
+ - name: Resize disk if smaller than template_disk_size
+ ovirt_disk:
+ id: "{{ disk_info.ovirt_disks[0].id }}"
+ vm_name: "{{ vm_name }}"
+ auth: "{{ ovirt_auth }}"
+ size: "{{ template_disk_size }}"
+
+ - name: Wait for resize
+ ovirt_disk:
+ id: "{{ disk_info.ovirt_disks[0].id }}"
+ auth: "{{ ovirt_auth }}"
+ register: resized_disk
+ until: resized_disk.disk.provisioned_size != disk_info.ovirt_disks[0].provisioned_size
+ retries: "{{ (disk_resize_timeout / 3) | int }}"
+ delay: 3
+ when:
+ - (template_disk_size | regex_replace('GiB') | int) > (qcow2_size | regex_replace('GiB') | int)
+ - template_info.ovirt_templates | length == 0
+
+ - name: Create template
+ ovirt_template:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ template_name }}"
+ vm: "{{ vm_name }}"
+ cluster: "{{ template_cluster }}"
+ timeout: "{{ template_timeout }}"
+ seal: "{{ template_seal }}"
+ when: template_info.ovirt_templates | length == 0
+ tags:
+ - ovirt-template-image
+
+ always:
+ - name: Remove downloaded image
+ ansible.builtin.file:
+ path: "{{ downloaded_file.dest }}"
+ state: absent
+ when: not image_cache_download
+
+ - name: Remove vm
+ ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ state: absent
+ name: "{{ vm_name }}"
+ when: template_info.ovirt_templates | length == 0
+ tags:
+ - ovirt-template-image
+
+ - name: Logout from oVirt
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
+ when: not loggedin.skipped | default(false)
+ tags:
+ - always
diff --git a/ansible_collections/ovirt/ovirt/roles/image_template/vars/main.yml b/ansible_collections/ovirt/ovirt/roles/image_template/vars/main.yml
new file mode 100644
index 000000000..3ceedc030
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/image_template/vars/main.yml
@@ -0,0 +1,3 @@
+---
+vm_name: temporary_vm_name_12367123
+tmp_private_key_file: /tmp/.image_template.key
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/README.md b/ansible_collections/ovirt/ovirt/roles/infra/README.md
new file mode 100644
index 000000000..24e3955c7
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/README.md
@@ -0,0 +1,438 @@
+oVirt Infra
+===========
+
+The `infra` role enables you to set up oVirt infrastructure including: mac pools, data centers, clusters, networks, hosts, users, and groups.
+
+Target machine
+--------------
+If you use this role for user management, it relies on `ovirt-aaa-jdbc-tool`, which is located on the engine machine,
+so you must execute the role on the engine machine.
+
+
+Role Variables
+--------------
+
+### Datacenter
+To set up or clean up a data center you can use the following variables (an example follows the table):
+
+| Name | Default value | Description |
+|--------------------------|-----------------------|--------------------------------------|
+| data_center_name | UNDEF | Name of the data center. |
+| data_center_description | UNDEF | Description of the data center. |
+| data_center_local | false | Specify whether the data center is shared or local. |
+| compatibility_version | UNDEF | Compatibility version of data center. |
+| data_center_state | present | Specify whether the datacenter should be present or absent. |
+| recursive_cleanup | false | Specify whether to recursively remove all entities inside the DC. Valid only when data_center_state == absent. |
+| format_storages | false | Specify whether to format ALL the storages that are going to be removed as part of the DC. Valid only when data_center_state == absent and recursive_cleanup == true. |
+
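+For example, a hypothetical cleanup run that removes a data center together with all its entities could set (the values are illustrative):
+
+```yaml
+data_center_name: mydatacenter
+data_center_state: absent
+recursive_cleanup: true
+format_storages: true
+```
+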
+### MAC pools
+To set up MAC pools you can define a list variable called `mac_pools`.
+The items in the `mac_pools` list can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------------------|-----------------------|-------------------------------------------------------------------|
+| mac_pool_name | UNDEF | Name of the MAC pool to manage. |
+| mac_pool_ranges | UNDEF | List of MAC ranges. The from and to values should be separated by a comma. For example: 00:1a:4a:16:01:51,00:1a:4a:16:01:61 |
+| mac_pool_allow_duplicates | UNDEF | If (true) allow a MAC address to be used multiple times in a pool. Default value is set by oVirt engine to false. |
+
+### Clusters
+To set up clusters you can define a list variable called `clusters`.
+The items in the `clusters` list can contain the following parameters:
+
+| Name | Default value | Description |
+|-----------------------------------|---------------------|------------------------------------------|
+| name | UNDEF (Required) | Name of the cluster. |
+| state | present | State of the cluster. |
+| cpu_type | Intel Conroe Family | CPU type of the cluster. |
+| profile | UNDEF | The cluster profile. You can choose a predefined cluster profile, see the tables below. |
+| ballooning | UNDEF | If True, enables memory balloon optimization. Memory ballooning is used to dynamically redistribute or reclaim host memory based on VM needs. |
+| description | UNDEF | Description of the cluster. |
+| ksm | UNDEF | If True, MoM is enabled to run Kernel Same-page Merging (KSM) when necessary and when it can yield a memory saving benefit that outweighs its CPU cost. |
+| ksm_numa | UNDEF | If True, enables KSM for best performance inside NUMA nodes. |
+| vm_reason | UNDEF | If True, enables an optional reason field when a virtual machine is shut down from the Manager, allowing the administrator to provide an explanation for the maintenance. |
+| host_reason | UNDEF | If True, enables an optional reason field when a host is placed into maintenance mode from the Manager, allowing the administrator to provide an explanation for the maintenance. |
+| memory_policy<br/>alias: <i>performance_preset</i> | UNDEF | <ul><li>disabled - Disables memory page sharing.</li><li>server - Sets the memory page sharing threshold to 150% of the system memory on each host.</li><li>desktop - Sets the memory page sharing threshold to 200% of the system memory on each host.</li></ul> |
+| migration_policy | UNDEF | A migration policy defines the conditions for live migrating virtual machines in the event of host failure. Following policies are supported:<ul><li>legacy - Legacy behavior of 3.6 version.</li><li>minimal_downtime - Virtual machines should not experience any significant downtime.</li><li>suspend_workload - Virtual machines may experience a more significant downtime.</li><li>post_copy - Virtual machines should not experience any significant downtime. If the VM migration is not converging for a long time, the migration will be switched to post-copy</li></ul> |
+| scheduling_policy | UNDEF | The scheduling policy used by the cluster. |
+| ha_reservation | UNDEF | If True, enables oVirt/RHV to monitor cluster capacity for highly available virtual machines. |
+| fence_enabled | UNDEF | If True, enables fencing on the cluster. |
+| fence_connectivity_threshold | UNDEF | The threshold used by <i>fence_skip_if_connectivity_broken</i>. |
+| fence_skip_if_connectivity_broken | UNDEF | If True, fencing will be temporarily disabled if the percentage of hosts in the cluster that are experiencing connectivity issues is greater than or equal to the defined threshold. |
+| fence_skip_if_sd_active | UNDEF | If True, any hosts in the cluster that are Non Responsive and still connected to storage will not be fenced. |
+| mac_pool | UNDEF | Mac pool name. |
+| comment | UNDEF | Comment of the cluster. |
+| migration_bandwidth | UNDEF | The bandwidth settings define the maximum bandwidth of both outgoing and incoming migrations per host.<br/>Following bandwidth options are supported:<br/><ul><li>auto - Bandwidth is copied from the rate limit [Mbps] setting in the data center host network QoS.</li><li>hypervisor_default - Bandwidth is controlled by local VDSM setting on sending host.</li><li>custom - Defined by user (in Mbps).</li></ul> |
+| migration_bandwidth_limit | UNDEF | Set the custom migration bandwidth limit. |
+| network | UNDEF | Management network of cluster to access cluster hosts. |
+| resilience_policy | UNDEF | The resilience policy defines how the virtual machines are prioritized in the migration.<br/>Following values are supported:<br/><ul><li>do_not_migrate - Prevents virtual machines from being migrated.</li><li>migrate - Migrates all virtual machines in order of their defined priority.</li><li>migrate_highly_available - Migrates only highly available virtual machines to prevent overloading other hosts.</li></ul> |
+| rng_sources | UNDEF | List that specifies the random number generator devices that all hosts in the cluster will use. Supported generators are: <i>hwrng</i> and <i>random</i>. |
+| serial_policy | UNDEF | Specify a serial number policy for the virtual machines in the cluster.<br/>Following options are supported:<br/><ul><li>vm - Sets the virtual machine's UUID as its serial number.</li><li>host - Sets the host's UUID as the virtual machine's serial number.</li><li>custom - Allows you to specify a custom serial number in serial_policy_value.</li></ul> |
+| serial_policy_value | UNDEF | Allows you to specify a custom serial number. This parameter is used only when <i>serial_policy</i> is custom. |
+| spice_proxy | UNDEF | The proxy by which the SPICE client will connect to virtual machines. The address must be in the following format: protocol://[host]:[port] |
+| switch_type | UNDEF | Type of switch to be used by all networks in a given cluster. Either legacy, which uses Linux bridge, or ovs, which uses Open vSwitch. |
+| threads_as_cores | UNDEF | If True, exposed host threads will be treated as cores which can be utilized by virtual machines. |
+| trusted_service | UNDEF | If True, enables integration with an OpenAttestation server.|
+| virt | UNDEF | If True, hosts in this cluster will be used to run virtual machines. Default is true. |
+| gluster | UNDEF | If True, hosts in this cluster will be used as Gluster Storage server nodes, and not for running virtual machines. |
+| external_network_providers | UNDEF | List that specifies the external network providers available in the cluster. |
+
+More information about the parameters can be found in the [ovirt_cluster](http://docs.ansible.com/ansible/ovirt_cluster_module.html) module documentation.
+
+#### Cluster Profile
+Possible `profile` options of a cluster are `development` and `production`; their default values are described below:
+
+##### Development
+The `development` cluster profile has the following predefined values:
+
+| Parameter | Value |
+|------------------|---------------|
+| ballooning | true |
+| ksm | true |
+| host_reason | false |
+| vm_reason | false |
+| memory_policy | server |
+| migration_policy | post_copy |
+
+##### Production
+The `production` cluster profile has the following predefined values:
+
+| Parameter | Value |
+|-----------------------------------|--------------------|
+| ballooning | false |
+| ksm | false |
+| host_reason | true |
+| vm_reason | true |
+| memory_policy | disabled |
+| migration_policy | suspend_workload |
+| scheduling_policy | evenly_distributed |
+| ha_reservation | true |
+| fence_enabled | true |
+| fence_skip_if_connectivity_broken | true |
+| fence_skip_if_sd_active | true |
+
+### Hosts
+To set up hosts you can define a list variable called `hosts`.
+The items in the `hosts` list can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------|------------------|---------------------------------------|
+| name | UNDEF (Required) | Name of the host. |
+| state | present | Specifies whether the host is `present` or `absent`. |
+| address | UNDEF | IP address or FQDN of the host. |
+| password | UNDEF | The host's root password. Required if <i>public_key</i> is false. |
+| public_key | UNDEF | If <i>true</i> the public key should be used to authenticate to host. |
+| cluster | UNDEF (Required) | The cluster that the host must connect to. |
+| timeout | 1800 | Maximum wait time for the host to be in an UP state. |
+| poll_interval | 20 | Polling interval to check the host status. |
+| hosted_engine | UNDEF | Specifies whether to 'deploy' the host as a hosted engine host. |
+| power_management | UNDEF | The power management configuration. You can choose predefined variables; see the table below. |
+
+If you cannot use the `hosts` variable in your playbook for whatever reason, you can change this variable's name
+by overriding the value of the `hosts_var_name` variable. Example:
+```yaml
+- name: Set up oVirt infrastructure
+ hosts: engine
+
+ roles:
+ - role: ovirt.ovirt.infra
+ vars:
+ hosts_var_name: ovirt_hosts
+ ovirt_hosts:
+ - name: host_0
+ state: present
+ address: 1.2.3.4
+ password: 123456
+ cluster: Default
+```
+
+##### Host power management
+The `power_management` dictionary can contain the following values:
+
+| Name | Default value | Description |
+|---------------|------------------|---------------------------------------|
+| address | UNDEF | Address of the power management interface. |
+| state | present | Specifies whether the host power management should be `present` or `absent`. |
+| username | UNDEF | Username to be used to connect to power management interface. |
+| password | UNDEF | Password of the user specified in C(username) parameter. |
+| type | UNDEF | Type of the power management. oVirt/RHV predefined values are drac5, ipmilan, rsa, bladecenter, alom, apc, apc_snmp, eps, wti, rsb, cisco_ucs, drac7, hpblade, ilo, ilo2, ilo3, ilo4, ilo_ssh, but user can have defined custom type. |
+| options | UNDEF | Dictionary of additional fence agent options (including Power Management slot). Additional information about options can be found at https://github.com/ClusterLabs/fence-agents/blob/master/doc/FenceAgentAPI.md. |
+| port | UNDEF | Power management interface port. |
+
+### Networks
+
+##### Logical networks
+To set up logical networks you can define a list variable called `logical_networks`.
+The `logical_networks` list can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------|----------------|---------------------------------------|
+| name | UNDEF | Name of the network. |
+| state | present | Specifies whether the network state is `present` or `absent`. |
+| vlan_tag | UNDEF | Specify VLAN tag. |
+| vm_network | True | If True, the network will be marked as a VM network. |
+| mtu | UNDEF | Maximum transmission unit (MTU) of the network. |
+| description | UNDEF | Description of the network. |
+| clusters | UNDEF | List of dictionaries describing how the network is managed in specific cluster. |
+| external_provider | UNDEF | Name of the external network provider. First it tries to import the network; when not found, it will create the network in the external provider. |
+| label | UNDEF | Name of the label to assign to the network. |
+
+More information about the parameters can be found in the [ovirt_network](http://docs.ansible.com/ansible/ovirt_network_module.html) module documentation.
+
+##### Host networks
+To set up host networks you can define a list variable called `host_networks`.
+The `host_networks` list can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------|----------------|---------------------------------------|
+| name | UNDEF | Name of the host. |
+| state | UNDEF | Specifies whether the network state is `present` or `absent`. |
+| check | UNDEF | If true, verifies the connection between the host and engine. |
+| save | UNDEF | If true, the network configuration will be persistent, by default it is temporary. |
+| bond | UNDEF | Dictionary describing the network bond. |
+| networks | UNDEF | Dictionary describing the networks to be attached to the interface or bond. |
+| labels | UNDEF | List of names of the network label to be assigned to the bond or interface. |
+| interface | UNDEF | Name of the network interface where the logical network should be attached. |
+
+More information about the parameters can be found in the [ovirt_host_network](http://docs.ansible.com/ansible/ovirt_host_network_module.html) module documentation.
+
+### Storages
+To set up storages you can define a dictionary variable called `storages`.
+If a domain has more than one connection, the storage connection update of this domain will be skipped.
+The value of each item in the `storages` dictionary can contain the following parameters (the key is always the name of the storage):
+
+| Name | Default value | Description |
+|-----------------|----------------|---------------------------------------|
+| master | false | If true, the storage will be added as the first storage, meaning it will be the master storage. |
+| domain_function | data | The function of the storage domain. Possible values are: <ul><li>iso</li><li>export</li><li>data</li></ul>. |
+| localfs | UNDEF | Dictionary defining local storage. |
+| nfs | UNDEF | Dictionary defining NFS storage. |
+| iscsi | UNDEF | Dictionary defining iSCSI storage. |
+| posixfs | UNDEF | Dictionary defining PosixFS storage. |
+| fcp | UNDEF | Dictionary defining FCP storage. |
+| glusterfs | UNDEF | Dictionary defining glusterFS storage. |
+| discard_after_delete | UNDEF | If True, storage domain blocks will be discarded upon deletion. Enabled by default. This parameter is relevant only for block-based storage domains. |
+
+More information about the parameters can be found in the [ovirt_storage_domain](http://docs.ansible.com/ansible/ovirt_storage_domain_module.html) module documentation.
+
+### AAA JDBC
+##### Users
+To set up users in the AAA JDBC provider you can define a list variable called `users`.
+The items in the `users` list can contain the following parameters (an example follows the table):
+
+| Name | Default value | Description |
+|---------------|----------------|---------------------------------------|
+| state | present | Specifies whether the user is `present` or `absent`. |
+| name | UNDEF | Name of the user. |
+| authz_name | UNDEF | Authorization provider of the user. |
+| password | UNDEF | Password of the user. |
+| valid_to | UNDEF | Specifies the date that the account remains valid. |
+| attributes | UNDEF | A dict of attributes related to the user. Available attributes: <ul><li>department</li><li>description</li><li>displayName</li><li>email</li><li>firstName</li><li>lastName</li><li>title</li></ul>|
+
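+An illustrative user entry using the optional attributes (all values are placeholders):
+
+```yaml
+users:
+  - name: jane.doe
+    authz_name: internal-authz
+    password: 123456
+    valid_to: "2018-01-01 00:00:00Z"
+    attributes:
+      firstName: Jane
+      lastName: Doe
+      email: jane.doe@example.com
+```
+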
+##### User groups
+To set up user groups in the AAA JDBC provider you can define a list variable called `user_groups`.
+The items in the `user_groups` list can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------|----------------|---------------------------------------|
+| state | present | Specifies whether the group is `present` or `absent`. |
+| name | UNDEF | Name of the group. |
+| authz_name | UNDEF | Authorization provider of the group. |
+| users | UNDEF | List of users that belong to this group. |
+
+### Permissions
+To set up permissions of users or groups you can define a list variable called `permissions`.
+The items in the `permissions` list can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------|----------------|----------------------------|
+| state | present | Specifies whether the state of the permission is `present` or `absent`. |
+| user_name | UNDEF | The user to manage the permission for. |
+| group_name | UNDEF | Name of the group to manage the permission for. |
+| authz_name | UNDEF | Name of the authorization provider of the group or user. |
+| role | UNDEF | The role to be assigned to the user or group. |
+| object_type | UNDEF | The object type which should be used to assign the permission. Possible object types are:<ul><li>data_center</li><li>cluster</li><li>host</li><li>storage_domain</li><li>network</li><li>disk</li><li>vm</li><li>vm_pool</li><li>template</li><li>cpu_profile</li><li>disk_profile</li><li>vnic_profile</li><li>system</li></ul> |
+| object_name | UNDEF | Name of the object where the permission should be assigned. |
+
+### External providers
+To set up external providers you can define a list variable called `external_providers`.
+The items in the `external_providers` list can contain the following parameters:
+
+| Name | Default value | Description |
+|------------------------|---------------------|----------------------------------------------------------------------------------|
+| name | UNDEF (Required) | Name of the external provider. |
+| state | present | State of the external provider. Values can be: <ul><li>present</li><li>absent</li></ul>|
+| type | UNDEF (Required) | Type of the external provider. Values can be: <ul><li>os_image</li><li>network</li><li>os_volume</li><li>foreman</li></ul>|
+| url | UNDEF | URL where external provider is hosted. Required if state is present. |
+| username | UNDEF | Username to be used for login to external provider. Applicable for all types. |
+| password | UNDEF | Password of the user specified in username parameter. Applicable for all types. |
+| tenant | UNDEF | Name of the tenant. |
+| auth_url | UNDEF | Keystone authentication URL of the openstack provider. Required for: <ul><li>os_image</li><li>network</li><li>os_volume</li></ul>|
+| data_center | UNDEF | Name of the data center where provider should be attached. Applicable for type <i>os_volume</i>. |
+| authentication_keys | UNDEF | List of authentication keys. Each key is represented by dict like {'uuid': 'my-uuid', 'value': 'secret value'}. Added in ansible 2.6. Applicable for type <i>os_volume</i>. |
+
+More information about the parameters can be found in the [ovirt_external_provider](http://docs.ansible.com/ansible/ovirt_external_provider_module.html) module documentation.
+
+Example Playbook
+----------------
+
+```yaml
+---
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ engine_fqdn: ovirt-engine.example.com
+ engine_user: admin@internal
+ engine_password: 123456
+ engine_cafile: /etc/pki/ovirt-engine/ca.pem
+ data_center_name: mydatacenter
+ compatibility_version: 4.4
+
+ mac_pools:
+ - mac_pool_name: "Default"
+ mac_pool_ranges:
+ - "00:1a:4a:16:01:51,00:1a:4a:16:01:61"
+
+ clusters:
+ - name: production
+ cpu_type: Intel Conroe Family
+ profile: production
+
+ hosts:
+ - name: myhost
+ address: 1.2.3.4
+ cluster: production
+ password: 123456
+ - name: myhost1
+ address: 5.6.7.8
+ cluster: production
+ password: 123456
+ power_management:
+ address: 9.8.7.6
+ username: root
+ password: password
+ type: ipmilan
+ options:
+ myoption1: x
+ myoption2: y
+ slot: myslot
+
+ storages:
+ mynfsstorage:
+ master: true
+ state: present
+ nfs:
+ address: 10.11.12.13
+ path: /the_path
+ myiscsistorage:
+ state: present
+ iscsi:
+ target: iqn.2014-07.org.ovirt:storage
+ port: 3260
+ address: 100.101.102.103
+ username: username
+ password: password
+ lun_id: 3600140551fcc8348ea74a99b6760fbb4
+ mytemplates:
+ domain_function: export
+ nfs:
+ address: 100.101.102.104
+ path: /exports/nfs/exported
+ myisostorage:
+ domain_function: iso
+ nfs:
+ address: 100.101.102.105
+ path: /exports/nfs/iso
+
+ logical_networks:
+ - name: mynetwork
+ clusters:
+ - name: production
+ assigned: yes
+ required: no
+ display: no
+ migration: yes
+ gluster: no
+
+ host_networks:
+ - name: myhost1
+ check: true
+ save: true
+ bond:
+ name: bond0
+ mode: 2
+ interfaces:
+ - eth2
+ - eth3
+ networks:
+ - name: mynetwork
+ boot_protocol: dhcp
+
+ users:
+ - name: john.doe
+ authz_name: internal-authz
+ password: 123456
+ valid_to: "2018-01-01 00:00:00Z"
+ - name: joe.doe
+ authz_name: internal-authz
+ password: 123456
+ valid_to: "2018-01-01 00:00:00Z"
+
+ user_groups:
+ - name: admins
+ authz_name: internal-authz
+ users:
+ - john.doe
+ - joe.doe
+
+ permissions:
+ - state: present
+ user_name: john.doe
+ authz_name: internal-authz
+ role: UserRole
+ object_type: cluster
+ object_name: production
+
+ - state: present
+ group_name: admins
+ authz_name: internal-authz
+ role: UserVmManager
+ object_type: cluster
+ object_name: production
+
+ external_providers:
+ - name: myglance
+ type: os_image
+ state: present
+ url: http://externalprovider.example.com:9292
+ username: admin
+ password: secret
+ tenant: admin
+ auth_url: http://externalprovider.example.com:35357/v2.0/
+
+ pre_tasks:
+ - name: Login to oVirt
+ ovirt_auth:
+ hostname: "{{ engine_fqdn }}"
+ username: "{{ engine_user }}"
+ password: "{{ engine_password }}"
+ ca_file: "{{ engine_cafile | default(omit) }}"
+ insecure: "{{ engine_insecure | default(true) }}"
+ tags:
+ - always
+
+ roles:
+ - ovirt.ovirt.infra
+
+ post_tasks:
+ - name: Logout from oVirt
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
+ tags:
+ - always
+```
+
+[![asciicast](https://asciinema.org/a/112415.png)](https://asciinema.org/a/112415)
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/defaults/main.yml b/ansible_collections/ovirt/ovirt/roles/infra/defaults/main.yml
new file mode 100644
index 000000000..b7462989f
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+data_center_state: 'present'
+hosts_var_name: 'hosts'
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/examples/ovirt_infra.yml b/ansible_collections/ovirt/ovirt/roles/infra/examples/ovirt_infra.yml
new file mode 100644
index 000000000..d4cb58261
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/examples/ovirt_infra.yml
@@ -0,0 +1,15 @@
+---
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars_files:
+ - vars/ovirt_infra_vars.yml
+ # Contains the `engine_password` variable encrypted with ansible-vault
+ - vars/passwords.yml
+
+ roles:
+ - infra
+ collections:
+ - ovirt.ovirt
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/examples/ovirt_infra_destroy.yml b/ansible_collections/ovirt/ovirt/roles/infra/examples/ovirt_infra_destroy.yml
new file mode 100644
index 000000000..bc45dff95
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/examples/ovirt_infra_destroy.yml
@@ -0,0 +1,43 @@
+---
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ recursive_cleanup: true
+ format_storages: true
+ data_center_name: Default
+ data_center_state: absent
+
+ engine_fqdn: ovirt-engine.example.com
+ engine_user: admin@internal
+ engine_cafile: /etc/pki/ovirt-engine/ca.pem
+
+ vars_files:
+ # Contains the `engine_password` variable encrypted with ansible-vault
+ - vars/passwords.yml
+
+ pre_tasks:
+ - name: Login to oVirt
+ ovirt_auth:
+ hostname: "{{ engine_fqdn }}"
+ username: "{{ engine_user }}"
+ password: "{{ engine_password }}"
+ ca_file: "{{ engine_cafile | default(omit) }}"
+ insecure: "{{ engine_insecure | default(true) }}"
+ tags:
+ - always
+
+ roles:
+ - infra
+
+ post_tasks:
+ - name: Logout from oVirt
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
+ tags:
+ - always
+ collections:
+ - ovirt.ovirt
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/examples/vars/ovirt_infra_vars.yml b/ansible_collections/ovirt/ovirt/roles/infra/examples/vars/ovirt_infra_vars.yml
new file mode 100644
index 000000000..81f01c15e
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/examples/vars/ovirt_infra_vars.yml
@@ -0,0 +1,108 @@
+---
+###########################
+# REST API variables
+###########################
+engine_fqdn: ovirt-engine.example.com
+engine_user: admin@internal
+engine_cafile: /etc/pki/ovirt-engine/ca.pem
+
+###########################
+# Common
+###########################
+compatibility_version: 4.4
+
+# Data center
+data_center_name: Default
+
+###########################
+# Clusters
+###########################
+clusters:
+ - name: Default
+ cpu_type: Intel Conroe Family
+ profile: production
+
+###########################
+# Hosts
+###########################
+hosts:
+ - name: host1
+ address: 1.2.3.5
+ cluster: Default
+ password: 123456
+ - name: host2
+ address: 1.2.3.6
+ cluster: Default
+ password: 123456
+
+###########################
+# Storage
+###########################
+storages:
+ data:
+ master: true
+ state: present
+ nfs:
+ address: 1.2.3.4
+ path: /om02
+
+###########################
+# Networks
+###########################
+logical_networks:
+ - name: int_network_002
+ clusters:
+ - name: Default
+ assigned: true
+ required: true
+ display: false
+ migration: true
+ gluster: false
+
+host_networks:
+ - name: host2
+ check: true
+ save: true
+ bond:
+ name: bond0
+ mode: 2
+ interfaces:
+ - eth1
+ - eth2
+ networks:
+ - name: int_network_002
+ boot_protocol: dhcp
+
+###########################
+# Users & Groups
+###########################
+users:
+ - name: user1
+ authz_name: internal-authz
+ password: 1234568
+ valid_to: "2018-01-01 00:00:00Z"
+ - name: user2
+ authz_name: internal-authz
+ password: 1234568
+ valid_to: "2018-01-01 00:00:00Z"
+
+user_groups:
+ - name: group1
+ authz_name: internal-authz
+ users:
+ - user1
+
+permissions:
+ - state: present
+ user_name: user1
+ authz_name: internal-authz
+ role: UserRole
+ object_type: cluster
+ object_name: Default
+
+ - state: present
+ group_name: group1
+ authz_name: internal-authz
+ role: UserRole
+ object_type: cluster
+ object_name: Default
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/examples/vars/passwords.yml b/ansible_collections/ovirt/ovirt/roles/infra/examples/vars/passwords.yml
new file mode 100644
index 000000000..92c7613c9
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/examples/vars/passwords.yml
@@ -0,0 +1,12 @@
+---
+# As an example, this file is kept in plaintext. If you want to
+# encrypt it, execute the following command:
+#
+# $ ansible-vault encrypt passwords.yml
+#
+# It will ask you for a password, which you must then pass to
+# ansible interactively when executing the playbook.
+#
+# $ ansible-playbook myplaybook.yml --ask-vault-pass
+#
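+# Alternatively (an assumption about your setup, not a requirement of this
+# example), you can store the vault password in a file and point
+# ansible-playbook at it:
+#
+# $ ansible-playbook myplaybook.yml --vault-password-file /path/to/vault_pass.txt
+#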
+engine_password: 123456
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/aaa_jdbc/README.md b/ansible_collections/ovirt/ovirt/roles/infra/roles/aaa_jdbc/README.md
new file mode 100644
index 000000000..b20ca5280
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/aaa_jdbc/README.md
@@ -0,0 +1,60 @@
+oVirt AAA JDBC
+==============
+
+The `aaa_jdbc` role manages users and groups in an AAA JDBC extension.
+
+Role Variables
+--------------
+
+The items in the `users` list can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------|----------------|---------------------------------------|
+| state | present | Specifies whether the user is `present` or `absent`. |
+| name | UNDEF | Name of the user. |
+| authz_name | UNDEF | Authorization provider of the user. |
+| password | UNDEF | Password of the user. |
+| valid_to      | UNDEF          | Specifies the date until which the account remains valid. |
+| attributes    | UNDEF          | A dict of attributes related to the user. Available attributes: <ul><li>department</li><li>description</li><li>displayName</li><li>email</li><li>firstName</li><li>lastName</li><li>title</li></ul>|
+
+The items in the `user_groups` list can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------|----------------|---------------------------------------|
+| state | present | Specifies whether the group is `present` or `absent`. |
+| name | UNDEF | Name of the group. |
+| authz_name | UNDEF | Authorization provider of the group. |
+| users | UNDEF | List of users that belong to this group. |
+
+Example Playbook
+----------------
+
+```yaml
+- name: oVirt AAA jdbc
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ users:
+ - name: user1
+ authz_name: internal-authz
+ password: 1234568
+ valid_to: "2018-01-01 00:00:00Z"
+ - name: user2
+ authz_name: internal-authz
+ password: 1234568
+ valid_to: "2018-01-01 00:00:00Z"
+ attributes:
+ firstName: 'alice'
+ department: 'Quality Engineering'
+
+ user_groups:
+ - name: group1
+ authz_name: internal-authz
+ users:
+ - user1
+
+ roles:
+ - ovirt.ovirt.infra.roles.aaa_jdbc
+```
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/aaa_jdbc/defaults/main.yml b/ansible_collections/ovirt/ovirt/roles/infra/roles/aaa_jdbc/defaults/main.yml
new file mode 100644
index 000000000..260312dbb
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/aaa_jdbc/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+aaa_jdbc_prefix: /usr/bin
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/aaa_jdbc/tasks/main.yml b/ansible_collections/ovirt/ovirt/roles/infra/roles/aaa_jdbc/tasks/main.yml
new file mode 100644
index 000000000..4568a2a83
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/aaa_jdbc/tasks/main.yml
@@ -0,0 +1,88 @@
+---
+################################
+## User & groups internal
+################################
+- name: Check if ovirt-aaa-jdbc-tool exists
+ ansible.builtin.stat:
+ path: "{{ aaa_jdbc_prefix }}/ovirt-aaa-jdbc-tool"
+ register: aaa_jdbc_path_stat
+ tags:
+ - ovirt-aaa-jdbc
+ - users
+
+- name: Fail the role if ovirt-aaa-jdbc-tool doesn't exist
+ ansible.builtin.fail:
+ msg: "{{ aaa_jdbc_prefix }}/ovirt-aaa-jdbc-tool doesn't exist, are you on the engine machine?"
+ when: not aaa_jdbc_path_stat.stat.exists
+ tags:
+ - ovirt-aaa-jdbc
+ - users
+
+- name: Manage internal users
+ no_log: true
+ ansible.builtin.command: "{{ aaa_jdbc_prefix }}/ovirt-aaa-jdbc-tool user {{ (item.state is undefined or item.state == 'present') | ternary('add','delete') }} {{ item.name }}" # noqa yaml[line-length]
+ with_items:
+ - "{{ users | default([]) }}"
+ register: out_users
+ changed_when: "out_users.rc != 5 and out_users.rc != 4"
+ # 5 == user already exists
+ # 4 == no user to be removed
+ # 0 == all OK
+ failed_when: "out_users.rc != 5 and out_users.rc != 0 and out_users.rc != 4"
+ tags:
+ - ovirt-aaa-jdbc
+ - users
+
+- name: Update users according to attributes
+ ansible.builtin.command: "{{ aaa_jdbc_prefix }}/ovirt-aaa-jdbc-tool user edit {{ item.name }} {% for attr, value in item['attributes'].items() %} --attribute={{ attr }}='{{ value }}' {% endfor %}" # noqa yaml[line-length]
+ with_items:
+ - "{{ users | default([]) }}"
+ register: out_users
+ when: "item.attributes is defined"
+ changed_when: "out_users.rc != 5 and out_users.rc != 4"
+ # 5 == user already exists
+ # 4 == no user to be removed
+ # 0 == all OK
+ failed_when: "out_users.rc != 5 and out_users.rc != 0 and out_users.rc != 4"
+ tags:
+ - ovirt-aaa-jdbc
+ - users
+
+# FIXME: when a user tries to change a password that was already used in the
+# password history but is not the current password, we continue with changed=false:
+- name: Manage internal users passwords
+ no_log: true
+ ansible.builtin.command: "{{ aaa_jdbc_prefix }}/ovirt-aaa-jdbc-tool user password-reset {{ item.name }} --password=\"pass:{{ item.password }}\" --password-valid-to=\"{{ item.valid_to }}\"" # noqa yaml[line-length]
+ with_items:
+ - "{{ users | default([]) }}"
+ register: out_users
+ when: "item.password is defined"
+ changed_when: "out_users.rc != 1"
+ failed_when: "out_users.rc != 1 and out_users.rc != 0"
+ tags:
+ - ovirt-aaa-jdbc
+ - users
+
+- name: Manage internal groups
+ ansible.builtin.command: "{{ aaa_jdbc_prefix }}/ovirt-aaa-jdbc-tool group {{ (item.state is undefined or item.state == 'present') | ternary('add','delete') }} {{ item.name }}" # noqa yaml[line-length]
+ with_items:
+ - "{{ user_groups | default([]) }}"
+ register: out_groups
+ changed_when: "out_groups.rc != 5 and out_groups.rc != 4"
+ failed_when: "out_groups.rc != 5 and out_groups.rc != 0 and out_groups.rc != 4"
+ tags:
+ - ovirt-aaa-jdbc
+ - user_groups
+
+# FIXME: Only adding members is supported; if a user is removed from the list, it is not removed from the group:
+- name: Manage groups members
+ ansible.builtin.command: "{{ aaa_jdbc_prefix }}/ovirt-aaa-jdbc-tool group-manage {{ (item.state is undefined or item.state == 'present') | ternary('useradd','userdel') }} {{ item.0.name }} --user {{ item.1 }}" # noqa yaml[line-length]
+ with_subelements:
+ - "{{ user_groups | default([]) }}"
+ - users
+ register: out_group_member
+ changed_when: "out_group_member.rc != 3 "
+ failed_when: "out_group_member.rc != 3 and out_group_member.rc != 0"
+ tags:
+ - ovirt-aaa-jdbc
+ - user_groups
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/clusters/README.md b/ansible_collections/ovirt/ovirt/roles/infra/roles/clusters/README.md
new file mode 100644
index 000000000..c05c940fc
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/clusters/README.md
@@ -0,0 +1,106 @@
+oVirt Clusters
+==============
+
+The `clusters` role is used to set up oVirt clusters.
+
+Role Variables
+--------------
+
+| Name | Default value | Description |
+|-----------------------|-----------------------|-----------------------------------------|
+| clusters | UNDEF | List of dictionaries that describe the cluster. |
+| data_center_name | UNDEF (Required) | Name of the data center. |
+| compatibility_version | UNDEF (Required) | Compatibility version of data center. |
+
+The items in the `clusters` list can contain the following parameters:
+
+| Name | Default value | Description |
+|-----------------------------------|---------------------|-----------------------------------------|
+| name | UNDEF (Required) | Name of the cluster. |
+| state | present | State of the cluster. |
+| cpu_type | Intel Conroe Family | CPU type of the cluster. |
+| profile | UNDEF | The cluster profile. You can choose a predefined cluster profile, see the tables below. |
+| ballooning | UNDEF | If True, enables memory balloon optimization. Memory ballooning is used to dynamically redistribute or reclaim host memory based on VM needs. |
+| description | UNDEF | Description of the cluster. |
+| ksm | UNDEF | If True, MoM enables Kernel Same-page Merging (KSM) when necessary and when it can yield a memory saving benefit that outweighs its CPU cost. |
+| ksm_numa | UNDEF | If True, enables KSM for best performance inside NUMA nodes. |
+| vm_reason | UNDEF | If True, enables an optional reason field when a virtual machine is shut down from the Manager, allowing the administrator to provide an explanation for the shutdown. |
+| host_reason | UNDEF | If True, enables an optional reason field when a host is placed into maintenance mode from the Manager, allowing the administrator to provide an explanation for the maintenance. |
+| memory_policy<br/>alias: <i>performance_preset</i> | UNDEF | <ul><li>disabled - Disables memory page sharing.</li><li>server - Sets the memory page sharing threshold to 150% of the system memory on each host.</li><li>desktop - Sets the memory page sharing threshold to 200% of the system memory on each host.</li></ul> |
+| migration_policy | UNDEF | A migration policy defines the conditions for live migrating virtual machines in the event of host failure. The following policies are supported:<ul><li>legacy - Legacy behavior of version 3.6.</li><li>minimal_downtime - Virtual machines should not experience any significant downtime.</li><li>suspend_workload - Virtual machines may experience a more significant downtime.</li><li>post_copy - Virtual machines should not experience any significant downtime. If the VM migration is not converging for a long time, the migration is switched to post-copy.</li></ul> |
+| scheduling_policy | UNDEF | The scheduling policy used by the cluster. |
+| ha_reservation | UNDEF | If True, enables oVirt/RHV to monitor cluster capacity for highly available virtual machines. |
+| fence_enabled | UNDEF | If True, enables fencing on the cluster. |
+| fence_connectivity_threshold | UNDEF | The threshold used by <i>fence_skip_if_connectivity_broken</i>. |
+| fence_skip_if_connectivity_broken | UNDEF | If True, fencing will be temporarily disabled if the percentage of hosts in the cluster that are experiencing connectivity issues is greater than or equal to the defined threshold. |
+| fence_skip_if_sd_active | UNDEF | If True, any hosts in the cluster that are Non Responsive and still connected to storage will not be fenced. |
+| mac_pool | UNDEF | Mac pool name. |
+| comment | UNDEF | Comment of the cluster. |
+| migration_bandwidth | UNDEF | The bandwidth settings define the maximum bandwidth of both outgoing and incoming migrations per host.<br/>The following bandwidth options are supported:<br/><ul><li>auto - Bandwidth is copied from the rate limit [Mbps] setting in the data center host network QoS.</li><li>hypervisor_default - Bandwidth is controlled by local VDSM setting on sending host.</li><li>custom - Defined by user (in Mbps).</li></ul> |
+| migration_bandwidth_limit | UNDEF | Set the custom migration bandwidth limit. |
+| network | UNDEF | Management network of cluster to access cluster hosts. |
+| resilience_policy | UNDEF | The resilience policy defines how the virtual machines are prioritized in the migration.<br/>The following values are supported:<br/><ul><li>do_not_migrate - Prevents virtual machines from being migrated.</li><li>migrate - Migrates all virtual machines in order of their defined priority.</li><li>migrate_highly_available - Migrates only highly available virtual machines to prevent overloading other hosts.</li></ul> |
+| rng_sources | UNDEF | List specifying the random number generator devices that all hosts in the cluster will use. Supported generators are <i>hwrng</i> and <i>random</i>. |
+| serial_policy | UNDEF | Specifies a serial number policy for the virtual machines in the cluster.<br/>The following options are supported:<br/><ul><li>vm - Sets the virtual machine's UUID as its serial number.</li><li>host - Sets the host's UUID as the virtual machine's serial number.</li><li>custom - Allows you to specify a custom serial number in serial_policy_value.</li></ul> |
+| serial_policy_value | UNDEF | Allows you to specify a custom serial number. This parameter is used only when <i>serial_policy</i> is custom. |
+| spice_proxy | UNDEF | The proxy by which the SPICE client will connect to virtual machines. The address must be in the following format: protocol://[host]:[port] |
+| switch_type | UNDEF | Type of switch to be used by all networks in the given cluster. Either <i>legacy</i>, which uses a Linux bridge, or <i>ovs</i>, which uses Open vSwitch. |
+| threads_as_cores | UNDEF | If True, exposed host threads are treated as cores that can be utilized by virtual machines. |
+| trusted_service | UNDEF | If True, enables integration with an OpenAttestation server.|
+| virt | UNDEF | If True, hosts in this cluster will be used to run virtual machines. Default is true. |
+| gluster | UNDEF | If True, hosts in this cluster will be used as Gluster Storage server nodes, and not for running virtual machines. |
+| external_network_providers | UNDEF | List specifying the external network providers available in the cluster. |
+
+More information about the parameters can be found in the [Ansible documentation](http://docs.ansible.com/ansible/ovirt_cluster_module.html).
+
+Possible `profile` options are `development` and `production`; their default values are described below:
+
+`Development`:
+
+| Parameter | Value |
+|------------------|---------------|
+| ballooning | true |
+| ksm | true |
+| host_reason | false |
+| vm_reason | false |
+| memory_policy | server |
+| migration_policy | post_copy |
+
+`Production`:
+
+| Parameter | Value |
+|-----------------------------------|--------------------|
+| ballooning | false |
+| ksm | false |
+| host_reason | true |
+| vm_reason | true |
+| memory_policy | disabled |
+| migration_policy | suspend_workload |
+| scheduling_policy | evenly_distributed |
+| ha_reservation | true |
+| fence_enabled | true |
+| fence_skip_if_connectivity_broken | true |
+| fence_skip_if_sd_active | true |
+
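+Any profile value can be overridden per cluster, because the role's tasks prefer
+an explicitly set item parameter over the profile default. A minimal sketch
+(the values here are illustrative):
+
+```yaml
+clusters:
+  - name: production
+    profile: production
+    # Override individual defaults from the production profile:
+    ballooning: true
+    ksm: true
+```
+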
+Example Playbook
+----------------
+
+```yaml
+- name: oVirt clusters
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ data_center_name: mydatacenter
+ compatibility_version: 4.4
+
+ clusters:
+ - name: production
+ cpu_type: Intel Conroe Family
+ profile: production
+ mac_pool: production_mac_pools
+
+ roles:
+ - ovirt.ovirt.infra.roles.clusters
+```
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/clusters/tasks/main.yml b/ansible_collections/ovirt/ovirt/roles/infra/roles/clusters/tasks/main.yml
new file mode 100644
index 000000000..33d80eb38
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/clusters/tasks/main.yml
@@ -0,0 +1,45 @@
+---
+- name: Add clusters
+ ovirt_cluster:
+ auth: "{{ ovirt_auth }}"
+ state: "{{ item.state | default(omit) }}"
+ name: "{{ item.name }}"
+ data_center: "{{ data_center_name }}"
+ cpu_type: "{{ item.cpu_type | default('Intel Conroe Family') }}"
+ compatibility_version: "{{ compatibility_version }}"
+ mac_pool: "{{ item.mac_pool | default(omit) }}"
+ comment: "{{ item.comment | default(omit) }}"
+ external_network_providers: "{{ item.external_network_providers | default(omit) }}"
+ fence_connectivity_threshold: "{{ item.fence_connectivity_threshold | default(omit) }}"
+ gluster: "{{ item.gluster | default(omit) }}"
+ migration_bandwidth: "{{ item.migration_bandwidth | default(omit) }}"
+ migration_bandwidth_limit: "{{ item.migration_bandwidth_limit | default(omit) }}"
+ network: "{{ item.network | default(omit) }}"
+ resilience_policy: "{{ item.resilience_policy | default(omit) }}"
+ rng_sources: "{{ item.rng_sources | default(omit) }}"
+ serial_policy: "{{ item.serial_policy | default(omit) }}"
+ serial_policy_value: "{{ item.serial_policy_value | default(omit) }}"
+ spice_proxy: "{{ item.spice_proxy | default(omit) }}"
+ switch_type: "{{ item.switch_type | default(omit) }}"
+ threads_as_cores: "{{ item.threads_as_cores | default(omit) }}"
+ trusted_service: "{{ item.trusted_service | default(omit) }}"
+ virt: "{{ item.virt | default(omit) }}"
+ # Parameters part of profile:
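+ # Precedence: an explicit item value wins, then the value from the selected
+ # profile (see vars/main.yml), otherwise the parameter is omitted. The '_'
+ # profile is a fallback key so the lookup stays defined when no profile is
+ # requested.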
+ ballooning: "{{ item.ballooning | default(profiles[item.profile | default('_')].ballooning) | default(omit) }}"
+ description: "{{ item.description | default(profiles[item.profile | default('_')].description) | default(omit) }}"
+ ksm: "{{ item.ksm | default(profiles[item.profile | default('_')].ksm) | default(omit) }}"
+ ksm_numa: "{{ item.ksm_numa | default(profiles[item.profile | default('_')].ksm_numa) | default(omit) }}"
+ host_reason: "{{ item.host_reason | default(profiles[item.profile | default('_')].host_reason) | default(omit) }}"
+ vm_reason: "{{ item.vm_reason | default(profiles[item.profile | default('_')].vm_reason) | default(omit) }}"
+ memory_policy: "{{ item.memory_policy | default(item.performance_preset) | default(profiles[item.profile | default('_')].memory_policy) | default(profiles[item.profile | default('_')].performance_preset) | default('disabled') }}" # noqa yaml[line-length]
+ migration_policy: "{{ item.migration_policy | default(profiles[item.profile | default('_')].migration_policy) | default(omit) }}"
+ scheduling_policy: "{{ item.scheduling_policy | default(profiles[item.profile | default('_')].scheduling_policy) | default(omit) }}"
+ ha_reservation: "{{ item.ha_reservation | default(profiles[item.profile | default('_')].ha_reservation) | default(omit) }}"
+ fence_enabled: "{{ item.fence_enabled | default(profiles[item.profile | default('_')].fence_enabled) | default(omit) }}"
+ fence_skip_if_connectivity_broken: "{{ item.fence_skip_if_connectivity_broken | default(profiles[item.profile | default('_')].fence_skip_if_connectivity_broken) | default(omit) }}" # noqa yaml[line-length]
+ fence_skip_if_sd_active: "{{ item.fence_skip_if_sd_active | default(profiles[item.profile | default('_')].fence_skip_if_sd_active) | default(omit) }}"
+
+ with_items:
+ - "{{ clusters | default([]) }}"
+ tags:
+ - clusters
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/clusters/vars/main.yml b/ansible_collections/ovirt/ovirt/roles/infra/roles/clusters/vars/main.yml
new file mode 100644
index 000000000..9a82754b3
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/clusters/vars/main.yml
@@ -0,0 +1,27 @@
+---
+# Define profiles for clusters
+profiles:
+ development:
+ description: Development cluster
+ ballooning: true
+ ksm: true
+ host_reason: false
+ vm_reason: false
+ memory_policy: server
+ migration_policy: post_copy
+
+ production:
+ description: Production cluster
+ ballooning: false
+ ksm: false
+ host_reason: true
+ vm_reason: true
+ memory_policy: disabled
+ migration_policy: suspend_workload
+ scheduling_policy: evenly_distributed
+ ha_reservation: true
+ fence_enabled: true
+ fence_skip_if_connectivity_broken: true
+ fence_skip_if_sd_active: true
+
+ _: false
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/README.md b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/README.md
new file mode 100644
index 000000000..215b39ee7
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/README.md
@@ -0,0 +1,30 @@
+oVirt Datacenter Cleanup
+========================
+
+The `datacenter_cleanup` role is used to clean up all entities inside
+oVirt datacenters and finally remove the datacenters themselves.
+
+Role Variables
+--------------
+
+| Name | Default value | Description |
+|--------------------------|-----------------------|--------------------------------------|
+| data_center_name | UNDEF | Name of the data center. |
+| format_storages          | false                 | Whether the role should format storage domains when removing them. |
+
+Example Playbook
+----------------
+
+```yaml
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ data_center_name: mydatacenter
+ format_storages: true
+
+ roles:
+ - ovirt.ovirt.infra.roles.datacenter_cleanup
+```
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/defaults/main.yml b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/defaults/main.yml
new file mode 100644
index 000000000..336a45af0
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+format_storages: false
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/cluster_and_hosts.yml b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/cluster_and_hosts.yml
new file mode 100644
index 000000000..64ec4a889
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/cluster_and_hosts.yml
@@ -0,0 +1,21 @@
+---
+- name: Find existing Hosts
+ ovirt_host_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "cluster={{ cluster_item.name }}"
+ register: host_info
+
+- name: Remove Hosts
+ ovirt_host:
+ state: absent
+ name: "{{ host_item.name }}"
+ auth: "{{ ovirt_auth }}"
+ with_items: "{{ host_info.ovirt_hosts }}"
+ loop_control:
+ loop_var: host_item
+
+- name: Remove Cluster
+ ovirt_cluster:
+ state: absent
+ name: "{{ cluster_item.name }}"
+ auth: "{{ ovirt_auth }}"
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/datacenter.yml b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/datacenter.yml
new file mode 100644
index 000000000..28b1b78a1
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/datacenter.yml
@@ -0,0 +1,6 @@
+---
+- name: Remove Datacenter
+ ovirt_datacenter:
+ state: absent
+ name: "{{ data_center_name }}"
+ auth: "{{ ovirt_auth }}"
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/disks.yml b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/disks.yml
new file mode 100644
index 000000000..5cbf9dd35
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/disks.yml
@@ -0,0 +1,16 @@
+---
+- name: Find existing Disks
+ ovirt_disk_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "datacenter={{ data_center_name }}"
+ register: disk_info
+
+- name: Remove Disks
+ ovirt_disk:
+ state: absent
+ id: "{{ ovirt_item.id }}"
+ auth: "{{ ovirt_auth }}"
+ with_items: "{{ disk_info.ovirt_disks }}"
+ when: ovirt_item.name != 'OVF_STORE'
+ loop_control:
+ loop_var: ovirt_item
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/main.yml b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/main.yml
new file mode 100644
index 000000000..972e23f64
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/main.yml
@@ -0,0 +1,39 @@
+---
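+# Entities are removed in dependency order: VM pools, VMs, templates and
+# disks first, then all non-master storage domains, then the data center
+# itself, the master storage domain, and finally clusters and hosts.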
+- name: Remove VMPools
+ include: vm_pools.yml
+
+- name: Remove VMs
+ include: vms.yml
+
+- name: Remove Templates
+ include: templates.yml
+
+- name: Remove Disks
+ include: disks.yml
+
+- name: Find existing Storage Domains in Datacenter
+ ovirt_storage_domain_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "datacenter={{ data_center_name }}"
+ register: sd_info
+
+- name: Remove all Storage Domains except master
+ include: storages_pre.yml
+
+- name: Find existing clusters in Datacenter
+ ovirt_cluster_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "datacenter={{ data_center_name }}"
+ register: cluster_info
+
+- name: Remove Datacenter
+ include: datacenter.yml
+
+- name: Remove master Storage Domain
+ include: storages_last.yml
+
+- name: Remove Clusters and Hosts
+ include: cluster_and_hosts.yml
+ with_items: "{{ cluster_info.ovirt_clusters }}"
+ loop_control:
+ loop_var: cluster_item
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/storages_last.yml b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/storages_last.yml
new file mode 100644
index 000000000..9b7f09587
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/storages_last.yml
@@ -0,0 +1,11 @@
+---
+- name: Remove master Storage Domain
+ ovirt_storage_domain:
+ state: absent
+ id: "{{ ovirt_item.id }}"
+ auth: "{{ ovirt_auth }}"
+ format: "{{ format_storages }}"
+ with_items: "{{ ovirt_storage_domains }}"
+ when: ovirt_item.master
+ loop_control:
+ loop_var: ovirt_item
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/storages_pre.yml b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/storages_pre.yml
new file mode 100644
index 000000000..26c707107
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/storages_pre.yml
@@ -0,0 +1,28 @@
+---
+- name: Find existing Storage Domains
+ ovirt_storage_domain_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "datacenter={{ data_center_name }}"
+ register: sd_info
+
+- name: Remove Storage Domains apart from master
+ ovirt_storage_domain:
+ state: absent
+ id: "{{ ovirt_item.id }}"
+ auth: "{{ ovirt_auth }}"
+ format: "{{ format_storages }}"
+ with_items: "{{ sd_info.ovirt_storage_domains }}"
+ when: not ovirt_item.master
+ loop_control:
+ loop_var: ovirt_item
+
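+# The master storage domain cannot be removed while it is active; it is put
+# into maintenance here and removed in storages_last.yml once the data
+# center itself is gone.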
+- name: Put master Storage Domain in maintenance
+ ovirt_storage_domain:
+ state: maintenance
+ id: "{{ ovirt_item.id }}"
+ data_center: "{{ data_center_name }}"
+ auth: "{{ ovirt_auth }}"
+ with_items: "{{ sd_info.ovirt_storage_domains }}"
+ when: ovirt_item.master
+ loop_control:
+ loop_var: ovirt_item
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/templates.yml b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/templates.yml
new file mode 100644
index 000000000..c26d26ddb
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/templates.yml
@@ -0,0 +1,16 @@
+---
+- name: Find existing Templates
+ ovirt_template_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "datacenter={{ data_center_name }}"
+ register: template_info
+
+- name: Remove Templates
+ ovirt_template:
+ state: absent
+ auth: "{{ ovirt_auth }}"
+ id: "{{ ovirt_item.id }}"
+ with_items: "{{ template_info.ovirt_templates }}"
+ when: ovirt_item.name != 'Blank'
+ loop_control:
+ loop_var: ovirt_item
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/vm_pools.yml b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/vm_pools.yml
new file mode 100644
index 000000000..90eae12f3
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/vm_pools.yml
@@ -0,0 +1,15 @@
+---
+- name: Find existing VMPools
+ ovirt_vmpool_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "datacenter={{ data_center_name }}"
+ register: vmpool_info
+
+- name: Remove VMPools
+ ovirt_vmpool:
+ state: absent
+ name: "{{ ovirt_item.name }}"
+ auth: "{{ ovirt_auth }}"
+ with_items: "{{ vmpool_info.ovirt_vm_pools }}"
+ loop_control:
+ loop_var: ovirt_item
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/vms.yml b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/vms.yml
new file mode 100644
index 000000000..9e86960dd
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/vms.yml
@@ -0,0 +1,15 @@
+---
+- name: Find existing VMs
+ ovirt_vm_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "datacenter={{ data_center_name }}"
+ register: vm_info
+
+- name: Remove VMs
+ ovirt_vm:
+ state: absent
+ id: "{{ ovirt_item.id }}"
+ auth: "{{ ovirt_auth }}"
+ with_items: "{{ vm_info.ovirt_vms }}"
+ loop_control:
+ loop_var: ovirt_item
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenters/README.md b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenters/README.md
new file mode 100644
index 000000000..8baf33ec8
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenters/README.md
@@ -0,0 +1,56 @@
+oVirt Datacenters
+=================
+
+The `datacenters` role is used to set up or clean up oVirt datacenters.
+
+Role Variables
+--------------
+
+| Name | Default value | Description |
+|--------------------------|-----------------------|--------------------------------------|
+| data_center_name | UNDEF | Name of the data center. |
+| data_center_description | UNDEF | Description of the data center. |
+| data_center_local | false | Specify whether the data center is shared or local. |
+| compatibility_version | UNDEF | Compatibility version of data center. |
+| data_center_state | present | Specify whether the datacenter should be present or absent. |
+| recursive_cleanup        | false                 | Specify whether to recursively remove all entities inside the data center. Valid only when data_center_state == absent. |
+| format_storages | false | Specify whether to format ALL the storages that are going to be removed as part of the DC. Valid only when data_center_state == absent and recursive_cleanup == true. |
+
+Example Playbooks
+----------------
+
+```yaml
+# Example 1
+
+- name: Add oVirt datacenter
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ data_center_name: mydatacenter
+ data_center_description: mydatacenter
+ data_center_local: false
+ compatibility_version: 4.4
+
+ roles:
+ - ovirt.ovirt.infra.roles.datacenters
+```
+
+```yaml
+# Example 2
+
+- name: Recursively remove oVirt datacenter
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ data_center_name: mydatacenter
+ data_center_state: absent
+ recursive_cleanup: true
+ format_storages: true
+
+ roles:
+ - ovirt.ovirt.infra.roles.datacenters
+```
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenters/defaults/main.yml b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenters/defaults/main.yml
new file mode 100644
index 000000000..aa6ffe117
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenters/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+data_center_state: 'present'
+recursive_cleanup: false
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenters/tasks/main.yml b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenters/tasks/main.yml
new file mode 100644
index 000000000..fb9ca0b01
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenters/tasks/main.yml
@@ -0,0 +1,28 @@
+---
+- name: Add data center
+ ovirt_datacenter:
+ auth: "{{ ovirt_auth }}"
+ state: present
+ name: "{{ data_center_name }}"
+ description: "{{ data_center_description | default(omit) }}"
+ local: "{{ data_center_local | default(false) }}"
+ compatibility_version: "{{ compatibility_version }}"
+ when: data_center_state=='present'
+ tags:
+ - datacenters
+
+- name: Remove data center
+ ovirt_datacenter:
+ auth: "{{ ovirt_auth }}"
+ state: absent
+ name: "{{ data_center_name }}"
+ when: data_center_state=='absent' and not recursive_cleanup
+ tags:
+ - datacenters
+
+- name: Recursively remove data center
+ ansible.builtin.import_role:
+ name: ovirt.ovirt.infra.roles.datacenter_cleanup
+ when: data_center_state=='absent' and recursive_cleanup
+ tags:
+ - datacenters
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/external_providers/README.md b/ansible_collections/ovirt/ovirt/roles/infra/roles/external_providers/README.md
new file mode 100644
index 000000000..11b16fef3
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/external_providers/README.md
@@ -0,0 +1,73 @@
+oVirt External Providers
+========================
+
+The `external_providers` role is used to set up oVirt external providers.
+
+Role Variables
+--------------
+
+| Name | Default value | Description |
+|-----------------------|-----------------------|-----------------------------------------------------------|
+| external_providers | UNDEF | List of dictionaries that describe the external provider. |
+
+The items in the `external_providers` list can contain the following parameters:
+
+| Name | Default value | Description |
+|------------------------|---------------------|----------------------------------------------------------------------------------|
+| name | UNDEF (Required) | Name of the external provider. |
+| state | present | State of the external provider. Values can be: <ul><li>present</li><li>absent</li></ul>|
+| type | UNDEF (Required) | Type of the external provider. Values can be: <ul><li>os_image</li><li>network</li><li>os_volume</li><li>foreman</li></ul>|
+| url | UNDEF | URL where external provider is hosted. Required if state is present. |
+| username | UNDEF | Username to be used for login to external provider. Applicable for all types. |
+| password | UNDEF | Password of the user specified in username parameter. Applicable for all types. |
+| tenant | UNDEF | Name of the tenant. |
+| auth_url | UNDEF | Keystone authentication URL of the openstack provider. Required for: <ul><li>os_image</li><li>network</li><li>os_volume</li></ul>|
+| data_center | UNDEF | Name of the data center where provider should be attached. Applicable for type <i>os_volume</i>. |
+| authentication_keys | UNDEF | List of authentication keys. Each key is represented by dict like {'uuid': 'my-uuid', 'value': 'secret value'}. Added in ansible 2.6. Applicable for type <i>os_volume</i>. |
+
+More information about the parameters can be found in the [Ansible documentation](http://docs.ansible.com/ansible/latest/ovirt_external_provider_module.html).
+
+
+Example Playbook
+----------------
+
+```yaml
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+
+ external_providers:
+ - name: myglance
+ type: os_image
+ state: present
+ url: http://externalprovider.example.com:9292
+ username: admin
+ password: secret
+ tenant: admin
+ auth_url: http://externalprovider.example.com:35357/v2.0
+ - name: mycinder
+ type: os_volume
+ state: present
+ url: http://externalprovider.example.com:9292
+ username: admin
+ password: secret
+ tenant: admin
+ auth_url: http://externalprovider.example.com:5000/v2.0
+ authentication_keys:
+ -
+ uuid: "1234567-a1234-12a3-a234-123abc45678"
+ value: "ABCD00000000111111222333445w=="
+ - name: public-glance
+ type: os_image
+ state: present
+ url: http://glance.public.com:9292
+ - name: external-provider-to-be-removed
+ type: os_image
+ state: absent
+
+ roles:
+ - ovirt.ovirt.infra.roles.external_providers
+```
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/external_providers/tasks/main.yml b/ansible_collections/ovirt/ovirt/roles/infra/roles/external_providers/tasks/main.yml
new file mode 100644
index 000000000..60fc55118
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/external_providers/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+- name: Add external providers
+ ovirt_external_provider:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ item.name }}"
+ state: "{{ item.state | default('present') }}"
+ type: "{{ item.type }}"
+ url: "{{ item.url }}"
+ password: "{{ item.password | default(omit) }}"
+ tenant: "{{ item.tenant | default(omit) }}"
+ auth_url: "{{ item.auth_url | default(omit) }}"
+ data_center: "{{ item.data_center | default(omit) }}"
+ username: "{{ item.username | default(omit) }}"
+ authentication_keys: "{{ item.authentication_keys | default(omit) }}"
+ with_items:
+ - "{{ external_providers | default([]) }}"
+ tags:
+ - external_providers
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/hosts/README.md b/ansible_collections/ovirt/ovirt/roles/infra/roles/hosts/README.md
new file mode 100644
index 000000000..8d750d53e
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/hosts/README.md
@@ -0,0 +1,43 @@
+oVirt Hosts
+===========
+
+The `hosts` role is used to set up oVirt hosts.
+
+Role Variables
+--------------
+
+The `hosts` list can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------|------------------|---------------------------------------|
+| name | UNDEF (Required) | Name of the host. |
+| state | present | Specifies whether the host is `present` or `absent`. |
+| address | UNDEF (Required) | IP address or FQDN of the host. |
+| password | UNDEF | The host's root password. Required if <i>public_key</i> is false. |
+| public_key    | UNDEF            | If <i>true</i>, a public key is used to authenticate to the host. |
+| cluster | UNDEF (Required) | The cluster that the host must connect to. |
+| timeout | 1200 | Maximum wait time for the host to be in an UP state. |
+| poll_interval | 20 | Polling interval to check the host status. |
+| hosted_engine | UNDEF | Specifies whether to 'deploy' or 'undeploy' the hosted engine on the node. |
+| reboot_after_installation | UNDEF | If true, reboot the host after successful installation. |
+| reboot_after_upgrade | UNDEF | If true, reboot the host after successful upgrade. |
+
+Example Playbook
+----------------
+
+```yaml
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ hosts:
+ - name: myhost
+ address: 1.2.3.4
+ cluster: production
+ password: 123456
+
+ roles:
+ - ovirt.ovirt.infra.roles.hosts
+```
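+
+The role's `Set Power Management` task also consumes an optional
+`power_management` dictionary on each host. A minimal sketch (addresses and
+credentials are placeholders):
+
+```yaml
+hosts:
+  - name: myhost
+    address: 1.2.3.4
+    cluster: production
+    password: 123456
+    power_management:
+      address: 9.8.7.6
+      username: root
+      password: fence_password
+      type: ipmilan
+```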
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/hosts/defaults/main.yml b/ansible_collections/ovirt/ovirt/roles/infra/roles/hosts/defaults/main.yml
new file mode 100644
index 000000000..51f58307e
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/hosts/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+ovirt_hosts_max_timeout: 2100
+ovirt_hosts_add_timeout: 1800
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/hosts/tasks/main.yml b/ansible_collections/ovirt/ovirt/roles/infra/roles/hosts/tasks/main.yml
new file mode 100644
index 000000000..849df0ae5
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/hosts/tasks/main.yml
@@ -0,0 +1,88 @@
+---
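+# Builds an engine search pattern such as
+# "name=host1 or name=host2 status=installfailed" to select, from the managed
+# hosts, those whose installation previously failed; the next task reinstalls them.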
+- name: Get hosts
+ ovirt_host_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "{{ ovirt_infra_hosts | map(attribute='name') | map('regex_replace', '(.*)', 'name=\\1') | list | join(' or ') }} status=installfailed"
+ register: host_info
+ when: ovirt_infra_hosts | length > 0
+ tags:
+ - hosts
+ - reinstall
+
+- name: Reinstall hosts
+ ovirt_host:
+ auth: "{{ ovirt_auth }}"
+ state: reinstalled
+ name: "{{ item.name }}"
+ public_key: "{{ item.password is undefined }}"
+ password: "{{ item.password | default(omit) }}"
+ with_items:
+ - "{{ host_info.ovirt_hosts | default([]) }}"
+ loop_control:
+ label: "{{ item.name }}"
+ tags:
+ - hosts
+ - reinstall
+
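+# Hosts are added asynchronously (async with poll: 0); the
+# "Wait for hosts to be added" task below polls each job until it finishes.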
+- name: Add hosts
+ ovirt_host:
+ auth: "{{ ovirt_auth }}"
+ state: "{{ item.state | default(omit) }}"
+ name: "{{ item.name }}"
+ address: "{{ item.address | default(omit) }}"
+ cluster: "{{ item.cluster }}"
+ password: "{{ item.password | default(omit) }}"
+ public_key: "{{ item.public_key | default(omit) }}"
+ override_iptables: true
+ timeout: "{{ item.timeout | default(ovirt_hosts_add_timeout) }}"
+ poll_interval: "{{ item.poll_interval | default(20) }}"
+ hosted_engine: "{{ item.hosted_engine | default(omit) }}"
+ reboot_after_installation: "{{ item.reboot_after_installation | default(omit) }}"
+ reboot_after_upgrade: "{{ item.reboot_after_upgrade | default(omit) }}"
+ with_items: "{{ ovirt_infra_hosts }}"
+ loop_control:
+ label: "{{ item.name }}"
+ async: "{{ ovirt_hosts_max_timeout }}"
+ poll: 0
+ register: add_hosts
+ tags:
+ - hosts
+
+- name: Wait for hosts to be added
+ ansible.builtin.async_status: "jid={{ item.ansible_job_id }}"
+ register: job_result
+ with_items:
+ - "{{ add_hosts.results | ovirt.ovirt.removesensitivevmdata }}"
+ loop_control:
+ label: "{{ item.item.name }}"
+ tags:
+ - hosts
+ ignore_errors: true
+ until: job_result.finished
+ retries: "{{ ovirt_hosts_max_timeout // 20 }}"
+ delay: 20
+
+- name: Fail the play on an unexpected error
+ ansible.builtin.fail:
+ msg: The host deploy failed with message '{{ item["exception"] }}'.
+ when: item.failed and "the following networks are missing" not in item["exception"]
+ with_items:
+ - "{{ job_result.results }}"
+
+- name: Set Power Management
+ ovirt_host_pm:
+ auth: "{{ ovirt_auth }}"
+ address: "{{ item.power_management.address | default(omit) }}"
+ state: "{{ item.power_management.state | default(omit) }}"
+ username: "{{ item.power_management.username | default(omit) }}"
+ password: "{{ item.power_management.password | default(omit) }}"
+ type: "{{ item.power_management.type | default(omit) }}"
+ options: "{{ item.power_management.options | default(omit) }}"
+ port: "{{ item.power_management.port | default(omit) }}"
+ name: "{{ item.name }}"
+ when: item.power_management is defined and not (item.state is defined and item.state == 'absent')
+ with_items: "{{ ovirt_infra_hosts }}"
+ loop_control:
+ label: "{{ item.name }}"
+ tags:
+ - hosts
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/mac_pools/README.md b/ansible_collections/ovirt/ovirt/roles/infra/roles/mac_pools/README.md
new file mode 100644
index 000000000..a5e5b3d22
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/mac_pools/README.md
@@ -0,0 +1,39 @@
+oVirt Mac Pools
+=================
+
+The `mac_pools` role is used to set up oVirt mac pools.
+
+Role Variables
+--------------
+
+| Name | Default value | Description |
+|-----------------------|-----------------------|-----------------------------------------------------------|
+| mac_pools | UNDEF | List of dictionaries that describe the mac pool. |
+
+The items in the `mac_pools` list can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------------------|-----------------------|-------------------------------------------------------------------|
+| mac_pool_name             | UNDEF                 | Name of the MAC pool to manage. |
+| mac_pool_ranges           | UNDEF                 | List of MAC ranges. The from and to values should be separated by a comma. For example: 00:1a:4a:16:01:51,00:1a:4a:16:01:61 |
+| mac_pool_allow_duplicates | UNDEF                 | If true, allows a MAC address to be used multiple times in a pool. The default value is set by the oVirt engine to false. |
+
+Example Playbook
+----------------
+
+```yaml
+- name: oVirt set mac pool
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ mac_pools:
+ - mac_pool_name: my_mac_pool
+ mac_pool_allow_duplicates: false
+ mac_pool_ranges:
+ - 00:1a:4a:16:01:51,00:1a:4a:16:01:61
+
+ roles:
+ - ovirt.ovirt.infra.roles.mac_pools
+```
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/mac_pools/tasks/main.yml b/ansible_collections/ovirt/ovirt/roles/infra/roles/mac_pools/tasks/main.yml
new file mode 100644
index 000000000..417b53ed5
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/mac_pools/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+- name: Set MAC pools
+ ovirt_mac_pool:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ item.mac_pool_name }}"
+ allow_duplicates: "{{ item.mac_pool_allow_duplicates | default(omit) }}"
+ ranges: "{{ item.mac_pool_ranges }}"
+ with_items:
+ - "{{ mac_pools | default([]) }}"
+ tags:
+ - mac_pools
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/networks/README.md b/ansible_collections/ovirt/ovirt/roles/infra/roles/networks/README.md
new file mode 100644
index 000000000..bfd453ee4
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/networks/README.md
@@ -0,0 +1,77 @@
+oVirt Networks
+==============
+
+The `networks` role sets up oVirt networks.
+
+Role Variables
+--------------
+
+The `data_center_name` variable specifies the data center name of the network.
+
+The `logical_networks` list can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------|----------------|---------------------------------------|
+| name | UNDEF | Name of the network. |
+| state | present | Specifies whether the network state is `present` or `absent`. |
+| vlan_tag | UNDEF | Specify VLAN tag. |
+| vm_network    | True           | If True, the network will be marked as a VM network. |
+| mtu | UNDEF | Maximum transmission unit (MTU) of the network. |
+| description | UNDEF | Description of the network. |
+| clusters | UNDEF | List of dictionaries describing how the network is managed in specific cluster. |
+| label | UNDEF | Name of the label to assign to the network. |
+
+More information about the parameters can be found in the [ovirt_network](http://docs.ansible.com/ansible/ovirt_network_module.html) module documentation.
+
+The `host_networks` list can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------|----------------|---------------------------------------|
+| name | UNDEF | Name of the host. |
+| state | UNDEF | Specifies whether the network state is `present` or `absent`. |
+| check | UNDEF | If true, verifies the connection between the host and engine. |
+| save          | UNDEF          | If true, the network configuration will be persistent; by default it is temporary. |
+| bond | UNDEF | Dictionary describing the network bond. |
+| networks | UNDEF | Dictionary describing the networks to be attached to the interface or bond. |
+| labels | UNDEF | List of names of the network label to be assigned to the bond or interface. |
+| interface | UNDEF | Name of the network interface where the logical network should be attached. |
+
+More information about the parameters can be found in the [ovirt_host_network](http://docs.ansible.com/ansible/ovirt_host_network_module.html) module documentation.
+
+Example Playbook
+----------------
+
+```yaml
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ logical_networks:
+ - name: mynetwork
+ clusters:
+ - name: development
+ assigned: yes
+ required: no
+ display: no
+ migration: yes
+ gluster: no
+
+ host_networks:
+ - name: myhost1
+ check: true
+ save: true
+ bond:
+ name: bond0
+ mode: 2
+ interfaces:
+ - eth2
+ - eth3
+ networks:
+ - name: mynetwork
+ boot_protocol: dhcp
+
+ roles:
+ - ovirt.ovirt.infra.roles.networks
+```
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/networks/tasks/main.yml b/ansible_collections/ovirt/ovirt/roles/infra/roles/networks/tasks/main.yml
new file mode 100644
index 000000000..6f24a7eeb
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/networks/tasks/main.yml
@@ -0,0 +1,42 @@
+---
+#########################################
+# Logical networks
+#########################################
+- name: Add networks
+ ovirt_network:
+ auth: "{{ ovirt_auth }}"
+ state: "{{ item.state | default(omit) }}"
+ name: "{{ item.name }}"
+ data_center: "{{ data_center_name }}"
+ vlan_tag: "{{ item.vlan_tag | default(omit) }}"
+ vm_network: "{{ item.vm_network | default(omit) }}"
+ mtu: "{{ item.mtu | default(omit) }}"
+ description: "{{ item.description | default(omit) }}"
+ clusters: "{{ item.clusters | default(omit) }}"
+ label: "{{ item.label | default(omit) }}"
+ external_provider: "{{ item.external_provider | default(omit) }}"
+ with_items:
+ - "{{ logical_networks | default([]) }}"
+ tags:
+ - logical_networks
+ - networks
+
+#########################################
+# Host networks
+#########################################
+- name: Setup host networks
+ ovirt_host_network:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ item.name }}"
+ state: "{{ item.state | default(omit) }}"
+ check: "{{ item.check | default(omit) }}"
+ save: "{{ item.save | default(omit) }}"
+ bond: "{{ item.bond | default(omit) }}"
+ networks: "{{ item.networks | default(omit) }}"
+ labels: "{{ item.labels | default(omit) }}"
+ interface: "{{ item.interface | default(omit) }}"
+ with_items:
+ - "{{ host_networks | default([]) }}"
+ tags:
+ - host_networks
+ - networks
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/permissions/README.md b/ansible_collections/ovirt/ovirt/roles/infra/roles/permissions/README.md
new file mode 100644
index 000000000..55970ee8d
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/permissions/README.md
@@ -0,0 +1,49 @@
+oVirt Permissions
+=================
+
+The `permissions` role is used to set up oVirt permissions.
+
+Role Variables
+--------------
+
+The `permissions` list can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------|----------------|----------------------------|
+| state | present | Specifies whether the state of the permission is `present` or `absent`. |
+| user_name | UNDEF | The user to manage the permission for. |
+| group_name | UNDEF | Name of the group to manage the permission for. |
+| authz_name | UNDEF | Name of the authorization provider of the group or user. |
+| role | UNDEF | The role to be assigned to the user or group. |
+| object_type | UNDEF | The object type which should be used to assign the permission. Possible object types are:<ul><li>data_center</li><li>cluster</li><li>host</li><li>storage_domain</li><li>network</li><li>disk</li><li>vm</li><li>vm_pool</li><li>template</li><li>cpu_profile</li><li>disk_profile</li><li>vnic_profile</li><li>system</li></ul> |
+| object_name | UNDEF | Name of the object where the permission should be assigned. |
+
+
+Example Playbook
+----------------
+
+```yaml
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ permissions:
+ - state: present
+ user_name: user1
+ authz_name: internal-authz
+ role: UserRole
+ object_type: cluster
+ object_name: production
+
+ - state: present
+ group_name: group1
+ authz_name: internal-authz
+ role: UserRole
+ object_type: cluster
+ object_name: production
+
+ roles:
+ - ovirt.ovirt.infra.roles.permissions
+```
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/permissions/tasks/main.yml b/ansible_collections/ovirt/ovirt/roles/infra/roles/permissions/tasks/main.yml
new file mode 100644
index 000000000..a8d662af7
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/permissions/tasks/main.yml
@@ -0,0 +1,40 @@
+---
+################################
+## User & group system management
+################################
+- name: Manage users
+ ovirt_user:
+ auth: "{{ ovirt_auth }}"
+ state: "{{ item.state | default(omit) }}"
+ name: "{{ item.name }}"
+ authz_name: "{{ item.authz_name | default(omit) }}"
+ with_items:
+ - "{{ users | default([]) }}"
+ tags:
+ - permissions
+
+- name: Manage groups
+ ovirt_group:
+ auth: "{{ ovirt_auth }}"
+ state: "{{ item.state | default(omit) }}"
+ name: "{{ item.name }}"
+ authz_name: "{{ item.authz_name | default(omit) }}"
+ with_items:
+ - "{{ user_groups | default([]) }}"
+ tags:
+ - permissions
+
+- name: Manage permissions
+ ovirt_permission:
+ auth: "{{ ovirt_auth }}"
+ state: "{{ item.state | default(omit) }}"
+ group_name: "{{ item.group_name | default(omit) }}"
+ user_name: "{{ item.user_name | default(omit) }}"
+ authz_name: "{{ item.authz_name }}"
+ object_type: "{{ item.object_type }}"
+ object_name: "{{ item.object_name }}"
+ role: "{{ item.role }}"
+ with_items:
+ - "{{ permissions | default([]) }}"
+ tags:
+ - permissions
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/storages/README.md b/ansible_collections/ovirt/ovirt/roles/infra/roles/storages/README.md
new file mode 100644
index 000000000..0b716bf52
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/storages/README.md
@@ -0,0 +1,65 @@
+oVirt Storages
+==============
+
+The `storages` role is used to set up oVirt storages.
+
+Role Variables
+--------------
+
+Each item value in the `storages` dictionary can contain the following parameters (the key is always the name of the storage):
+
+| Name | Default value | Description |
+|-----------------|----------------|---------------------------------------|
+| master | false | If true, the storage will be added as the first storage, meaning it will be the master storage. |
+| domain_function | data | The function of the storage domain. Possible values are: <ul><li>iso</li><li>export</li><li>data</li></ul>. |
+| localfs | UNDEF | Dictionary defining local storage. |
+| nfs | UNDEF | Dictionary defining NFS storage. |
+| iscsi | UNDEF | Dictionary defining iSCSI storage. |
+| posixfs | UNDEF | Dictionary defining PosixFS storage. |
+| fcp | UNDEF | Dictionary defining FCP storage. |
+| glusterfs | UNDEF | Dictionary defining glusterFS storage. |
+| discard_after_delete | UNDEF | If true, storage domain blocks will be discarded upon deletion (enabled by default on the engine side). This parameter is relevant only for block-based storage domains. |
+
+More information about the `storages` parameters can be found in the [Ansible documentation](http://docs.ansible.com/ansible/ovirt_storage_domains_module.html).
+
+Example Playbook
+----------------
+
+```yaml
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ storages:
+ mynfsstorage:
+ master: true
+ state: present
+ nfs:
+ address: 1.2.3.4
+ path: /path
+ myiscsistorage:
+ state: present
+ iscsi:
+ target: iqn.2014-07.org.ovirt:storage
+ port: 3260
+ address: 10.11.12.13
+ username: username
+ password: password
+ lun_id: 3600140551fcc8348ea74a99b6760fbb4
+ discard_after_delete: false
+ myexporttemplates:
+ domain_function: export
+ nfs:
+ address: 100.101.102.103
+ path: /templates
+ myisostorage:
+ domain_function: iso
+ nfs:
+ address: 111.222.111.222
+ path: /iso
+
+ roles:
+ - ovirt.ovirt.infra.roles.storages
+```
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/roles/storages/tasks/main.yml b/ansible_collections/ovirt/ovirt/roles/infra/roles/storages/tasks/main.yml
new file mode 100644
index 000000000..ea96a0ffe
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/roles/storages/tasks/main.yml
@@ -0,0 +1,107 @@
+---
+#################################################
+# Storages
+#################################################
+# First add master storage
+- name: Add master storage
+ ovirt_storage_domain:
+ auth: "{{ ovirt_auth }}"
+ state: "{{ item.value.state | default(omit) }}"
+ name: "{{ item.key }}"
+ domain_function: "{{ item.value.domain_function | default(omit) }}"
+ host: "{{ ovirt_infra_hosts[0].name }}"
+ data_center: "{{ data_center_name }}"
+ localfs: "{{ item.value.localfs | default(omit) }}"
+ nfs: "{{ item.value.nfs | default(omit) }}"
+ iscsi: "{{ item.value.iscsi | default(omit) }}"
+ posixfs: "{{ item.value.posixfs | default(omit) }}"
+ glusterfs: "{{ item.value.glusterfs | default(omit) }}"
+ fcp: "{{ item.value.fcp | default(omit) }}"
+ discard_after_delete: "{{ item.value.discard_after_delete | default(omit) }}"
+ with_dict: "{{ storages | default({}) }}"
+ when: item.value.master is defined and item.value.master
+ tags:
+ - storages
+
+# Next add rest of data storages
+- name: Add storages
+ ovirt_storage_domain:
+ auth: "{{ ovirt_auth }}"
+ state: "{{ item.value.state | default(omit) }}"
+ name: "{{ item.key }}"
+ domain_function: "{{ item.value.domain_function | default(omit) }}"
+ host: "{{ ovirt_infra_hosts[0].name }}"
+ data_center: "{{ data_center_name }}"
+ localfs: "{{ item.value.localfs | default(omit) }}"
+ nfs: "{{ item.value.nfs | default(omit) }}"
+ iscsi: "{{ item.value.iscsi | default(omit) }}"
+ posixfs: "{{ item.value.posixfs | default(omit) }}"
+ glusterfs: "{{ item.value.glusterfs | default(omit) }}"
+ fcp: "{{ item.value.fcp | default(omit) }}"
+ discard_after_delete: "{{ item.value.discard_after_delete | default(omit) }}"
+ with_dict: "{{ storages | default({}) }}"
+ when: item.value.domain_function is not defined
+ tags:
+ - storages
+
+# Next add export/iso storages
+- name: Add export/iso storages
+ ovirt_storage_domain:
+ auth: "{{ ovirt_auth }}"
+ state: "{{ item.value.state | default(omit) }}"
+ name: "{{ item.key }}"
+ domain_function: "{{ item.value.domain_function | default(omit) }}"
+ host: "{{ ovirt_infra_hosts[0].name }}"
+ data_center: "{{ data_center_name }}"
+ localfs: "{{ item.value.localfs | default(omit) }}"
+ nfs: "{{ item.value.nfs | default(omit) }}"
+ iscsi: "{{ item.value.iscsi | default(omit) }}"
+ posixfs: "{{ item.value.posixfs | default(omit) }}"
+ glusterfs: "{{ item.value.glusterfs | default(omit) }}"
+ fcp: "{{ item.value.fcp | default(omit) }}"
+ discard_after_delete: "{{ item.value.discard_after_delete | default(omit) }}"
+ with_dict: "{{ storages | default({}) }}"
+ when: item.value.domain_function is defined
+ tags:
+ - storages
+
+- name: Fetch storages
+ ovirt_storage_domain_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "type=nfs or type=iscsi or type=posixfs or type=glusterfs or type=fcp"
+ # pattern: "type=nfs or type=posixfs or type=glusterfs"
+ # pattern: "type=iscsi or type=fcp"
+ fetch_nested: true
+ register: sd_info
+ tags:
+ - storages
+ - storage_connections
+
+- name: Storage connection warning
+ ansible.builtin.debug:
+ msg: "[WARNING]: The storage domain '{{ item.name }}' has multiple connections. This is not supported by the role. The storage connection update of this domain will be skipped." # noqa yaml[line-length]
+ when: item.storage_connections | length > 1
+ with_items:
+ - "{{ sd_info.ovirt_storage_domains | default([]) }}"
+
+- name: Update storage parameters
+ ovirt_storage_connection:
+ auth: "{{ ovirt_auth }}"
+ id: "{{ ansible_version.full is version('2.6.0', '>=') | ternary(item.storage_connections[0].id, item.storage_connections[0].id[0]) }}"
+ storage: "{{ item.name }}"
+ address: "{{ storages[item.name][item.storage.type].address | default(omit) }}"
+ path: "{{ storages[item.name][item.storage.type].path | default(omit) }}"
+ nfs_timeout: "{{ storages[item.name][item.storage.type].timeout | default(omit) }}"
+ nfs_version: "{{ storages[item.name][item.storage.type].version | default(omit) }}"
+ nfs_retrans: "{{ storages[item.name][item.storage.type].retrans | default(omit) }}"
+ mount_options: "{{ storages[item.name][item.storage.type].mount_options | default(omit) }}"
+ username: "{{ storages[item.name][item.storage.type].username | default(omit) }}"
+ password: "{{ storages[item.name][item.storage.type].password | default(omit) }}"
+ port: "{{ storages[item.name][item.storage.type].port | default(omit) }}"
+ force: true
+ with_items:
+ - "{{ sd_info.ovirt_storage_domains | default([]) }}"
+ when: item.storage_connections | length == 1 and storages is defined and item.name in storages
+ tags:
+ - storages
+ - storage_connections
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/tasks/create_infra.yml b/ansible_collections/ovirt/ovirt/roles/infra/tasks/create_infra.yml
new file mode 100644
index 000000000..cbf2fa0f3
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/tasks/create_infra.yml
@@ -0,0 +1,37 @@
+---
+- name: Run mac-pools sub-role
+ ansible.builtin.import_role:
+ name: ovirt.ovirt.infra.roles.mac_pools
+
+- name: Run datacenters sub-role
+ ansible.builtin.import_role:
+ name: ovirt.ovirt.infra.roles.datacenters
+
+- name: Run clusters sub-role
+ ansible.builtin.import_role:
+ name: ovirt.ovirt.infra.roles.clusters
+
+- name: Run hosts sub-role
+ ansible.builtin.import_role:
+ name: ovirt.ovirt.infra.roles.hosts
+
+- name: Run networks sub-role
+ ansible.builtin.import_role:
+ name: ovirt.ovirt.infra.roles.networks
+
+- name: Run storages sub-role
+ ansible.builtin.import_role:
+ name: ovirt.ovirt.infra.roles.storages
+
+- name: Run aaa-jdbc sub-role
+ ansible.builtin.import_role:
+ name: ovirt.ovirt.infra.roles.aaa_jdbc
+ when: users is defined or user_groups is defined
+
+- name: Run external-providers sub-role
+ ansible.builtin.import_role:
+ name: ovirt.ovirt.infra.roles.external_providers
+
+- name: Run permissions sub-role
+ ansible.builtin.import_role:
+ name: ovirt.ovirt.infra.roles.permissions
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/tasks/main.yml b/ansible_collections/ovirt/ovirt/roles/infra/tasks/main.yml
new file mode 100644
index 000000000..9af23a006
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/tasks/main.yml
@@ -0,0 +1,49 @@
+---
+- name: Main block
+ block:
+ - name: Check if mandatory parameters are correct
+ ansible.builtin.fail:
+ msg: "one of mandatory parameter data_center_name or compatibility_version is undefined"
+ when: "data_center_name is undefined or compatibility_version is undefined"
+
+ - name: Get list of oVirt infra hosts we can use throughout the role
+ ansible.builtin.set_fact:
+ ovirt_infra_hosts: "{{ lookup('vars', hosts_var_name, default=[]) }}"
+
+ - name: Check if hosts are correct
+ ansible.builtin.fail:
+ msg: "'{{ lookup('vars', hosts_var_name) }}' variable does not contain mandatory parameter '{{ item[1] }}'"
+ when: item[1] not in item[0]
+ with_nested:
+ - "{{ ovirt_infra_hosts }}"
+ - ['name']
+
+ - name: Login to oVirt
+ ovirt_auth:
+ url: "{{ engine_url | default(lookup('env','OVIRT_URL')) | default(omit) }}"
+ username: "{{ engine_user | default(lookup('env','OVIRT_USERNAME')) | default(omit) }}"
+ hostname: "{{ engine_fqdn | default(lookup('env','OVIRT_HOSTNAME')) | default(omit) }}"
+ password: "{{ engine_password | default(lookup('env','OVIRT_PASSWORD')) | default(omit) }}"
+ ca_file: "{{ engine_cafile | default(lookup('env','OVIRT_CAFILE')) | default(omit) }}"
+ insecure: "{{ engine_insecure | default(true) }}"
+ when: ovirt_auth is undefined or not ovirt_auth
+ register: loggedin
+ tags:
+ - always
+
+ - name: Create infrastructure
+ import_tasks: create_infra.yml
+ when: data_center_state == 'present'
+
+ - name: Remove infrastructure
+ import_tasks: remove_infra.yml
+ when: data_center_state == 'absent'
+
+ always:
+ - name: Logout from oVirt
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
+ when: not loggedin.skipped | default(false)
+ tags:
+ - always
diff --git a/ansible_collections/ovirt/ovirt/roles/infra/tasks/remove_infra.yml b/ansible_collections/ovirt/ovirt/roles/infra/tasks/remove_infra.yml
new file mode 100644
index 000000000..de848e0e7
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/infra/tasks/remove_infra.yml
@@ -0,0 +1,4 @@
+---
+- name: Run datacenters sub-role
+ ansible.builtin.import_role:
+ name: ovirt.ovirt.infra.roles.datacenters
diff --git a/ansible_collections/ovirt/ovirt/roles/remove_stale_lun/README.md b/ansible_collections/ovirt/ovirt/roles/remove_stale_lun/README.md
new file mode 100644
index 000000000..a0ac5dfbc
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/remove_stale_lun/README.md
@@ -0,0 +1,46 @@
+oVirt Remove Stale LUN
+=========
+
+The `remove_stale_lun` role iterates through all the hosts in a data center and removes stale LUN devices from them.
+The example playbook uses the engine's private SSH key to connect to the hosts and therefore assumes it is executed on the engine machine.
+If the playbook is not executed on the engine, the user's SSH key has to be added to all hosts belonging to the given data center, or the user has to provide an appropriate inventory file (see the sketch after the example playbook).
+
+Role Variables
+--------------
+
+| Name | Default value | Description |
+|-------------------------|-----------------------|-----------------------------------------------------|
+| data_center | Default | Name of the data center whose hosts the stale LUNs should be removed from. |
+| lun_wwid | UNDEF | WWID of the stale LUN(s) which should be removed from the hosts. Separate multiple LUNs with spaces. |
+
+
+Example Playbook
+----------------
+
+```yaml
+---
+- name: oVirt remove stale LUN
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars_files:
+ # Contains the `engine_password` variable encrypted using ansible-vault
+ - passwords.yml
+
+ vars:
+ ansible_ssh_common_args: "-o StrictHostKeyChecking=no"
+ ansible_user: root
+ ansible_ssh_private_key_file: /etc/pki/ovirt-engine/keys/engine_id_rsa
+
+ engine_fqdn: ovirt.example.com
+ engine_user: admin@internal
+
+ data_center: default
+ lun_wwid: 36001405a77a1ee25cbf4439b7ddd2062 36001405ddefe8392bb8443e89bde4b40
+
+ roles:
+ - remove_stale_lun
+ collections:
+ - ovirt.ovirt
+```
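+
+If the playbook is not executed on the engine machine, an inventory file can
+supply the hosts instead; a minimal sketch in YAML inventory format, assuming
+the two-play layout from `examples/remove_stale_lun.yml` where the second play
+targets the `dc_hosts` group (host names are hypothetical):
+
+```yaml
+# hypothetical static inventory replacing the add_host discovery
+dc_hosts:
+  hosts:
+    host1.example.com:
+    host2.example.com:
+```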
diff --git a/ansible_collections/ovirt/ovirt/roles/remove_stale_lun/defaults/main.yml b/ansible_collections/ovirt/ovirt/roles/remove_stale_lun/defaults/main.yml
new file mode 100644
index 000000000..2088a6cdd
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/remove_stale_lun/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+data_center: Default
diff --git a/ansible_collections/ovirt/ovirt/roles/remove_stale_lun/examples/passwords.yml b/ansible_collections/ovirt/ovirt/roles/remove_stale_lun/examples/passwords.yml
new file mode 100644
index 000000000..92c7613c9
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/remove_stale_lun/examples/passwords.yml
@@ -0,0 +1,12 @@
+---
+# As an example this file is kept in plaintext; if you want to
+# encrypt it, execute the following command:
+#
+# $ ansible-vault encrypt passwords.yml
+#
+# It will ask you for a password, which you must then pass to
+# ansible interactively when executing the playbook.
+#
+# $ ansible-playbook myplaybook.yml --ask-vault-pass
+#
+engine_password: 123456
diff --git a/ansible_collections/ovirt/ovirt/roles/remove_stale_lun/examples/remove_stale_lun.yml b/ansible_collections/ovirt/ovirt/roles/remove_stale_lun/examples/remove_stale_lun.yml
new file mode 100644
index 000000000..f5d27e805
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/remove_stale_lun/examples/remove_stale_lun.yml
@@ -0,0 +1,34 @@
+---
+- name: Discover hosts in DC and add to Ansible runtime inventory
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars_files:
+ # Contains the `engine_password` variable encrypted using ansible-vault
+ - passwords.yml
+
+ vars:
+ engine_fqdn: ovirt.example.com
+ engine_user: admin@internal
+ data_center: default
+ roles:
+ - role: remove_stale_lun
+ fetch_hosts: true
+ collections:
+ - ovirt.ovirt
+
+- name: Remove LUNs from the discovered hosts in DC
+ hosts: dc_hosts
+
+ vars:
+ ansible_ssh_common_args: "-o StrictHostKeyChecking=no"
+ ansible_user: root
+ ansible_ssh_private_key_file: /etc/pki/ovirt-engine/keys/engine_id_rsa
+ lun_wwid: 36001405a77a1ee25cbf4439b7ddd2062 36001405ddefe8392bb8443e89bde4b40
+
+ roles:
+ - role: remove_stale_lun
+ fetch_hosts: false
+ collections:
+ - ovirt.ovirt
diff --git a/ansible_collections/ovirt/ovirt/roles/remove_stale_lun/tasks/fetch_hosts.yml b/ansible_collections/ovirt/ovirt/roles/remove_stale_lun/tasks/fetch_hosts.yml
new file mode 100644
index 000000000..788d5ec7c
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/remove_stale_lun/tasks/fetch_hosts.yml
@@ -0,0 +1,46 @@
+---
+## https://github.com/ansible/ansible/issues/22397
+## Ansible 2.3 generates a WARNING when using {{ }} in default variables of a role;
+## this works around it until Ansible resolves the issue:
+- name: Initialize variables
+ ansible.builtin.set_fact:
+ data_center: "{{ data_center | mandatory }}"
+
+- name: Fetch hosts main block
+ block:
+ - name: Login to oVirt
+ ovirt_auth:
+ url: "{{ engine_url | default(lookup('env','OVIRT_URL')) | default(omit) }}"
+ username: "{{ engine_user | default(lookup('env','OVIRT_USERNAME')) | default(omit) }}"
+ hostname: "{{ engine_fqdn | default(lookup('env','OVIRT_HOSTNAME')) | default(omit) }}"
+ password: "{{ engine_password | default(lookup('env','OVIRT_PASSWORD')) | default(omit) }}"
+ ca_file: "{{ engine_cafile | default(lookup('env','OVIRT_CAFILE')) | default(omit) }}"
+ token: "{{ engine_token | default(lookup('env','OVIRT_TOKEN')) | default(omit) }}"
+ insecure: "{{ engine_insecure | default(true) }}"
+ when: ovirt_auth is undefined or not ovirt_auth
+ register: login_result
+ tags:
+ - always
+
+ - name: Get datacenter hosts
+ ovirt_host_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "datacenter={{ data_center }}"
+ register: host_info
+
+ - name: Add hosts
+ ansible.builtin.add_host:
+ hostname: "{{ item.address }}"
+ groups: dc_hosts
+ with_items:
+ - "{{ host_info.ovirt_hosts }}"
+
+ always:
+ - name: Logout from oVirt
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
+ when:
+ - login_result.skipped is defined and not login_result.skipped
+ tags:
+ - always
diff --git a/ansible_collections/ovirt/ovirt/roles/remove_stale_lun/tasks/main.yml b/ansible_collections/ovirt/ovirt/roles/remove_stale_lun/tasks/main.yml
new file mode 100644
index 000000000..18fbea5a1
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/remove_stale_lun/tasks/main.yml
@@ -0,0 +1,5 @@
+- include_tasks: fetch_hosts.yml
+ when: fetch_hosts
+
+- include_tasks: remove_mpath_device.yml
+ when: not fetch_hosts
diff --git a/ansible_collections/ovirt/ovirt/roles/remove_stale_lun/tasks/remove_mpath_device.yml b/ansible_collections/ovirt/ovirt/roles/remove_stale_lun/tasks/remove_mpath_device.yml
new file mode 100644
index 000000000..dfea114e7
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/remove_stale_lun/tasks/remove_mpath_device.yml
@@ -0,0 +1,27 @@
+- name: Initialize variables
+ ansible.builtin.set_fact:
+ lun_wwid: "{{ lun_wwid | mandatory }}"
+
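+# dmsetup deps -o devname prints the underlying path devices as "(sda) (sdb)";
+# the cut/tr pipeline below strips the decoration and joins the names on one line.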
+- name: Get the underlying disks (paths) for the multipath device(s) and turn them into a list.
+ ansible.builtin.shell: >-
+ for dev in {{ lun_wwid }}; do
+ set -euo pipefail && dmsetup deps -o devname $dev | cut -f 2 | cut -c 3- | tr -d "()" | tr "\r\n" " ";
+ done
+ changed_when: false
+ register: disks
+ check_mode: "no"
+
+- name: Flush (remove) the multipath device(s).
+ ansible.builtin.shell: "for dev in {{ lun_wwid }}; do multipath -f $dev || exit 1; done"
+ changed_when: true
+ register: flush_results
+ retries: 6
+ check_mode: "no"
+ until: flush_results.rc == 0
+
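+# Writing 1 to /sys/block/<dev>/device/delete detaches that path device from
+# the SCSI subsystem so the kernel forgets it.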
+- name: Remove each path from the SCSI subsystem.
+ ansible.builtin.shell: "for dev in {{ item.stdout }}; do echo 1 > /sys/block/$dev/device/delete; done"
+ changed_when: true
+ check_mode: "no"
+ with_items:
+ - "{{ disks }}"
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/README.md b/ansible_collections/ovirt/ovirt/roles/repositories/README.md
new file mode 100644
index 000000000..45b5c10a5
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/README.md
@@ -0,0 +1,132 @@
+oVirt Repositories
+==================
+
+The `repositories` role is used to set the repositories required for
+oVirt engine or host installation. By default it copies the content of
+/etc/yum.repos.d/ to /tmp/repo-backup-{{timestamp}}, so it is easy to undo that operation.
+
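+To undo it, copy the saved files back, e.g. `cp /tmp/repo-backup-<timestamp>/*.repo /etc/yum.repos.d/` (the timestamp depends on when the role ran).
+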
+Role Variables
+--------------
+
+| Name | Default value | Description |
+|--------------------------------------------|-----------------------|-------------------------------------------|
+| ovirt_repositories_ovirt_release_rpm | UNDEF | URL of oVirt release package, which contains required repositories configuration. |
+| ovirt_repositories_ovirt_release_rpm_gpg | https://plain.resources.ovirt.org/pub/keys/RPM-GPG-ovirt-v2 | Address of the rpm GPG key. |
+| ovirt_repositories_disable_gpg_check | False | Disable the GPG check for <i>ovirt_repositories_ovirt_release_rpm</i>. Defaults to False unless 'master.rpm' appears in <i>ovirt_repositories_ovirt_release_rpm</i>. |
+| ovirt_repositories_use_subscription_manager| False | If true, repositories from Subscription Manager will be used and the value of <i>ovirt_repositories_ovirt_release_rpm</i> will be ignored. |
+| ovirt_repositories_ovirt_version | 4.4 | oVirt release version (supported versions: 4.1, 4.2, 4.3, 4.4). Used to enable the required repositories and modules. |
+| ovirt_repositories_target_host | engine | Type of the target machine; one of [engine, host, rhvh, host_ppc]. This parameter takes effect only when <i>ovirt_repositories_use_subscription_manager</i> is set to True. If an incorrect version or target is specified, no repositories are enabled. host_ppc is available only on 4.4. |
+| ovirt_repositories_rh_username | UNDEF | Username to use for subscription manager. |
+| ovirt_repositories_rh_password | UNDEF | Password to use for subscription manager. |
+| ovirt_repositories_pool_ids | UNDEF | List of pools ids to subscribe to. |
+| ovirt_repositories_pools | UNDEF | Specify a list of subscription pool names. Use <i>ovirt_repositories_pool_ids</i> instead if possible, as it is much faster. |
+| ovirt_repositories_subscription_manager_repos| [] | List of repositories to enable via subscription-manager. By default there is a list of repositories for each {{ovirt_repositories_target_host}}_{{ovirt_repositories_ovirt_version}} combination in the vars folder. |
+| ovirt_repositories_repos_backup | True | When set to `False`, original repositories won't be backed up. |
+| ovirt_repositories_repos_backup_path | /tmp/repo-backup-{{timestamp}} | Directory where the original repositories configuration is backed up. |
+| ovirt_repositories_force_register | False | Bool to register the system even if it is already registered. |
+| ovirt_repositories_rhsm_server_hostname | UNDEF | Hostname of the RHSM server. By default it is taken from the rhsm configuration. |
+| ovirt_repositories_clear | False | If True, all repositories will be disabled before enabling the new ones. |
+| ovirt_repositories_org | UNDEF | The organization to use for subscription manager. `ovirt_repositories_org` and `ovirt_repositories_activationkey` take precedence over `ovirt_repositories_pool_ids`. |
+| ovirt_repositories_activationkey | UNDEF | The activation key will be used for the subscription manager. |
+| ovirt_repositories_ca_rpm_url | UNDEF | URL of the Satellite rpm that sets up the host certificates. |
+| ovirt_repositories_ca_rpm_validate_certs | UNDEF | If `False`, SSL certificate validation is skipped for `ovirt_repositories_ca_rpm_url`. |
+| ovirt_repositories_ca_rpm_disable_gpg_check| UNDEF | If `True`, the GPG check is skipped for `ovirt_repositories_ca_rpm_url`. |
+| ovirt_repositories_rhsm_environment | UNDEF | The Satellite environment to register to (e.g. Library). |
+
+
+Example Playbook
+----------------
+
+```yaml
+---
+- name: Setup repositories using oVirt release package
+ hosts: localhost
+ vars_files:
+ # Contains encrypted `username` and `password` variables using ansible-vault
+ - passwords.yml
+ vars:
+ ovirt_repositories_ovirt_release_rpm: http://resources.ovirt.org/pub/yum-repo/ovirt-master-release.rpm
+
+ roles:
+ - repositories
+ collections:
+ - ovirt.ovirt
+```
+
+```yaml
+- name: Setup repositories using Subscription Manager
+ hosts: localhost
+
+ vars:
+ ovirt_repositories_use_subscription_manager: True
+ ovirt_repositories_force_register: True
+ ovirt_repositories_rh_username: "{{ ovirt_repositories_rh_username }}"
+ ovirt_repositories_rh_password: "{{ ovirt_repositories_rh_password }}"
+ # The following pool IDs are not valid and should be replaced.
+ ovirt_repositories_pool_ids:
+ - 0123456789abcdef0123456789abcdef
+ - 1123456789abcdef0123456789abcdef
+
+ roles:
+ - repositories
+ collections:
+ - ovirt.ovirt
+```
+
+```yaml
+- name: Setup repositories using Subscription Manager pool name
+ hosts: localhost
+
+ vars:
+ ovirt_repositories_use_subscription_manager: True
+ ovirt_repositories_force_register: True
+ ovirt_repositories_rh_username: "{{ ovirt_repositories_rh_username }}"
+ ovirt_repositories_rh_password: "{{ ovirt_repositories_rh_password }}"
+ ovirt_repositories_pools:
+ - "Red Hat Cloud Infrastructure, Premium (2-sockets)"
+
+ roles:
+ - repositories
+ collections:
+ - ovirt.ovirt
+```
+
+```yaml
+- name: Setup repositories using Subscription Manager with Satellite using username and password
+ hosts: localhost
+
+ vars:
+ ovirt_repositories_use_subscription_manager: true
+ ovirt_repositories_ca_rpm_url: https://example.com/pub/katello-ca-consumer-latest.noarch.rpm
+ ovirt_repositories_ca_rpm_validate_certs: false
+ ovirt_repositories_ca_rpm_disable_gpg_check: true
+ ovirt_repositories_target_host: engine
+ ovirt_repositories_rhsm_environment: Library
+ ovirt_repositories_rh_password: "{{ ovirt_repositories_rh_password }}"
+ ovirt_repositories_rh_username: "{{ ovirt_repositories_rh_username }}"
+ ovirt_repositories_pool_ids:
+ - 8aa508b87f922c3b017f97a785a40068
+
+ roles:
+ - repositories
+ collections:
+ - ovirt.ovirt
+```
+
+```yaml
+- name: Setup repositories using Subscription Manager with Satellite using org and activationkey
+ hosts: localhost
+ vars:
+ ovirt_repositories_use_subscription_manager: true
+ ovirt_repositories_org: "4fc82b1a-7d80-44cf-8ef6-affd8c6daa4f"
+ ovirt_repositories_activationkey: "RHV_CDN_Host"
+ ovirt_repositories_ca_rpm_url: https://example.com/pub/katello-ca-consumer-latest.noarch.rpm
+ ovirt_repositories_ca_rpm_validate_certs: false
+ ovirt_repositories_ca_rpm_disable_gpg_check: true
+ ovirt_repositories_target_host: engine
+
+ roles:
+ - repositories
+ collections:
+ - ovirt.ovirt
+```
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/defaults/main.yml b/ansible_collections/ovirt/ovirt/roles/repositories/defaults/main.yml
new file mode 100644
index 000000000..b8c281855
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/defaults/main.yml
@@ -0,0 +1,14 @@
+---
+ovirt_repositories_repos_backup: true
+ovirt_repositories_repos_backup_path: "/tmp/repo-backup-{{ '%Y-%m-%d-%H:%M:%S' | strftime(ansible_date_time.epoch) }}"
+ovirt_repositories_use_subscription_manager: false
+ovirt_repositories_force_register: false
+ovirt_repositories_clear: false
+ovirt_repositories_ovirt_version: 4.4
+ovirt_repositories_target_host: engine
+ovirt_repositories_subscription_manager_repos: []
+ovirt_repositories_subscription_manager_eus_repos: []
+ovirt_repositories_ovirt_dnf_modules: ["pki-deps", "postgresql:12", "javapackages-tools", "mod_auth_openidc:2.3", "nodejs:14"]
+ovirt_repositories_rh_dnf_modules: ["pki-deps", "postgresql:12", "nodejs:14"]
+ovirt_repositories_ovirt_release_rpm_gpg: https://plain.resources.ovirt.org/pub/keys/RPM-GPG-ovirt-v2
+ovirt_repositories_disable_gpg_check: "{{ 'master.rpm' in ovirt_repositories_ovirt_release_rpm }}"
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/examples/ovirt_repositories_release_rpm.yml b/ansible_collections/ovirt/ovirt/roles/repositories/examples/ovirt_repositories_release_rpm.yml
new file mode 100644
index 000000000..b5aa62bc8
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/examples/ovirt_repositories_release_rpm.yml
@@ -0,0 +1,11 @@
+---
+- name: Setup repositories using oVirt release package
+ hosts: localhost
+
+ vars:
+ ovirt_repositories_ovirt_release_rpm: http://resources.ovirt.org/pub/yum-repo/ovirt-master-release.rpm
+
+ roles:
+ - repositories
+ collections:
+ - ovirt.ovirt
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/examples/ovirt_repositories_subscription_manager.yml b/ansible_collections/ovirt/ovirt/roles/repositories/examples/ovirt_repositories_subscription_manager.yml
new file mode 100644
index 000000000..c796702da
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/examples/ovirt_repositories_subscription_manager.yml
@@ -0,0 +1,22 @@
+---
+- name: Setup repositories using Subscription Manager
+ hosts: localhost
+
+ vars_files:
+ # Contains encrypted `ovirt_repositories_rh_username`
+ # and `ovirt_repositories_rh_password` variables using ansible-vault
+ - passwords.yml
+
+ vars:
+ ovirt_repositories_use_subscription_manager: true
+ ovirt_repositories_rh_username: "{{ ovirt_repositories_rh_username }}"
+ ovirt_repositories_rh_password: "{{ ovirt_repositories_rh_password }}"
+ # The following pool IDs are not valid and should be replaced.
+ ovirt_repositories_pool_ids:
+ - 0123456789abcdef0123456789abcdef
+ - 1123456789abcdef0123456789abcdef
+
+ roles:
+ - repositories
+ collections:
+ - ovirt.ovirt
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/examples/passwords.yml b/ansible_collections/ovirt/ovirt/roles/repositories/examples/passwords.yml
new file mode 100644
index 000000000..98a551d7e
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/examples/passwords.yml
@@ -0,0 +1,13 @@
+---
+# As an example this file is kept in plaintext; if you want to
+# encrypt it, execute the following command:
+#
+# $ ansible-vault encrypt passwords.yml
+#
+# It will ask you for a password, which you must then pass to
+# ansible interactively when executing the playbook.
+#
+# $ ansible-playbook myplaybook.yml --ask-vault-pass
+#
+ovirt_repositories_rh_username: "myuser"
+ovirt_repositories_rh_password: "mypass"
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/tasks/backup-repos.yml b/ansible_collections/ovirt/ovirt/roles/repositories/tasks/backup-repos.yml
new file mode 100644
index 000000000..e57045c3e
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/tasks/backup-repos.yml
@@ -0,0 +1,26 @@
+---
+- name: "Find repo files to backup"
+ ansible.builtin.find:
+ paths: "/etc/yum.repos.d"
+ patterns: "*.repo"
+ tags: backup
+ register: files_to_backup
+
+- name: Create folder to back up repository files
+ ansible.builtin.file:
+ path: "{{ ovirt_repositories_repos_backup_path }}"
+ state: directory
+ mode: 0755
+ tags: backup
+ when: files_to_backup.files
+
+- name: Copy current repository files to the backup folder
+ copy:
+ src: "{{ item.path }}"
+ dest: "{{ ovirt_repositories_repos_backup_path }}"
+ remote_src: true
+ mode: preserve
+ with_items: "{{ files_to_backup.files }}"
+ tags:
+ - skip_ansible_lint
+ - backup
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/tasks/install-satellite-ca.yml b/ansible_collections/ovirt/ovirt/roles/repositories/tasks/install-satellite-ca.yml
new file mode 100644
index 000000000..b8460f29a
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/tasks/install-satellite-ca.yml
@@ -0,0 +1,36 @@
+---
+- name: Check if FIPS is enabled
+ ansible.builtin.command: cat /proc/sys/crypto/fips_enabled
+ changed_when: false
+ register: fips_check_command
+
+- name: Set FIPS enabled variable
+ ansible.builtin.set_fact:
+ ovirt_repositories_fips_enabled: "{{ fips_check_command.stdout == '1' }}"
+
+- name: Install Satellite certificates without FIPS
+ ansible.builtin.dnf:
+ name: "{{ ovirt_repositories_ca_rpm_url }}"
+ state: present
+ validate_certs: "{{ ovirt_repositories_ca_rpm_validate_certs | default(omit) }}"
+ disable_gpg_check: "{{ ovirt_repositories_ca_rpm_disable_gpg_check | default(omit) }}"
+ when: not ovirt_repositories_fips_enabled
+
+- name: Install Satellite certificates with FIPS block
+ block:
+ - name: Download Satellite certificate rpm
+ ansible.builtin.get_url:
+ url: "{{ ovirt_repositories_ca_rpm_url }}"
+ dest: /tmp/sat_ca.rpm
+ mode: '0644'
+ validate_certs: "{{ ovirt_repositories_ca_rpm_validate_certs | default(omit) }}"
+
+ - name: Install Satellite certificates with FIPS
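+ # In FIPS mode rpm's digest verification can fail on the MD5 digests in
+ # some packages, hence the --nodigest/--nofiledigest flags instead of dnf.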
+ ansible.builtin.command: "rpm -U --nodigest --nofiledigest --force /tmp/sat_ca.rpm" # noqa command-instead-of-module
+ changed_when: true
+
+ - name: Remove downloaded rpm
+ ansible.builtin.file:
+ path: /tmp/sat_ca.rpm
+ state: absent
+ when: ovirt_repositories_fips_enabled
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/tasks/main.yml b/ansible_collections/ovirt/ovirt/roles/repositories/tasks/main.yml
new file mode 100644
index 000000000..0f1d5fe03
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/tasks/main.yml
@@ -0,0 +1,30 @@
+---
+- name: Check if Subscription Manager is about to be used on a non-RHEL system
+ ansible.builtin.fail:
+ msg: "Subscription Manager can be used only on Red Hat Enterprise Linux"
+ when:
+ - ovirt_repositories_use_subscription_manager | bool
+ - ansible_distribution != 'RedHat'
+
+- name: Backup current repositories
+ include_tasks: backup-repos.yml
+ when: ovirt_repositories_repos_backup
+
+- name: Setup repositories
+ block:
+ - name: Install Satellite CA
+ include_tasks: install-satellite-ca.yml
+ when: ovirt_repositories_ca_rpm_url is defined
+
+ - name: Setup repositories using Subscription Manager
+ include_tasks: rh-subscription.yml
+ when: ovirt_repositories_org is not defined and ovirt_repositories_activationkey is not defined
+
+ - name: Setup repositories using Subscription Manager - Satellite
+ include_tasks: satellite-subscription.yml
+ when: ovirt_repositories_org is defined and ovirt_repositories_activationkey is defined
+ when: ovirt_repositories_use_subscription_manager | bool
+
+- name: Setup repositories using oVirt release package
+ include_tasks: rpm.yml
+ when: not ovirt_repositories_use_subscription_manager | bool
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/tasks/rh-subscription.yml b/ansible_collections/ovirt/ovirt/roles/repositories/tasks/rh-subscription.yml
new file mode 100644
index 000000000..e7545b4bc
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/tasks/rh-subscription.yml
@@ -0,0 +1,83 @@
+---
+- name: Check if mandatory variables are set
+ ansible.builtin.fail:
+ msg: "Either ovirt_repositories_pool_ids or ovirt_repositories_pools must be defined."
+ when:
+ - "ovirt_repositories_pool_ids is not defined"
+ - "ovirt_repositories_pools is not defined or ovirt_repositories_pools | list | length == 0"
+
+- name: Check that pool IDs and pool names are not mixed
+ ansible.builtin.fail:
+ msg: "ovirt_repositories_pool_ids and ovirt_repositories_pools are mutually exclusive; define only one of them."
+ when:
+ - "ovirt_repositories_pool_ids is defined"
+ - "ovirt_repositories_pools is defined"
+
+- name: Ensure subscription-manager package is installed
+ ansible.builtin.package:
+ name: subscription-manager
+ state: present
+
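+# The register command is assembled with inline Jinja so the optional flags
+# (--force, --environment, --serverurl) are passed only when the corresponding
+# variables are defined; no_log keeps the credentials out of the logs.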
+- name: Register to subscription manager
+ ansible.builtin.command: |
+ subscription-manager register
+ --username {{ ovirt_repositories_rh_username }}
+ --password {{ ovirt_repositories_rh_password }}
+ {% if ovirt_repositories_force_register is defined and ovirt_repositories_force_register|bool %} --force {% endif %}
+ {% if ovirt_repositories_rhsm_environment is defined %} --environment {{ ovirt_repositories_rhsm_environment }} {% endif %}
+ {% if ovirt_repositories_rhsm_server_hostname is defined %} --serverurl {{ ovirt_repositories_rhsm_server_hostname }} {% endif %}
+ changed_when: false
+ no_log: true
+
+- include_tasks: search-pool-id.yml
+ with_items:
+ - "{{ ovirt_repositories_pools }}"
+ when: ovirt_repositories_pools is defined
+
+- name: Subscribe to multiple pool IDs
+ ansible.builtin.command: subscription-manager attach {% for id in ovirt_repositories_pool_ids %} --pool {{ id }} {% endfor %}
+ when: ovirt_repositories_pool_ids is defined and ovirt_repositories_pool_ids | list | length != 0
+
+- name: "Include {{ ovirt_repositories_target_host }}_{{ ovirt_repositories_ovirt_version }}.yml variables"
+ ansible.builtin.include_vars: "{{ ovirt_repositories_target_host }}_{{ ovirt_repositories_ovirt_version }}.yml"
+ when: ovirt_repositories_subscription_manager_repos | list | length == 0
+
+- name: "Include {{ ovirt_repositories_target_host }}_eus_{{ ovirt_repositories_ovirt_version }}.yml variables"
+ ansible.builtin.include_vars: "{{ ovirt_repositories_target_host }}_eus_{{ ovirt_repositories_ovirt_version }}.yml"
+ when:
+ - ovirt_repositories_ovirt_version|string >= '4.4'
+ - ovirt_repositories_subscription_manager_eus_repos | list | length == 0
+
+- name: Disable all repositories
+ ansible.builtin.command: subscription-manager repos --disable=*
+ when: ovirt_repositories_clear
+
+- name: Enable required repositories
+ ansible.builtin.command: subscription-manager repos --enable={{ item }}
+ changed_when: false
+ with_items: "{{ ovirt_repositories_subscription_manager_repos }}"
+
+- name: Enable EUS repositories
+ ansible.builtin.command: subscription-manager repos --enable={{ item }}
+ changed_when: false
+ ignore_errors: true # EUS channels might not be available, so we cannot break the installation when there is a subscription error
+ with_items: "{{ ovirt_repositories_subscription_manager_eus_repos }}"
+ when:
+ - ovirt_repositories_ovirt_version|string >= '4.4'
+
+- name: Pin the RHEL release version to 8.6
+ ansible.builtin.command: subscription-manager release --set=8.6
+ changed_when: false
+ when:
+ - ovirt_repositories_ovirt_version|string >= '4.4'
+
+- name: Enable dnf modules
+ ansible.builtin.command: "dnf module enable -y {{ ovirt_repositories_rh_dnf_modules | join(' ') }}"
+ args:
+ warn: false
+ when:
+ - ovirt_repositories_ovirt_version|string >= '4.4'
+ - ovirt_repositories_target_host == 'engine'
+
+- name: Set ovirt_repositories_subscription_manager_repos to an empty list for the next run
+ ansible.builtin.include_vars: default.yml
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/tasks/rpm.yml b/ansible_collections/ovirt/ovirt/roles/repositories/tasks/rpm.yml
new file mode 100644
index 000000000..25e0e4040
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/tasks/rpm.yml
@@ -0,0 +1,20 @@
+---
+- name: Use oVirt GPG key
+ ansible.builtin.rpm_key:
+ state: present
+ key: "{{ ovirt_repositories_ovirt_release_rpm_gpg }}"
+ when: not ovirt_repositories_disable_gpg_check
+
+- name: Install oVirt release package
+ ansible.builtin.package:
+ name: "{{ ovirt_repositories_ovirt_release_rpm | mandatory }}"
+ state: present
+ disable_gpg_check: "{{ ovirt_repositories_disable_gpg_check }}"
+
+- name: Enable dnf modules
+ ansible.builtin.command: "dnf module enable -y {{ ovirt_repositories_ovirt_dnf_modules | join(' ') }}"
+ args:
+ warn: false
+ when:
+ - ovirt_repositories_ovirt_version|string >= '4.4'
+ - ovirt_repositories_target_host == 'engine'
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/tasks/satellite-subscription.yml b/ansible_collections/ovirt/ovirt/roles/repositories/tasks/satellite-subscription.yml
new file mode 100644
index 000000000..bf1adc823
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/tasks/satellite-subscription.yml
@@ -0,0 +1,30 @@
+---
+- name: Ensure subscription-manager package is installed
+ ansible.builtin.package:
+ name: subscription-manager
+ state: present
+
+- name: Register to subscription manager
+ ansible.builtin.command: |
+ subscription-manager register
+ --org {{ ovirt_repositories_org }}
+ --activationkey {{ ovirt_repositories_activationkey }}
+ {% if ovirt_repositories_force_register is defined and ovirt_repositories_force_register|bool %} --force {% endif %}
+ changed_when: false
+
+- name: Pin the RHEL release version to 8.6
+ ansible.builtin.command: subscription-manager release --set=8.6
+ changed_when: false
+ when:
+ - ovirt_repositories_ovirt_version|string >= '4.4'
+
+- name: Enable dnf modules
+ ansible.builtin.command: "dnf module enable -y {{ ovirt_repositories_rh_dnf_modules | join(' ') }}"
+ args:
+ warn: false
+ when:
+ - ovirt_repositories_ovirt_version|string >= '4.4'
+ - ovirt_repositories_target_host == 'engine'
+
+- name: Set ovirt_repositories_subscription_manager_repos to an empty list for the next run
+ ansible.builtin.include_vars: default.yml
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/tasks/search-pool-id.yml b/ansible_collections/ovirt/ovirt/roles/repositories/tasks/search-pool-id.yml
new file mode 100644
index 000000000..2628960de
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/tasks/search-pool-id.yml
@@ -0,0 +1,19 @@
+---
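+# Resolve a subscription pool name to pool IDs: query the available pools
+# first, then the already consumed ones, and fail when neither matches.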
+- name: Search available pool IDs for '{{ item }}'
+ ansible.builtin.command: subscription-manager list --available --pool-only --matches="{{ item }}"
+ changed_when: false
+ register: available_pool
+
+- name: Search consumed pool IDs for '{{ item }}'
+ ansible.builtin.command: subscription-manager list --consumed --pool-only --matches="{{ item }}"
+ changed_when: false
+ register: consumed_pool
+ when: available_pool['stdout_lines'] | list | length == 0
+
+- name: Fail when pool '{{ item }}' cannot be found
+ ansible.builtin.fail:
+ msg: The pool '{{ item }}' could not be found.
+ when: available_pool['stdout_lines'] | list | length == 0 and consumed_pool['stdout_lines'] | list | length == 0
+
+- name: Append the found pool IDs to ovirt_repositories_pool_ids
+ ansible.builtin.set_fact:
+ ovirt_repositories_pool_ids: "{{ available_pool['stdout_lines'] + ovirt_repositories_pool_ids | default([]) }}"
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/vars/default.yml b/ansible_collections/ovirt/ovirt/roles/repositories/vars/default.yml
new file mode 100644
index 000000000..b15ff8554
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/vars/default.yml
@@ -0,0 +1 @@
+ovirt_repositories_subscription_manager_repos: []
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.1.yml b/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.1.yml
new file mode 100644
index 000000000..802be7a04
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.1.yml
@@ -0,0 +1,6 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhel-7-server-rpms
+ - rhel-7-server-supplementary-rpms
+ - rhel-7-server-rhv-4.1-rpms
+ - rhel-7-server-rhv-4-tools-rpms
+ - jb-eap-7-for-rhel-7-server-rpms
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.2.yml b/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.2.yml
new file mode 100644
index 000000000..991edf088
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.2.yml
@@ -0,0 +1,7 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhel-7-server-rpms
+ - rhel-7-server-supplementary-rpms
+ - rhel-7-server-rhv-4.2-manager-rpms
+ - rhel-7-server-rhv-4-manager-tools-rpms
+ - rhel-7-server-ansible-2-rpms
+ - jb-eap-7.2-for-rhel-7-server-rpms
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.3.yml b/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.3.yml
new file mode 100644
index 000000000..314c5fbc7
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.3.yml
@@ -0,0 +1,7 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhel-7-server-rpms
+ - rhel-7-server-supplementary-rpms
+ - rhel-7-server-rhv-4.3-manager-rpms
+ - rhel-7-server-rhv-4-manager-tools-rpms
+ - rhel-7-server-ansible-2.9-rpms
+ - jb-eap-7.2-for-rhel-7-server-rpms
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.4.yml b/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.4.yml
new file mode 100644
index 000000000..68f04516f
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.4.yml
@@ -0,0 +1,8 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhel-8-for-x86_64-baseos-rpms
+ - rhel-8-for-x86_64-appstream-rpms
+ - rhv-4.4-manager-for-rhel-8-x86_64-rpms
+ - jb-eap-7.4-for-rhel-8-x86_64-rpms
+ - fast-datapath-for-rhel-8-x86_64-rpms
+ - openstack-16.2-cinderlib-for-rhel-8-x86_64-rpms
+ - rhceph-4-tools-for-rhel-8-x86_64-rpms
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_eus_4.4.yml b/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_eus_4.4.yml
new file mode 100644
index 000000000..2c6f37118
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_eus_4.4.yml
@@ -0,0 +1,3 @@
+ovirt_repositories_subscription_manager_eus_repos:
+ - rhel-8-for-x86_64-baseos-eus-rpms
+ - rhel-8-for-x86_64-appstream-eus-rpms
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.1.yml b/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.1.yml
new file mode 100644
index 000000000..47b233372
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.1.yml
@@ -0,0 +1,3 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhel-7-server-rpms
+ - rhel-7-server-rhv-4-mgmt-agent-rpms
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.2.yml b/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.2.yml
new file mode 100644
index 000000000..edb0956d3
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.2.yml
@@ -0,0 +1,4 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhel-7-server-rpms
+ - rhel-7-server-rhv-4-mgmt-agent-rpms
+ - rhel-7-server-ansible-2-rpms
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.3.yml b/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.3.yml
new file mode 100644
index 000000000..fe51604d8
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.3.yml
@@ -0,0 +1,4 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhel-7-server-rpms
+ - rhel-7-server-rhv-4-mgmt-agent-rpms
+ - rhel-7-server-ansible-2.9-rpms
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.4.yml b/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.4.yml
new file mode 100644
index 000000000..6620da6ef
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.4.yml
@@ -0,0 +1,8 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhel-8-for-x86_64-baseos-rpms
+ - rhel-8-for-x86_64-appstream-rpms
+ - rhv-4-mgmt-agent-for-rhel-8-x86_64-rpms
+ - advanced-virt-for-rhel-8-x86_64-rpms
+ - fast-datapath-for-rhel-8-x86_64-rpms
+ - openstack-16.2-cinderlib-for-rhel-8-x86_64-rpms
+ - rhceph-4-tools-for-rhel-8-x86_64-rpms
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_eus_4.4.yml b/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_eus_4.4.yml
new file mode 100644
index 000000000..2c6f37118
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_eus_4.4.yml
@@ -0,0 +1,3 @@
+ovirt_repositories_subscription_manager_eus_repos:
+ - rhel-8-for-x86_64-baseos-eus-rpms
+ - rhel-8-for-x86_64-appstream-eus-rpms
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_ppc_4.4.yml b/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_ppc_4.4.yml
new file mode 100644
index 000000000..ab914d09d
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_ppc_4.4.yml
@@ -0,0 +1,6 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhel-8-for-ppc64le-baseos-rpms
+ - rhel-8-for-ppc64le-appstream-rpms
+ - rhv-4-mgmt-agent-for-rhel-8-ppc64le-rpms
+ - advanced-virt-for-rhel-8-ppc64le-rpms
+ - fast-datapath-for-rhel-8-ppc64le-rpms
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_ppc_eus_4.4.yml b/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_ppc_eus_4.4.yml
new file mode 100644
index 000000000..c2c5ef44d
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_ppc_eus_4.4.yml
@@ -0,0 +1,3 @@
+ovirt_repositories_subscription_manager_eus_repos:
+ - rhel-8-for-ppc64le-baseos-eus-rpms
+ - rhel-8-for-ppc64le-appstream-eus-rpms
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.1.yml b/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.1.yml
new file mode 100644
index 000000000..edaae59a5
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.1.yml
@@ -0,0 +1,2 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhel-7-server-rhvh-4-rpms
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.2.yml b/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.2.yml
new file mode 100644
index 000000000..edaae59a5
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.2.yml
@@ -0,0 +1,2 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhel-7-server-rhvh-4-rpms
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.3.yml b/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.3.yml
new file mode 100644
index 000000000..edaae59a5
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.3.yml
@@ -0,0 +1,2 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhel-7-server-rhvh-4-rpms
diff --git a/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.4.yml b/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.4.yml
new file mode 100644
index 000000000..aaf695bd5
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.4.yml
@@ -0,0 +1,2 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhvh-4-for-rhel-8-x86_64-rpms
diff --git a/ansible_collections/ovirt/ovirt/roles/shutdown_env/README.md b/ansible_collections/ovirt/ovirt/roles/shutdown_env/README.md
new file mode 100644
index 000000000..8d16f8f8e
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/shutdown_env/README.md
@@ -0,0 +1,53 @@
+oVirt environment shutdown
+=========
+
+The `shutdown_env` role iterates through all the entities (VMs and hosts) in an oVirt/RHV cluster and performs a clean and ordered shutdown.
+It also handles Hosted-Engine and hyper-converged GlusterFS environments as special cases, detecting them automatically.
+The role is intended to be run only against the engine machine.
+Please note that host shutdown is asynchronous and the playbook terminates before the HE hosts are really down.
+
+On a Hosted-Engine environment, global maintenance mode will be set:
+the user has to manually exit it in order to get the engine VM automatically powered up when needed.
+
+A startup mode is also available:
+in startup mode the role will bring up all the hosts that have power management configured, and it
+will unset global maintenance mode on a hosted-engine environment.
+The startup mode is executed only if the 'startup' tag is applied; shutdown mode is the default.
+The startup mode requires the engine to be already up:
+power on its host if it is a dedicated one, or power on at least one of the HE hosts (2 on a hyper-converged env) and exit global maintenance mode, or manually start the engine VM with `hosted-engine --vm-start`.
+
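+For startup, run the same playbook with the `startup` tag applied, e.g. `ansible-playbook shutdown_env.yml --tags startup` (assuming the example playbook below is saved as `shutdown_env.yml`).
+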
+Depending on the host power-on order, the engine could elect a new SPM host or reconstruct the master storage domain.
+The environment can take up to 10 minutes to come back to a stable condition.
+Possible improvements are tracked here: https://bugzilla.redhat.com/1609029
+
+Example Playbook
+----------------
+
+```yaml
+---
+- name: oVirt shutdown environment
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ engine_url: https://ovirt-engine.example.com/ovirt-engine/api
+ engine_user: admin@internal
+ engine_password: 123456
+ engine_cafile: /etc/pki/ovirt-engine/ca.pem
+
+ roles:
+ - role: shutdown_env
+ collections:
+ - ovirt.ovirt
+```
+
+Demo
+----
+Here is a demo showing a clean and ordered shutdown of a hyper-converged hosted-engine environment with 3 hosts and 3 regular VMs plus the HE one.
+[![asciicast](https://asciinema.org/a/261501.svg)](https://asciinema.org/a/261501)
+
+License
+-------
+
+Apache License 2.0
diff --git a/ansible_collections/ovirt/ovirt/roles/shutdown_env/defaults/main.yml b/ansible_collections/ovirt/ovirt/roles/shutdown_env/defaults/main.yml
new file mode 100644
index 000000000..87f80142a
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/shutdown_env/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+host_names:
+ - '*'
diff --git a/ansible_collections/ovirt/ovirt/roles/shutdown_env/examples/passwords.yml b/ansible_collections/ovirt/ovirt/roles/shutdown_env/examples/passwords.yml
new file mode 100644
index 000000000..92c7613c9
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/shutdown_env/examples/passwords.yml
@@ -0,0 +1,12 @@
+---
+# As an example this file is kept in plaintext; if you want to
+# encrypt it, execute the following command:
+#
+# $ ansible-vault encrypt passwords.yml
+#
+# It will ask you for a password, which you must then pass to
+# ansible interactively when executing the playbook.
+#
+# $ ansible-playbook myplaybook.yml --ask-vault-pass
+#
+engine_password: 123456
diff --git a/ansible_collections/ovirt/ovirt/roles/shutdown_env/examples/shutdown_env.yml b/ansible_collections/ovirt/ovirt/roles/shutdown_env/examples/shutdown_env.yml
new file mode 100644
index 000000000..26340f80e
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/shutdown_env/examples/shutdown_env.yml
@@ -0,0 +1,18 @@
+---
+- name: oVirt shutdown env
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars_files:
+ # Contains the `engine_password` variable encrypted using ansible-vault
+ - passwords.yml
+
+ vars:
+ engine_url: https://ovirt.example.com/ovirt-engine/api
+ engine_user: admin@internal
+
+ roles:
+ - role: shutdown_env
+ collections:
+ - ovirt.ovirt
diff --git a/ansible_collections/ovirt/ovirt/roles/shutdown_env/tasks/main.yml b/ansible_collections/ovirt/ovirt/roles/shutdown_env/tasks/main.yml
new file mode 100644
index 000000000..8b6db1e69
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/shutdown_env/tasks/main.yml
@@ -0,0 +1,236 @@
+---
+- name: shutdown_env role
+ block:
+
+ - name: Populate service facts
+ ansible.builtin.service_facts:
+
+ - name: Enforce ovirt-engine machine
+ ansible.builtin.fail:
+ msg: >
+ This role has been designed to be run only against the machine
+ where ovirt-engine is running.
+ when: '"ovirt-engine.service" not in ansible_facts.services'
+
+ - name: Enforce ovirt-engine status
+ ansible.builtin.fail:
+ msg: >
+ ovirt-engine is required to be enabled and running in order
+ to correctly run this role.
+ when: ansible_facts.services["ovirt-engine.service"].state != 'running'
+
+ - name: Login to oVirt
+ ovirt_auth:
+ url: "{{ engine_url | default(lookup('env','OVIRT_URL')) | default(omit) }}"
+ username: "{{ engine_user | default(lookup('env','OVIRT_USERNAME')) | default(omit) }}"
+ password: "{{ engine_password | default(lookup('env','OVIRT_PASSWORD')) | default(omit) }}"
+ ca_file: "{{ engine_cafile | default(lookup('env','OVIRT_CAFILE')) | default(omit) }}"
+ token: "{{ engine_token | default(lookup('env','OVIRT_TOKEN')) | default(omit) }}"
+ insecure: "{{ engine_insecure | default(true) }}"
+ when: ovirt_auth is undefined or not ovirt_auth
+ register: loggedin
+ tags:
+ - always
+
+ - name: Get hosts
+ ovirt_host_info:
+ auth: "{{ ovirt_auth }}"
+ all_content: true
+ register: hosts_result
+
+ - name: Default to shutdown mode
+ ansible.builtin.set_fact:
+ startup: false
+
+ - name: Switch to startup mode when the 'startup' tag is applied
+ ansible.builtin.set_fact:
+ startup: true
+ tags: ['never', 'startup']
+
+ - name: Define a query for HE hosts
+ ansible.builtin.set_fact:
+ he_hosts: >-
+ {{ hosts_result.ovirt_hosts | selectattr('hosted_engine', 'defined') | selectattr('hosted_engine.configured') | list }}
+
+ - name: Define a query for non HE hosts
+ ansible.builtin.set_fact:
+ non_he_hosts: >-
+ {{ hosts_result.ovirt_hosts | difference(he_hosts) }}
+
+ - name: Define a query for non HE hosts with power management
+ ansible.builtin.set_fact:
+ non_he_hosts_ipmi: >-
+ {{ non_he_hosts | selectattr('power_management', 'defined') |
+ selectattr('power_management.enabled') | list }}
+
+ - name: Define a query for non HE hosts without power management
+ ansible.builtin.set_fact:
+ non_he_hosts_noipmi: "{{ non_he_hosts | difference(non_he_hosts_ipmi) }}"
+
+ - name: Define a query for hosts with power management
+ ansible.builtin.set_fact:
+ hosts_ipmi: >-
+ {{ hosts_result.ovirt_hosts | selectattr('power_management', 'defined') | selectattr('power_management.enabled') | list }}
+
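+  # he_shutdown_cmd waits until the HostedEngine VM reports down, releases the
+  # sanlock lease and powers the host off; non_he_noipmi_shutdown_cmd waits for
+  # all qemu-kvm processes to exit before shutting the host down.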
+ - name: Define commands
+ ansible.builtin.set_fact:
+ he_shutdown_cmd: >-
+ while hosted-engine --vm-status | grep "\"vm\": \"up\"" >/dev/null;
+ do sleep 5;
+ done;
+ sanlock client shutdown -f 1;
+ shutdown -h now
+ non_he_noipmi_shutdown_cmd: >-
+ while pgrep qemu-kvm >/dev/null; do sleep 5; done; shutdown -h now
+ gmaintenance_mode_cmd: >-
+ hosted-engine --set-maintenance --mode=global
+ ugmaintenance_mode_cmd: >-
+ hosted-engine --set-maintenance --mode=none
+
+ - name: Get VM list
+ ovirt_vm_info:
+ auth: "{{ ovirt_auth }}"
+ all_content: true
+ register: vm_result
+
+  - name: Shutdown VMs and hosts
+ block:
+ - name: Shutdown all VMs, except HostedEngine
+ ovirt_vm:
+ state: stopped
+ name: "{{ item.name }}"
+ auth: "{{ ovirt_auth }}"
+ wait: true
+ when: "item.origin != 'managed_hosted_engine'"
+ with_items:
+ - "{{ vm_result.ovirt_vms }}"
+ failed_when: false
+
+ - name: Refresh VM list
+ ovirt_vm_info:
+ auth: "{{ ovirt_auth }}"
+ all_content: true
+ register: vm_result
+
+ - name: Forcefully shutdown remaining VMs, except HostedEngine
+ ovirt_vm:
+ state: stopped
+ name: "{{ item.name }}"
+ auth: "{{ ovirt_auth }}"
+ wait: true
+ force: true
+ when: "item.origin != 'managed_hosted_engine' and item.status != 'down'"
+ with_items:
+ - "{{ vm_result.ovirt_vms }}"
+
+ - name: Shutdown hosts, except HE ones, via IPMI (if configured)
+ ovirt_host:
+ state: stopped
+ name: "{{ item.name }}"
+ auth: "{{ ovirt_auth }}"
+ with_items:
+ - "{{ non_he_hosts_ipmi }}"
+
+ - name: Shutdown remaining non HE hosts
+ ansible.builtin.command: >-
+ ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
+ -i /etc/pki/ovirt-engine/keys/engine_id_rsa
+ -p {{ item.ssh.port }}
+ -t root@{{ item.address }}
+ '{{ non_he_noipmi_shutdown_cmd }}'
+ async: 1000
+ poll: 0
+ with_items:
+ - "{{ non_he_hosts_noipmi }}"
+ failed_when: false
+ changed_when: false
+
+ - name: Set global maintenance mode
+ ansible.builtin.command: >-
+ ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
+ -i /etc/pki/ovirt-engine/keys/engine_id_rsa
+ -p {{ item.ssh.port }} -t root@{{ item.address }}
+ '{{ gmaintenance_mode_cmd }}'
+ with_items:
+ - "{{ he_hosts }}"
+ ignore_errors: true
+ changed_when: false
+ register: globalmm
+
+ - name: Set globalmm_set variable
+ ansible.builtin.set_fact:
+ globalmm_set: "{{ globalmm.results | rejectattr('failed') | list | length }}"
+ when: globalmm is defined and globalmm.results is defined
+
+ - name: Enforce global maintenance mode
+ ansible.builtin.fail:
+ msg: >
+ Failed setting global maintenance mode.
+ when: he_hosts|length > 0 and globalmm_set|int == 0
+
+    - name: Warn about HE global maintenance mode
+ ansible.builtin.debug:
+ msg: >
+ HE global maintenance mode has been set; you have to exit it to get the engine VM started when needed
+ when: globalmm_set|int > 0
+
+ - name: Shutdown of HE hosts
+ ansible.builtin.command: >-
+ ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
+ -i /etc/pki/ovirt-engine/keys/engine_id_rsa -p {{ item.ssh.port }}
+ -t root@{{ item.address }} '{{ he_shutdown_cmd }}'
+ async: 1000
+ poll: 0
+ with_items:
+ - "{{ he_hosts }}"
+ changed_when: false
+
+ - name: Shutdown engine host/VM
+ ansible.builtin.command: shutdown -h now
+ async: 1000
+ poll: 0
+ changed_when: false
+
+ when: not startup
+
+ - name: Startup mode
+ block:
+ - name: Power-on IPMI configured hosts
+ ovirt_host:
+ state: started
+ name: "{{ item.name }}"
+ auth: "{{ ovirt_auth }}"
+ with_items:
+ - "{{ hosts_ipmi }}"
+
+ - name: Unset global maintenance mode
+ ansible.builtin.command: >-
+ ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
+ -i /etc/pki/ovirt-engine/keys/engine_id_rsa -p {{ item.ssh.port }}
+ -t root@{{ item.address }} '{{ ugmaintenance_mode_cmd }}'
+ with_items:
+ - "{{ he_hosts }}"
+ ignore_errors: true
+ changed_when: false
+ register: uglobalmm
+
+ - name: Set globalmm_set variable
+ ansible.builtin.set_fact:
+ globalmm_set: "{{ uglobalmm.results | rejectattr('failed') | list | length }}"
+ when: uglobalmm is defined and uglobalmm.results is defined
+
+ - name: Enforce no global maintenance mode
+ ansible.builtin.fail:
+ msg: >
+ Failed unsetting global maintenance mode.
+ when: he_hosts|length > 0 and globalmm_set|int == 0
+ when: startup
+
+ always:
+ - name: Logout from oVirt
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
+ when: not loggedin.skipped | default(false)
+ tags:
+ - always
diff --git a/ansible_collections/ovirt/ovirt/roles/vm_infra/README.md b/ansible_collections/ovirt/ovirt/roles/vm_infra/README.md
new file mode 100644
index 000000000..807cb232d
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/vm_infra/README.md
@@ -0,0 +1,324 @@
+oVirt Virtual Machine Infrastructure
+====================================
+
+The `vm_infra` role manages the virtual machine infrastructure in oVirt.
+This role also creates an inventory of the virtual machines it defines if
+`wait_for_ip` is set to `true` and the state of the virtual machine is `running`.
+All defined virtual machines are part of the `ovirt_vm` inventory group.
+The role also creates `ovirt_tag_{tag_name}` groups if there are any
+tags assigned to a virtual machine and places all virtual machines with that tag
+into that inventory group.
+
+Consider the following variable structure:
+
+```yaml
+vms:
+ - name: myvm1
+ tag: mytag1
+ profile: myprofile
+
+ - name: myvm2
+ tag: mytag2
+ profile: myprofile
+```
+
+The role will create inventory group `ovirt_vm` with both of the virtual
+machines - `myvm1` and `myvm2`. The role also creates inventory group `ovirt_tag_mytag1`
+with virtual machine `myvm1` and inventory group `ovirt_tag_mytag2` with virtual
+machine `myvm2`.
+
+Limitations
+-----------
+
+ * Does not support Ansible Check Mode (Dry Run).
+
+Role Variables
+--------------
+
+| Name | Default value | |
+|--------------------------------|---------------|----------------------------------------------|
+| vms | UNDEF | List of dictionaries with virtual machine specifications. |
+| affinity_groups | UNDEF | List of dictionaries with affinity groups specifications. |
+| wait_for_ip | false | If true, the playbook should wait for the virtual machine IP reported by the guest agent. |
+| wait_for_ip_version | v4 | Specify which IP version should be waited for. Either v4 or v6. |
+| wait_for_ip_range | 0.0.0.0/0 | Specify the CIDR the virtual machine IP must fall into to be reported. Works only for IPv4. |
+| debug_vm_create | false | If true, logs the tasks of the virtual machine being created. The log can contain passwords. |
+| vm_infra_create_single_timeout | 180 | Time in seconds to wait for VM to be created and started (if state is running). |
+| vm_infra_create_poll_interval | 15 | Polling interval. Time in seconds to wait between check of state of VM. |
+| vm_infra_create_all_timeout | vm_infra_create_single_timeout * (vms.length) | Total time to wait for all VMs to be created/started. |
+| vm_infra_wait_for_ip_retries | 5 | Number of retries to check if the VM is reporting its IP address. |
+| vm_infra_wait_for_ip_delay | 5 | Polling interval of IP address. Time in seconds to wait between check if VM reports IP address. |
+
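+As an illustrative sketch (values are placeholders; the variable names come
+from the table above), waiting for IPv4 addresses from a specific subnet with
+relaxed timeouts could look like:
+
+```yaml
+wait_for_ip: true
+wait_for_ip_version: v4
+wait_for_ip_range: 192.168.122.0/24  # only report IPs from this subnet
+vm_infra_create_single_timeout: 300  # give slow storage more time
+vm_infra_wait_for_ip_retries: 10
+```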
+
+The `vms` and `profile` variables can contain the following attributes; note that if you define the same attribute in both, the value in `vms` takes precedence (see the sketch after this table):
+
+| Name | Default value | |
+|--------------------|-----------------------|--------------------------------------------|
+| name | UNDEF | Name of the virtual machine to create. |
+| tag | UNDEF | Name of the tag to assign to the virtual machine. Only administrator users can use this attribute. |
+| cloud_init | UNDEF | Dictionary with values for Unix-like Virtual Machine initialization using cloud-init. See the <i>cloud_init</i> section below for a more detailed description. |
+| cloud_init_nics | UNDEF | List of dictionaries representing network interfaces to be set up by cloud-init. See the <i>cloud_init_nics</i> section below for a more detailed description. |
+| sysprep | UNDEF | Dictionary with values for Windows Virtual Machine initialization using sysprep. See the <i>sysprep</i> section below for a more detailed description. |
+| profile | UNDEF | Dictionary specifying the virtual machine hardware. See the table below. |
+| state | present | Should the Virtual Machine be stopped, present or running. Takes precedence over the state value in the profile. |
+| nics | UNDEF | List of dictionaries specifying the NICs of the virtual machine. See below for more detailed description. |
+| cluster | UNDEF | Name of the cluster where the virtual machine will be created. |
+| clone | No | If yes, the disks of the created virtual machine will be cloned and independent of the template. This parameter is used only when state is running or present and the VM did not exist before. |
+| template | Blank | Name of template that the virtual machine should be based on. |
+| template_version | UNDEF | Version number of the template to be used for VM. By default the latest available version of the template is used. |
+| memory | UNDEF | Amount of virtual machine memory. |
+| memory_max | UNDEF | Upper bound of virtual machine memory up to which memory hot-plug can be performed. |
+| memory_guaranteed | UNDEF | Amount of minimal guaranteed memory of the Virtual Machine. Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB). <i>memory_guaranteed</i> parameter can't be lower than <i>memory</i> parameter. |
+| cores | UNDEF | Number of CPU cores used by the virtual machine. |
+| sockets | UNDEF | Number of virtual CPU sockets of the Virtual Machine. |
+| cpu_shares | UNDEF | Set the CPU shares for this Virtual Machine. |
+| cpu_threads | UNDEF | Set the number of CPU threads for this Virtual Machine. |
+| disks | UNDEF | List of dictionaries specifying the additional virtual machine disks. See below for more detailed description. |
+| custom_properties | UNDEF | Properties sent to VDSM to configure various hooks.<br/> Custom properties is a list of dictionaries which can have the following values: <br/><i>name</i> - Name of the custom property. For example: hugepages, vhost, sap_agent, etc.<br/><i>regexp</i> - Regular expression to set for custom property.<br/><i>value</i> - Value to set for custom property. |
+| high_availability | UNDEF | Whether or not the virtual machine should be set as highly available. |
+| high_availability_priority | UNDEF | Indicates the priority of the virtual machine inside the run and migration queues. Virtual machines with higher priorities will be started and migrated before virtual machines with lower priorities. The value is an integer between 0 and 100. The higher the value, the higher the priority. If no value is passed, default value is set by oVirt/RHV engine. |
+| io_threads | UNDEF | Number of IO threads used by virtual machine. 0 means IO threading disabled. |
+| description | UNDEF | Description of the Virtual Machine. |
+| operating_system | UNDEF | Operating system of the Virtual Machine. For example: rhel_7x64 |
+| type | UNDEF | Type of the Virtual Machine. Possible values: desktop, server or high_performance |
+| graphical_console | UNDEF | Assign a graphical console to the virtual machine.<br/>Graphical console is a dictionary which can have the following values:<br/><i>headless_mode</i> - If true, disable the graphics console for this virtual machine.<br/><i>protocol</i> - 'VNC', 'Spice' or both. |
+| storage_domain | UNDEF | Name of the storage domain where all virtual machine disks should be created. Considered only when template is provided.|
+| ssh_key | UNDEF | SSH key to be deployed to the virtual machine. This parameter is kept for backward compatibility and takes precedence over <i>authorized_ssh_keys</i> in the <i>cloud_init</i> dictionary. |
+| domain | UNDEF | The domain of the virtual machine. This parameter is kept for backward compatibility and takes precedence over <i>host_name</i> in the <i>cloud_init</i> or <i>sysprep</i> dictionary.|
+| lease | UNDEF | Name of the storage domain this virtual machine lease resides on. |
+| root_password | UNDEF | The root password of the virtual machine. This parameter is kept for backward compatibility and takes precedence over <i>root_password</i> in the <i>cloud_init</i> or <i>sysprep</i> dictionary.|
+| host | UNDEF | If you need to set cpu_mode to host_passthrough, use this parameter to define the host to use, along with placement_policy set to pinned. |
+| cpu_mode | UNDEF | CPU mode of the virtual machine. It can be some of the following: host_passthrough, host_model or custom. |
+| placement_policy | UNDEF | The configuration of the virtual machine's placement policy. |
+| boot_devices | UNDEF | List of boot devices which should be used to boot. Valid entries are `cdrom`, `hd`, `network`. |
+| serial_console | UNDEF | True to enable the VirtIO serial console, False to disable it. By default the value is chosen by the oVirt/RHV engine. |
+| serial_policy | UNDEF | Specify a serial number policy for the Virtual Machine. The following options are supported. <br/><i>vm</i> - Sets the Virtual Machine's UUID as its serial number. <br/><i>host</i> - Sets the host's UUID as the Virtual Machine's serial number. <br/><i>custom</i> - Allows you to specify a custom serial number in serial_policy_value. |
+| serial_policy_value | UNDEF | Allows you to specify a custom serial number. This parameter is used only when <i>serial_policy</i> is custom. |
+| comment | UNDEF | Comment of the Virtual Machine. |
+
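+A minimal sketch of the precedence rule (names and values are illustrative):
+
+```yaml
+web_profile:
+  cluster: production
+  memory: 2GiB
+
+vms:
+  - name: web-1
+    profile: "{{ web_profile }}"
+    memory: 4GiB  # overrides the profile's 2GiB
+```
+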
+The item in the `disks` list of the `profile` dictionary can contain the following attributes:
+
+| Name | Default value | |
+|--------------------|----------------|----------------------------------------------|
+| size | UNDEF | The size of the additional disk. |
+| name | UNDEF | The name of the additional disk. |
+| id | UNDEF | ID of the disk. If you pass both the id and the name, the disk will be looked up by id and its name will be updated if it differs from the name parameter. |
+| storage_domain | UNDEF | The name of the storage domain where the disk should be created. |
+| interface | UNDEF | The interface of the disk. |
+| name_prefix | True | If true, the name of the VM will be used as a prefix of the disk name (a disk named <i>data</i> on VM <i>myvm1</i> becomes <i>myvm1_data</i>). If false, only the name of the disk is used - useful when creating a VM from a template with a custom disk size. |
+| format | UNDEF | Specify the format of the disk. <ul><li>cow - If set, the disk will be created as a sparse disk, so space will be allocated for the volume as needed. This format is also known as thin provisioned disks.</li><li>raw - If set, disk space will be allocated right away. This format is also known as preallocated disks.</li></ul> |
+| bootable | UNDEF | True if the disk should be bootable. |
+| activate | UNDEF | True if the disk should be activated. |
+
+The item in the `nics` list of the `profile` dictionary can contain the following attributes:
+
+| Name | Default value | |
+|--------------------|----------------|----------------------------------------------|
+| name | UNDEF | The name of the network interface. |
+| interface | UNDEF | Type of the network interface. |
+| mac_address | UNDEF | Custom MAC address of the network interface; by default it's obtained from the MAC pool. |
+| network | UNDEF | Logical network which the VM network interface should use. If no network is specified, the Empty network is used. |
+| profile | UNDEF | Virtual network interface profile to be attached to VM network interface. |
+
+The `affinity_groups` list can contain the following attributes:
+
+| Name | Default value | |
+|--------------------|---------------------|----------------------------------------------|
+| cluster | UNDEF (Required) | Name of the cluster of the affinity group. |
+| description | UNDEF | Human readable description. |
+| host_enforcing | false | <ul><li>true - VM cannot start on host if it does not satisfy the `host_rule`.</li><li>false - VM will follow `host_rule` with soft enforcement.</li></ul>|
+| host_rule | UNDEF | <ul><li>positive - VMs in this group must run on this host.</li> <li>negative - VMs in this group may not run on this host.</li></ul> |
+| hosts | UNDEF | List of host names assigned to this group. |
+| name | UNDEF (Required) | Name of affinity group. |
+| state | UNDEF | Whether group should be present or absent. |
+| vm_enforcing | false | <ul><li>true - VM cannot start if it cannot satisfy the `vm_rule`.</li><li>false - VM will follow `vm_rule` with soft enforcement.</li></ul> |
+| vm_rule | UNDEF | <ul><li>positive - all VMs in this group try to run on the same host.</li><li>negative - all VMs in this group try to run on separate hosts.</li><li>disabled - this affinity group does not take effect.</li></ul> |
+| vms | UNDEF | List of VMs to be assigned to this affinity group. |
+| wait | true | If true, the module will wait for the desired state. |
+
+The `affinity_labels` list can contain the following attributes:
+
+| Name | Default value | |
+|--------------------|---------------------|----------------------------------------------|
+| cluster | UNDEF (Required) | Name of the cluster of the affinity label group. |
+| hosts | UNDEF | List of host names assigned to this label. |
+| name | UNDEF (Required) | Name of affinity label. |
+| state | UNDEF | Whether label should be present or absent. |
+| vms | UNDEF | List of VMs to be assigned to this affinity label. |
+
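+An illustrative `affinity_labels` definition (all names are placeholders):
+
+```yaml
+affinity_labels:
+  - name: ssd_hosts
+    cluster: production
+    hosts:
+      - myhost-1
+      - myhost-2
+    vms:
+      - postgresql-vm-0
+```
+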
+The `cloud_init` dictionary can contain the following attributes:
+
+| Name | Description |
+|---------------------|------------------------------------------------------|
+| host_name | Hostname to be set on the Virtual Machine when deployed. |
+| timezone | Timezone to be set on the Virtual Machine when deployed. |
+| user_name | Username whose password will be set on the Virtual Machine when deployed. |
+| root_password | Password to be set for the user specified by the user_name parameter. By default it's set for the root user. |
+| authorized_ssh_keys | SSH keys used to log in to the Virtual Machine. |
+| regenerate_ssh_keys | If True, SSH keys will be regenerated on the Virtual Machine. |
+| custom_script | Cloud-init script which will be executed on the Virtual Machine when deployed. This is appended to the end of the cloud-init script generated by any other options. |
+| dns_servers | DNS servers to be configured on the Virtual Machine. |
+| dns_search | DNS search domains to be configured on the Virtual Machine. |
+| nic_boot_protocol | Set the boot protocol of the network interface of the Virtual Machine. Can be one of none, dhcp or static. |
+| nic_ip_address | If the boot protocol is static, set this IP address on the network interface of the Virtual Machine. |
+| nic_netmask | If the boot protocol is static, set this netmask on the network interface of the Virtual Machine. |
+| nic_gateway | If the boot protocol is static, set this gateway on the network interface of the Virtual Machine. |
+| nic_name | Name of the network interface of the Virtual Machine. |
+| nic_on_boot | If True, the network interface will be set to start on boot. |
+
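+For example, a `cloud_init` dictionary combining the options above (all values
+are illustrative):
+
+```yaml
+cloud_init:
+  host_name: vm1.example.com
+  user_name: root
+  root_password: super_password
+  authorized_ssh_keys: ssh-rsa AAA...LGx user@fqdn
+  dns_servers: 192.168.1.1
+  nic_boot_protocol: static
+  nic_ip_address: 192.168.1.50
+  nic_netmask: 255.255.255.0
+  nic_gateway: 192.168.1.1
+  nic_name: eth0
+  nic_on_boot: true
+```
+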
+The `sysprep` dictionary can contain the following attributes:
+
+| Name | Description |
+|---------------------|------------------------------------------------------|
+| host_name | Hostname to be set on the Virtual Machine when deployed. |
+| active_directory_ou | Active Directory Organizational Unit to be used for user login. |
+| org_name | Organization name to be set on the Windows Virtual Machine. |
+| user_name | Username whose password will be set on the Windows Virtual Machine. |
+| root_password | Password to be set for the user specified by the user_name parameter. By default it's set for the root user. |
+| windows_license_key | License key to be set on the Windows Virtual Machine. |
+| input_locale | Input localization of the Windows Virtual Machine. |
+| system_locale | System localization of the Windows Virtual Machine. |
+| ui_language | UI language of the Windows Virtual Machine. |
+| domain | Domain to be set on the Windows Virtual Machine. |
+| timezone | Timezone to be set on the Windows Virtual Machine. |
+
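+An illustrative `sysprep` dictionary (all values are placeholders):
+
+```yaml
+sysprep:
+  host_name: windows-vm
+  user_name: Administrator
+  root_password: super_password
+  input_locale: en_US
+  system_locale: en_US
+  ui_language: en_US
+  timezone: Eastern Standard Time
+```
+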
+The `cloud_init_nics` variable is a list of dictionaries representing network interfaces to be set up by cloud-init. Use this option when more than one network interface needs to be configured via cloud-init.
+If one network interface is enough, use the cloud_init nic_* parameters instead; the cloud_init nic_* parameters are merged with the cloud_init_nics parameters. Each dictionary can contain the following values.
+
+| Name | Description |
+|---------------------|------------------------------------------------------|
+| nic_boot_protocol | Set the boot protocol of the network interface of the Virtual Machine. Can be one of none, dhcp or static. |
+| nic_ip_address | If the boot protocol is static, set this IP address on the network interface of the Virtual Machine. |
+| nic_netmask | If the boot protocol is static, set this netmask on the network interface of the Virtual Machine. |
+| nic_gateway | If the boot protocol is static, set this gateway on the network interface of the Virtual Machine. |
+| nic_name | Name of the network interface of the Virtual Machine. |
+| nic_on_boot | If True, the network interface will be set to start on boot. |
+
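+For example, configuring two interfaces via cloud-init (values are illustrative):
+
+```yaml
+cloud_init_nics:
+  - nic_name: eth0
+    nic_boot_protocol: dhcp
+    nic_on_boot: true
+  - nic_name: eth1
+    nic_boot_protocol: static
+    nic_ip_address: 10.0.0.5
+    nic_netmask: 255.255.255.0
+    nic_gateway: 10.0.0.1
+    nic_on_boot: true
+```
+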
+Example Playbook
+----------------
+
+```yaml
+---
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars_files:
+    # Contains the `engine_password` variable encrypted using ansible-vault
+ - passwords.yml
+
+ vars:
+ engine_fqdn: ovirt-engine.example.com
+ engine_user: admin@internal
+ engine_cafile: /etc/pki/ovirt-engine/ca.pem
+
+ httpd_vm:
+ cluster: production
+ domain: example.com
+ template: rhel7
+ memory: 2GiB
+ cores: 2
+ ssh_key: ssh-rsa AAA...LGx user@fqdn
+ disks:
+ - size: 10GiB
+ name: data
+ storage_domain: mynfsstorage
+ interface: virtio
+
+ db_vm:
+ cluster: production
+ domain: example.com
+ template: rhel7
+ memory: 4GiB
+ cores: 1
+ ssh_key: ssh-rsa AAA...LGx user@fqdn
+ disks:
+ - size: 50GiB
+ name: data
+ storage_domain: mynfsstorage
+ interface: virtio
+ nics:
+ - name: ovirtmgmt
+ network: ovirtmgmt
+ profile: ovirtmgmt
+
+ vms:
+ - name: postgresql-vm-0
+ tag: postgresql_vm
+ profile: "{{ db_vm }}"
+ - name: postgresql-vm-1
+ tag: postgresql_vm
+ profile: "{{ db_vm }}"
+ - name: apache-vm
+ tag: httpd_vm
+ profile: "{{ httpd_vm }}"
+
+ affinity_groups:
+ - name: db-ag
+ cluster: production
+ vm_enforcing: true
+ vm_rule: negative
+ vms:
+ - postgresql-vm-0
+ - postgresql-vm-1
+
+ roles:
+ - vm_infra
+ collections:
+ - ovirt.ovirt
+```
+
+The example below shows how to use the inventory created by the `vm_infra` role in a follow-up play.
+
+```yaml
+---
+- name: Deploy apache VM
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars_files:
+    # Contains the `engine_password` variable encrypted using ansible-vault
+ - passwords.yml
+
+ vars:
+ wait_for_ip: true
+
+ httpd_vm:
+ cluster: production
+ state: running
+ domain: example.com
+ template: rhel7
+ memory: 2GiB
+ cores: 2
+ ssh_key: ssh-rsa AAA...LGx user@fqdn
+ disks:
+ - size: 10GiB
+ name: data
+ storage_domain: mynfsstorage
+ interface: virtio
+
+ vms:
+ - name: apache-vm
+ tag: apache
+ profile: "{{ httpd_vm }}"
+
+ roles:
+ - vm_infra
+ collections:
+ - ovirt.ovirt
+
+- name: Deploy apache on VM
+ hosts: ovirt_tag_apache
+
+ vars_files:
+ - apache_vars.yml
+
+ roles:
+ - geerlingguy.apache
+```
+
+[![asciicast](https://asciinema.org/a/111662.png)](https://asciinema.org/a/111662)
diff --git a/ansible_collections/ovirt/ovirt/roles/vm_infra/defaults/main.yml b/ansible_collections/ovirt/ovirt/roles/vm_infra/defaults/main.yml
new file mode 100644
index 000000000..be82bfa83
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/vm_infra/defaults/main.yml
@@ -0,0 +1,15 @@
+---
+debug_vm_create: false
+wait_for_ip: false
+wait_for_ip_version: v4
+wait_for_ip_range: "0.0.0.0/0"
+
+# Create VMs timeouts:
+vm_infra_create_single_timeout: 180
+vm_infra_create_poll_interval: 15
+vm_infra_create_all_timeout: "{{ vm_infra_create_single_timeout * (vms | length) | int }}"
+
+# Wait for IPs timeouts:
+vm_infra_wait_for_ip_retries: 5
+vm_infra_wait_for_ip_delay: 5
+vms_passwords: []
diff --git a/ansible_collections/ovirt/ovirt/roles/vm_infra/examples/ovirt_vm_infra.yml b/ansible_collections/ovirt/ovirt/roles/vm_infra/examples/ovirt_vm_infra.yml
new file mode 100644
index 000000000..84081f387
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/vm_infra/examples/ovirt_vm_infra.yml
@@ -0,0 +1,48 @@
+---
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars_files:
+    # Contains the `engine_password` variable encrypted using ansible-vault
+ - passwords.yml
+
+ vars:
+ engine_fqdn: ovirt-engine.example.com
+ engine_user: admin@internal
+ engine_cafile: /etc/pki/ovirt-engine/ca.pem
+
+ debug_vm_create: true
+ db_vm:
+ state: running
+ cluster: mycluster
+ template: centos7
+ memory: 1GiB
+ memory_max: 2GiB
+ cores: 1
+ tag:
+ - db
+ - dbvm
+ disks:
+ - size: 1GiB
+ name: data
+ storage_domain: data
+ interface: virtio
+
+ vms:
+ - name: postgresql-vm-0
+ memory: 2GiB
+ cloud_init:
+ host_name: ps.example.com
+ root_password: 'mypassword'
+ authorized_ssh_keys: ssh-rsa A...LGx ondra@ondra
+ profile: "{{ db_vm }}"
+ tag:
+ - pgsql
+ - httpd
+
+ roles:
+ - vm_infra
+ collections:
+ - ovirt.ovirt
diff --git a/ansible_collections/ovirt/ovirt/roles/vm_infra/examples/ovirt_vm_infra_inv.yml b/ansible_collections/ovirt/ovirt/roles/vm_infra/examples/ovirt_vm_infra_inv.yml
new file mode 100644
index 000000000..354145a8f
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/vm_infra/examples/ovirt_vm_infra_inv.yml
@@ -0,0 +1,72 @@
+---
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars_files:
+    # Contains the `engine_password` variable encrypted using ansible-vault
+ - passwords.yml
+
+ vars:
+ engine_fqdn: ovirt-engine.example.com
+ engine_user: admin@internal
+ engine_cafile: /etc/pki/ovirt-engine/ca.pem
+
+ # Must be set to true to create inventory
+ wait_for_ip: true
+ httpd_vm:
+ # Must be set to running to create inventory
+ state: running
+ cluster: mycluster
+ template: mytemplate
+ memory: 1GiB
+ memory_max: 2GiB
+ cores: 1
+ root_password: '123456'
+
+ db_vm:
+ # Must be set to running to create inventory
+ state: running
+ cluster: mycluster
+ template: mytemplate
+ memory: 1GiB
+ memory_max: 2GiB
+ cores: 1
+ tag: db
+ ssh_key: ssh-rsa AAAAB...Gx ondra@ondra
+
+ vms:
+ - name: httpd-vm-1
+ profile: httpd_vm
+ tag: httpd
+ - name: db-vm-1
+ profile: db_vm
+ - name: db-vm-2
+ profile: db_vm
+
+ roles:
+ - vm_infra
+ collections:
+ - ovirt.ovirt
+
+
+# This role also creates an inventory of the created virtual machines if wait_for_ip is set to true
+# and the VM state is running. All defined virtual machines are part of the ovirt_vm inventory group.
+# The role also creates ovirt_tag_{tag_name} groups if there are any tags assigned to a virtual
+# machine and places all virtual machines with that tag into that inventory group.
+- name: Print info about httpd VM
+ hosts: ovirt_tag_httpd
+
+ tasks:
+ - name: Print info about httpd VM
+ debug:
+ msg: "{{ hostvars }}"
+
+- name: Print info about db VMs
+ hosts: ovirt_tag_db
+
+ tasks:
+ - name: Print info about db VM
+ debug:
+ msg: "{{ hostvars }}"
diff --git a/ansible_collections/ovirt/ovirt/roles/vm_infra/examples/passwords.yml b/ansible_collections/ovirt/ovirt/roles/vm_infra/examples/passwords.yml
new file mode 100644
index 000000000..92c7613c9
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/vm_infra/examples/passwords.yml
@@ -0,0 +1,12 @@
+---
+# As an example this file is kept in plaintext; if you want to
+# encrypt this file, please execute the following command:
+#
+# $ ansible-vault encrypt passwords.yml
+#
+# It will ask you for a password, which you must then pass to
+# Ansible interactively when executing the playbook.
+#
+# $ ansible-playbook myplaybook.yml --ask-vault-pass
+#
+engine_password: 123456
diff --git a/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/affinity_groups.yml b/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/affinity_groups.yml
new file mode 100644
index 000000000..c95e3978d
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/affinity_groups.yml
@@ -0,0 +1,22 @@
+---
+#########################################
+# Affinity groups
+#########################################
+- name: Create affinity groups
+ ovirt_affinity_group:
+ auth: "{{ ovirt_auth }}"
+ cluster: "{{ item.cluster | default(omit) }}"
+ description: "{{ item.description | default(omit) }}"
+ host_enforcing: "{{ item.host_enforcing | default(omit) }}"
+ host_rule: "{{ item.host_rule | default(omit) }}"
+ hosts: "{{ item.hosts | default(omit) }}"
+ name: "{{ item.name }}"
+ state: "{{ item.state | default(omit) }}"
+ vm_enforcing: "{{ item.vm_enforcing | default(omit) }}"
+ vm_rule: "{{ item.vm_rule | default(omit) }}"
+ vms: "{{ item.vms | default([]) }}"
+ wait: "{{ item.wait | default(omit) }}"
+ with_items:
+ - "{{ affinity_groups | default([]) }}"
+ tags:
+ - affinity_groups
diff --git a/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/affinity_labels.yml b/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/affinity_labels.yml
new file mode 100644
index 000000000..cf945dba4
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/affinity_labels.yml
@@ -0,0 +1,13 @@
+---
+- name: Create affinity labels
+ ovirt_affinity_label:
+ auth: "{{ ovirt_auth }}"
+ cluster: "{{ item.cluster | default(omit) }}"
+ hosts: "{{ item.hosts | default(omit) }}"
+ name: "{{ item.name }}"
+ state: "{{ item.state | default(omit) }}"
+ vms: "{{ item.vms | default([]) }}"
+ with_items:
+ - "{{ affinity_labels | default([]) }}"
+ tags:
+ - affinity_labels
diff --git a/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/create_inventory.yml b/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/create_inventory.yml
new file mode 100644
index 000000000..4316dfe9d
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/create_inventory.yml
@@ -0,0 +1,44 @@
+---
+- name: Fetch created VMs
+ ovirt_vm_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "name={{ vms | map(attribute='name') | join(' or name=') }}"
+ fetch_nested: true
+ nested_attributes:
+ - ips
+ - name
+ register: created_vms_info
+
+- name: Set ovirt_vms
+ ansible.builtin.set_fact:
+ ovirt_vms: "{{ created_vms_info.ovirt_vms }}"
+
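+# The groups expression below builds one ovirt_tag_<tag name> group per tag
+# assigned to the VM, plus the common ovirt_vm group; no_log hides the root
+# passwords passed to add_host via ansible_password.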
+- name: Create inventory of VMs IPv4
+ no_log: true
+ ansible.builtin.add_host:
+ name: "{{ item.name }}"
+ ansible_host: "{{ item | ovirt.ovirt.ovirtvmipv4(network_ip=wait_for_ip_range) }}"
+ groups: "{{ (['ovirt_tag_'] * item.tags | length) | zip(item.tags | map(attribute='name') | list) | map('join') | list + ['ovirt_vm'] }}"
+ ansible_user: root
+ ansible_password: "{{ vms_passwords | ovirt.ovirt.filtervalue('name', item.name) | map(attribute='root_password') | first | default(omit) }}"
+ ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+ with_items: "{{ ovirt_vms }}"
+ changed_when: false
+ when: "wait_for_ip_version == 'v4'"
+ loop_control:
+ label: "{{ item.name }}"
+
+- name: Create inventory of VMs IPv6
+ no_log: true
+ ansible.builtin.add_host:
+ name: "{{ item.name }}"
+ ansible_host: "{{ item | ovirt.ovirt.ovirtvmipv6 }}"
+ groups: "{{ (['ovirt_tag_'] * item.tags | length) | zip(item.tags | map(attribute='name') | list) | map('join') | list + ['ovirt_vm'] }}"
+ ansible_user: root
+ ansible_password: "{{ vms_passwords | ovirt.ovirt.filtervalue('name', item.name) | map(attribute='root_password') | first | default(omit) }}"
+ ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+ with_items: "{{ ovirt_vms }}"
+ changed_when: false
+ when: "wait_for_ip_version == 'v6'"
+ loop_control:
+ label: "{{ item.name }}"
diff --git a/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/create_vms.yml b/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/create_vms.yml
new file mode 100644
index 000000000..dbca0bb61
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/create_vms.yml
@@ -0,0 +1,48 @@
+---
+- name: "Create VM {{ current_vm.name }}"
+ ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ state: "present"
+ name: "{{ current_vm.name }}"
+ clone: "{{ current_vm.clone | default(current_vm.profile.clone) | default(omit) }}"
+ cluster: "{{ current_vm.cluster | default(current_vm.profile.cluster) | default(omit) }}"
+ template: "{{ current_vm.template | default(current_vm.profile.template) | default(omit) }}"
+ template_version: "{{ current_vm.template_version | default(current_vm.profile.template_version) | default(omit) }}"
+ ballooning_enabled: "{{ current_vm.ballooning_enabled | default(current_vm.profile.ballooning_enabled) | default(omit) }}"
+ host: "{{ current_vm.host | default(current_vm.profile.host) | default(omit) }}"
+ memory: "{{ current_vm.memory | default(current_vm.profile.memory) | default(omit) }}"
+ memory_max: "{{ current_vm.memory_max | default(current_vm.profile.memory_max) | default(omit) }}"
+ memory_guaranteed: "{{ current_vm.memory_guaranteed | default(current_vm.profile.memory_guaranteed) | default(omit) }}"
+ cpu_cores: "{{ current_vm.cores | default(current_vm.profile.cores) | default(omit) }}"
+ cpu_sockets: "{{ current_vm.sockets | default(current_vm.profile.sockets) | default(omit) }}"
+ cpu_shares: "{{ current_vm.cpu_shares | default(current_vm.profile.cpu_shares) | default(omit) }}"
+ cpu_threads: "{{ current_vm.cpu_threads | default(current_vm.profile.cpu_threads) | default(omit) }}"
+ cpu_mode: "{{ current_vm.cpu_mode | default(current_vm.profile.cpu_mode) | default(omit) }}"
+ boot_devices: "{{ current_vm.boot_devices | default(current_vm.profile.boot_devices) | default(omit) }}"
+ placement_policy: "{{ 'user_migratable'
+ if ((current_vm.profile.cpu_mode is defined and current_vm.profile.cpu_mode == 'host_passthrough')
+ or (current_vm.cpu_mode is defined and current_vm.cpu_mode == 'host_passthrough'))
+ else current_vm.placement_policy | default(current_vm.profile.placement_policy) | default(omit) }}"
+ custom_properties: "{{ current_vm.custom_properties | default(current_vm.profile.custom_properties) | default(omit) }}"
+ description: "{{ current_vm.description | default(current_vm.profile.description) | default(omit) }}"
+ operating_system: "{{ current_vm.operating_system | default(current_vm.profile.operating_system) | default(omit) }}"
+ type: "{{ current_vm.type | default(current_vm.profile.type) | default(omit) }}"
+ high_availability: "{{ current_vm.high_availability | default(current_vm.profile.high_availability) | default(omit) }}"
+ high_availability_priority: "{{ current_vm.high_availability_priority | default(current_vm.profile.high_availability_priority) | default(omit) }}"
+ io_threads: "{{ current_vm.io_threads | default(current_vm.profile.io_threads ) | default(omit) }}"
+ storage_domain: "{{ current_vm.storage_domain | default(current_vm.profile.storage_domain) | default(omit) }}"
+ disk_format: "{{ current_vm.disk_format | default(current_vm.profile.disk_format) | default(omit) }}"
+ lease: "{{ current_vm.lease | default(current_vm.profile.lease) | default(omit) }}"
+ serial_console: "{{ current_vm.serial_console | default(current_vm.profile.serial_console) | default(omit) }}"
+ serial_policy: "{{ current_vm.serial_policy | default(current_vm.profile.serial_policy) | default(omit) }}"
+ serial_policy_value: "{{ current_vm.serial_policy_value | default(current_vm.profile.serial_policy_value) | default(omit) }}"
+ timeout: "{{ vm_infra_create_single_timeout }}"
+ comment: "{{ current_vm.comment | default(current_vm.profile.comment) | default(omit) }}"
+ changed_when: false
+ async: "{{ vm_infra_create_single_timeout }}"
+ poll: 0
+ register: added_vm
+
+- name: "Add created vm to all_vms"
+ ansible.builtin.set_fact:
+ all_vms: "{{ all_vms | default([]) + [added_vm] }}"
diff --git a/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/main.yml b/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/main.yml
new file mode 100644
index 000000000..2ea6df82f
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/main.yml
@@ -0,0 +1,63 @@
+---
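+# The vms list is replaced by a copy with sensitive data stripped so it can be
+# logged safely; the original values are kept in sensitive_vms for the tasks
+# that need them (see manage_state.yml).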
+- name: Set sensitive vms
+ ansible.builtin.set_fact:
+ sensitive_vms: "{{ vms }}"
+ no_log: true
+
+- name: Strip sensitive data from vms
+ ansible.builtin.set_fact:
+ vms: "{{ vms | ovirt.ovirt.removesensitivevmdata }}"
+
+- name: Main block
+ block:
+ - name: Login to oVirt
+ ovirt_auth:
+ url: "{{ engine_url | default(lookup('env','OVIRT_URL')) | default(omit) }}"
+ hostname: "{{ engine_fqdn | default(lookup('env','OVIRT_HOSTNAME')) | default(omit) }}"
+ username: "{{ engine_user | default(lookup('env','OVIRT_USERNAME')) | default(omit) }}"
+ password: "{{ engine_password | default(lookup('env','OVIRT_PASSWORD')) | default(omit) }}"
+ ca_file: "{{ engine_cafile | default(lookup('env','OVIRT_CAFILE')) | default(omit) }}"
+ insecure: "{{ engine_insecure | default(true) }}"
+ when: ovirt_auth is undefined or not ovirt_auth
+ register: loggedin
+
+ - name: Split list of VMs
+ ansible.builtin.set_fact:
+ create_vms: "{{ create_vms | default([]) + [item] }}"
+ with_items: "{{ vms }}"
+ when: (item.state is undefined and item.profile.state is defined and item.profile.state != 'absent')
+ or (item.state is defined and item.state != 'absent') or (item.state is undefined and item.profile.state is undefined)
+    # Uses the item state first; if it is not defined, the profile state is checked and used.
+
+ - name: Split list of sensitive VMs
+ ansible.builtin.set_fact:
+ create_sensitive_vms: "{{ create_sensitive_vms | default([]) + [item] }}"
+ with_items: "{{ sensitive_vms }}"
+ no_log: true
+ when: (item.state is undefined and item.profile.state is defined and item.profile.state != 'absent')
+ or (item.state is defined and item.state != 'absent') or (item.state is undefined and item.profile.state is undefined)
+    # Uses the item state first; if it is not defined, the profile state is checked and used.
+
+ - name: Delete VM
+ include_tasks: vm_state_absent.yml
+ with_items: "{{ vms }}"
+ loop_control:
+ loop_var: current_vm
+ when: (current_vm.state is defined and current_vm.state == 'absent')
+ or (current_vm.profile.state is defined and current_vm.state is undefined and current_vm.profile.state == 'absent')
+
+ - name: Include create VM
+ include_tasks: vm_state_present.yml
+ when: create_vms is defined
+
+ always:
+ - name: Unset facts
+ ansible.builtin.set_fact:
+ create_vms: []
+ create_sensitive_vms: []
+
+ - name: Logout from oVirt
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
+ when: not loggedin.skipped | default(false)
diff --git a/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/manage_state.yml b/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/manage_state.yml
new file mode 100644
index 000000000..7d42991e9
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/manage_state.yml
@@ -0,0 +1,91 @@
+---
+########################################################################
+# Legacy modification of cloud_init:
+########################################################################
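+# Merges the legacy top-level ssh_key / root_password / domain shortcuts into
+# the cloud_init dictionary and records any root password in vms_passwords so
+# that create_inventory.yml can set ansible_password for the created hosts.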
+- name: Set cloud_init
+ block:
+ - name: Set cloud_init fact
+ ansible.builtin.set_fact:
+ cloud_init: "{{ current_vm.cloud_init | default(current_vm.profile.cloud_init) | default({}) }}"
+
+ - name: Define cloud_init block
+ block:
+ - name: Define cloud_init user_name
+ ansible.builtin.set_fact:
+ cloud_init: "{{ cloud_init | combine({'user_name': cloud_init.user_name | default('root') }) }}"
+
+ - name: Define cloud_init authorized_ssh_keys
+ ansible.builtin.set_fact:
+ cloud_init: "{{ cloud_init | combine({'authorized_ssh_keys': current_vm.ssh_key | default(current_vm.profile.ssh_key)}) }}"
+ when: current_vm.profile.ssh_key is defined or current_vm.ssh_key is defined
+
+ - name: Define cloud_init root_password
+ ansible.builtin.set_fact:
+ cloud_init: "{{ cloud_init | combine({'root_password': current_vm.root_password | default(current_vm.profile.root_password)}) }}"
+ when: current_vm.profile.root_password is defined or current_vm.root_password is defined
+
+ - name: Define cloud_init host_name
+ ansible.builtin.set_fact:
+ cloud_init: "{{ cloud_init | combine({'host_name': current_vm.name~'.'~current_vm.domain | default(current_vm.profile.domain)}) }}"
+ when: current_vm.profile.domain is defined or current_vm.domain is defined
+
+ - name: Define vm/password dictionary
+ ansible.builtin.set_fact:
+ vms_passwords: "{{ vms_passwords + [{'name': current_vm.name, 'root_password': cloud_init.root_password}] }}"
+ when: "'root_password' in cloud_init"
+ when: current_vm.cloud_init is defined or current_vm.profile.cloud_init is defined
+ no_log: true
+
+########################################################################
+# Legacy modification of sysprep:
+########################################################################
+
+- name: Set sysprep
+ block:
+ - name: Set sysprep fact
+ ansible.builtin.set_fact:
+ sysprep: "{{ current_vm.sysprep | default(current_vm.profile.sysprep) | default({}) }}"
+
+  - name: Define sysprep block
+ block:
+      - name: Define sysprep user_name
+ ansible.builtin.set_fact:
+ sysprep: "{{ sysprep | combine({'user_name': sysprep.user_name | default('Administrator') }) }}"
+
+      - name: Define sysprep root_password
+ ansible.builtin.set_fact:
+ sysprep: "{{ sysprep | combine({'root_password': current_vm.root_password | default(current_vm.profile.root_password)}) }}"
+ when: current_vm.profile.root_password is defined or current_vm.root_password is defined
+
+      - name: Define sysprep host_name
+ ansible.builtin.set_fact:
+ sysprep: "{{ sysprep | combine({'host_name': current_vm.name~'.'~current_vm.domain | default(current_vm.profile.domain)}) }}"
+ when: current_vm.profile.domain is defined or current_vm.domain is defined
+
+ - name: Define vm/password dictionary
+ ansible.builtin.set_fact:
+ vms_passwords: "{{ vms_passwords + [{'name': current_vm.name, 'root_password': sysprep.root_password}] }}"
+ when: "'root_password' in sysprep"
+ when: current_vm.sysprep is defined or current_vm.profile.sysprep is defined
+ no_log: true
+########################################################################
+########################################################################
+
+- name: "Manage VM '{{ current_vm.name }}' state"
+ ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ state: "{{ current_vm.state | default(current_vm.profile.state) | default('present') }}"
+ name: "{{ current_vm.name }}"
+ sysprep: "{{ (sysprep | length > 0) | ternary(sysprep, omit) }}"
+ cloud_init: "{{ (cloud_init | length > 0) | ternary(cloud_init, omit) }}"
+ cloud_init_persist: "{{ current_vm.cloud_init_persist | default(current_vm.profile.cloud_init_persist) | default(omit) }}"
+ cloud_init_nics: "{{ current_vm.cloud_init_nics | default(current_vm.profile.cloud_init_nics) | default(omit) }}"
+ timeout: "{{ vm_infra_create_single_timeout }}"
+ changed_when: false
+ async: "{{ vm_infra_create_single_timeout }}"
+ poll: 0
+ register: started_vm
+
+- name: Set started_vms list
+ ansible.builtin.set_fact:
+ started_vms: "{{ started_vms | default([]) + [started_vm] }}"
diff --git a/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/vm_state_absent.yml b/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/vm_state_absent.yml
new file mode 100644
index 000000000..386344a41
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/vm_state_absent.yml
@@ -0,0 +1,6 @@
+---
+- name: "Remove VM '{{ current_vm.name }}'"
+ ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ state: "absent"
+ name: "{{ current_vm.name }}"
diff --git a/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/vm_state_present.yml b/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/vm_state_present.yml
new file mode 100644
index 000000000..b34fba9d9
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/vm_state_present.yml
@@ -0,0 +1,159 @@
+---
+- name: Create VMs
+ include_tasks: create_vms.yml
+ with_items: "{{ create_vms }}"
+ loop_control:
+ loop_var: "current_vm"
+
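+# create_vms.yml starts every creation as an async job (poll: 0) and collects
+# the job handles in all_vms; the task below polls those jobs until they all
+# finish or the overall timeout is exceeded.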
+- name: Wait for VMs to be added
+ ansible.builtin.async_status: "jid={{ item.ansible_job_id }}"
+ register: job_result
+ with_items: "{{ all_vms }}"
+ when: all_vms is defined
+ until: job_result.finished
+ retries: "{{ (vm_infra_create_all_timeout | int // vm_infra_create_poll_interval) + 1 }}"
+ delay: "{{ vm_infra_create_poll_interval }}"
+
+- name: Apply any Affinity Groups
+ import_tasks: affinity_groups.yml
+
+- name: Apply any Affinity Labels
+ import_tasks: affinity_labels.yml
+
+- name: Manage profile disks
+ ovirt_disk:
+ auth: "{{ ovirt_auth }}"
+ name: "{% if item.1.name_prefix | default(true) %}{{ item.0.name }}_{% endif %}{{ item.1.name }}"
+ vm_name: "{{ item.0.name }}"
+ id: "{{ item.1.id | default(omit) }}"
+ size: "{{ item.1.size | default(omit) }}"
+ format: "{{ item.1.format | default(omit) }}"
+ storage_domain: "{{ item.1.storage_domain | default(omit) }}"
+ interface: "{{ item.1.interface | default(omit) }}"
+ bootable: "{{ item.1.bootable | default(omit) }}"
+ activate: "{{ item.1.activate | default(omit) }}"
+ wait: true
+  # When there are no profile disks, the vms disks are used
+ with_subelements:
+ - "{{ create_vms }}"
+ - "profile.disks"
+ - flags:
+ skip_missing: true
+
+- name: Manage virtual machines disks
+ ovirt_disk:
+ auth: "{{ ovirt_auth }}"
+ name: "{% if item.1.name_prefix | default(true) %}{{ item.0.name }}_{% endif %}{{ item.1.name }}"
+ vm_name: "{{ item.0.name }}"
+ id: "{{ item.1.id | default(omit) }}"
+ size: "{{ item.1.size | default(omit) }}"
+ format: "{{ item.1.format | default(omit) }}"
+ storage_domain: "{{ item.1.storage_domain | default(omit) }}"
+ interface: "{{ item.1.interface | default(omit) }}"
+ bootable: "{{ item.1.bootable | default(omit) }}"
+ activate: "{{ item.1.activate | default(omit) }}"
+ wait: true
+  # When the profile defines disks, the vms disks update them
+ with_subelements:
+ - "{{ create_vms }}"
+ - "disks"
+ - flags:
+ skip_missing: true
+
+- name: Manage profile NICs
+ ovirt_nic:
+ auth: "{{ ovirt_auth }}"
+ vm: "{{ item.0.name }}"
+ name: "{{ item.1.name | default(omit) }}"
+ interface: "{{ item.1.interface | default(omit) }}"
+ mac_address: "{{ item.1.mac_address | default(omit) }}"
+ profile: "{{ item.1.profile | default(omit) }}"
+ network: "{{ item.1.network | default(omit) }}"
+  # When there are no profile NICs, the vms NICs are used
+ with_subelements:
+ - "{{ create_vms }}"
+ - "profile.nics"
+ - flags:
+ skip_missing: true
+
+- name: Manage virtual machines NICs
+ ovirt_nic:
+ auth: "{{ ovirt_auth }}"
+ vm: "{{ item.0.name }}"
+ name: "{{ item.1.name | default(omit) }}"
+ interface: "{{ item.1.interface | default(omit) }}"
+ mac_address: "{{ item.1.mac_address | default(omit) }}"
+ profile: "{{ item.1.profile | default(omit) }}"
+ network: "{{ item.1.network | default(omit) }}"
+  # When the profile defines NICs, the vms NICs update them
+ with_subelements:
+ - "{{ create_vms }}"
+ - "nics"
+ - flags:
+ skip_missing: true
+
+- name: Manage VMs state
+ include_tasks: manage_state.yml
+ with_items: "{{ create_sensitive_vms }}"
+ loop_control:
+ loop_var: "current_vm"
+ no_log: true
+
+- name: Wait for VMs to be started
+ no_log: "{{ not debug_vm_create }}"
+ ansible.builtin.async_status: "jid={{ item.ansible_job_id }}"
+ register: job_result
+ with_items: "{{ started_vms }}"
+ when: started_vms is defined
+ until: job_result.finished
+ retries: "{{ (vm_infra_create_all_timeout | int // vm_infra_create_poll_interval) + 1 }}"
+ delay: "{{ vm_infra_create_poll_interval }}"
+
+# to_json|from_json in vms is a workaround for: https://github.com/ansible/ansible/issues/27299
+- name: Apply tags defined directly on VMs
+ ovirt_tag:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ item }}"
+ vms: "{{ create_vms | to_json | from_json | ovirt.ovirt.json_query(query) | map(attribute='name') | list }}"
+ with_items: "{{ create_vms | selectattr('tag', 'defined') | map(attribute='tag') | list | unique }}"
+ vars:
+ query: "[?contains(tag, '{{ item }}')]"
+
+- name: Apply tags from VM profiles
+ ovirt_tag:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ item }}"
+ vms: "{{ create_vms | to_json | from_json | ovirt.ovirt.json_query(query) | map(attribute='name') | list }}"
+ with_items: "{{ create_vms | selectattr('profile', 'defined') | map(attribute='profile')
+ | selectattr('tag', 'defined') | map(attribute='tag') | list | unique }}"
+ vars:
+ query: "[?contains(profile.tag, '{{ item }}')]"
+ defined_vms: "{{ create_vms | selectattr('profile.tag', 'defined') | list | unique }}"
+
+
+- name: Wait for IP block
+ block:
+ - name: Filter ovirt_vms to get IP
+ ansible.builtin.set_fact:
+ ip_cond: "vm_info.ovirt_vms | ovirt.ovirt.ovirtvmip{{ wait_for_ip_version }}(network_ip='{{ wait_for_ip_range }}') | length > 0"
+
+ - name: Wait for VMs IP
+ ovirt_vm_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "name={{ item.name }}"
+ fetch_nested: true
+ nested_attributes: ips
+ with_items:
+ - "{{ create_vms }}"
+ until: "ip_cond"
+ retries: "{{ vm_infra_wait_for_ip_retries }}"
+ register: vm_info
+ delay: "{{ vm_infra_wait_for_ip_delay }}"
+ when: "(item.state is undefined and item.profile.state is defined and item.profile.state != 'stopped')
+ and (item.state is defined and item.state != 'stopped') | default('present') != 'stopped'"
+ # FIXME: Refactor the condition
+
+ - name: Create inventory
+ include_tasks: create_inventory.yml
+
+ when: "wait_for_ip"
diff --git a/ansible_collections/ovirt/ovirt/tests/.gitignore b/ansible_collections/ovirt/ovirt/tests/.gitignore
new file mode 100644
index 000000000..ea1472ec1
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/tests/.gitignore
@@ -0,0 +1 @@
+output/
diff --git a/ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.10.txt b/ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.10.txt
new file mode 100644
index 000000000..9d12b04e8
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.10.txt
@@ -0,0 +1,2 @@
+automation/build.sh shebang!skip
+build.sh shebang!skip
diff --git a/ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.11.txt b/ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.11.txt
new file mode 100644
index 000000000..cd443df48
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.11.txt
@@ -0,0 +1,13 @@
+automation/build.sh shebang!skip
+build.sh shebang!skip
+changelogs/fragments/.placeholder changelog!skip
+plugins/callback/stdout.py shebang!skip
+roles/disaster_recovery/files/fail_back.py shebang!skip
+roles/disaster_recovery/files/bcolors.py shebang!skip
+roles/disaster_recovery/files/fail_over.py shebang!skip
+roles/disaster_recovery/files/generate_mapping.py shebang!skip
+roles/disaster_recovery/files/generate_vars.py shebang!skip
+roles/disaster_recovery/files/generate_vars_test.py shebang!skip
+roles/disaster_recovery/files/validator.py shebang!skip
+roles/disaster_recovery/files/vault_secret.sh shellcheck!skip
+roles/disaster_recovery/files/ovirt-dr shebang!skip \ No newline at end of file
diff --git a/ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.12.txt b/ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.12.txt
new file mode 100644
index 000000000..cd66c06fc
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.12.txt
@@ -0,0 +1,14 @@
+automation/build.sh shebang!skip
+build.sh shebang!skip
+changelogs/fragments/.placeholder changelog!skip
+plugins/callback/stdout.py shebang!skip
+roles/disaster_recovery/files/fail_back.py shebang!skip
+roles/disaster_recovery/files/bcolors.py shebang!skip
+roles/disaster_recovery/files/fail_over.py shebang!skip
+roles/disaster_recovery/files/generate_mapping.py shebang!skip
+roles/disaster_recovery/files/generate_vars.py shebang!skip
+roles/disaster_recovery/files/generate_vars_test.py shebang!skip
+roles/disaster_recovery/files/validator.py shebang!skip
+roles/disaster_recovery/files/vault_secret.sh shellcheck!skip
+roles/disaster_recovery/files/ovirt-dr shebang!skip
+plugins/module_utils/cloud.py pylint!skip
diff --git a/ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.13.txt b/ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.13.txt
new file mode 100644
index 000000000..9428c0440
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.13.txt
@@ -0,0 +1,16 @@
+automation/build.sh shebang!skip
+build.sh shebang!skip
+changelogs/fragments/.placeholder changelog!skip
+plugins/callback/stdout.py shebang!skip
+plugins/callback/stdout.py validate-modules!skip
+plugins/inventory/ovirt.py validate-modules!skip
+roles/disaster_recovery/files/fail_back.py shebang!skip
+roles/disaster_recovery/files/bcolors.py shebang!skip
+roles/disaster_recovery/files/fail_over.py shebang!skip
+roles/disaster_recovery/files/generate_mapping.py shebang!skip
+roles/disaster_recovery/files/generate_vars.py shebang!skip
+roles/disaster_recovery/files/generate_vars_test.py shebang!skip
+roles/disaster_recovery/files/validator.py shebang!skip
+roles/disaster_recovery/files/vault_secret.sh shellcheck!skip
+roles/disaster_recovery/files/ovirt-dr shebang!skip
+plugins/module_utils/cloud.py pylint!skip
diff --git a/ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.9.txt b/ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.9.txt
new file mode 100644
index 000000000..9d12b04e8
--- /dev/null
+++ b/ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.9.txt
@@ -0,0 +1,2 @@
+automation/build.sh shebang!skip
+build.sh shebang!skip