Diffstat (limited to 'collections-debian-merged/ansible_collections/ovirt')
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/.github/ISSUE_TEMPLATE/bug_report.md  27
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/.github/ISSUE_TEMPLATE/feature_request.md  22
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/.gitignore  9
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/CHANGELOG.rst  229
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/FILES.json  3239
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/MANIFEST.json  33
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/README-developers.md  4
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/README.md  89
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/automation.yaml  4
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/automation/README.md  8
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/automation/build-artifacts-manual.packages  8
-rwxr-xr-x  collections-debian-merged/ansible_collections/ovirt/ovirt/automation/build-artifacts-manual.sh  77
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/automation/build-artifacts.packages  8
-rwxr-xr-x  collections-debian-merged/ansible_collections/ovirt/ovirt/automation/build-artifacts.sh  77
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/automation/check-patch.packages  8
-rwxr-xr-x  collections-debian-merged/ansible_collections/ovirt/ovirt/automation/check-patch.sh  77
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/bindep.txt  9
-rwxr-xr-x  collections-debian-merged/ansible_collections/ovirt/ovirt/build.sh  81
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/changelogs/README.md  28
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/changelogs/changelog.yaml  357
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/changelogs/config.yaml  31
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/changelogs/fragments/.gitignore  0
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/examples/filters/ovirtdiff.yml  33
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/examples/filters/vmips.yml  77
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/examples/ovirt_ansible_collections.yml  21
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/licenses/Apache-license.txt  191
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/licenses/GPL-license.txt  674
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/meta/runtime.yml  82
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/ovirt-ansible-collection.spec  142
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/ovirt-ansible-collection.spec.in  142
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/callback/stdout.py  56
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/doc_fragments/ovirt.py  104
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/doc_fragments/ovirt_info.py  89
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmip.py  147
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/inventory/ovirt.py  270
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/module_utils/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/module_utils/cloud.py  208
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/module_utils/ovirt.py  874
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_affinity_group.py  340
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_affinity_label.py  217
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_affinity_label_info.py  175
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_api_info.py  84
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_auth.py  310
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_cluster.py  792
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_cluster_info.py  113
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_datacenter.py  328
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_datacenter_info.py  97
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_disk.py  921
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_disk_info.py  112
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_event.py  234
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_event_info.py  152
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_external_provider.py  424
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_external_provider_info.py  153
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_group.py  187
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_group_info.py  109
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host.py  711
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_info.py  135
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_network.py  607
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_pm.py  267
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_storage_info.py  176
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_instance_type.py  632
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_job.py  237
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_mac_pool.py  186
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_network.py  380
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_network_info.py  113
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_nic.py  344
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_nic_info.py  148
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_permission.py  329
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_permission_info.py  157
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_quota.py  331
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_quota_info.py  130
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_role.py  195
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_scheduling_policy_info.py  128
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_snapshot.py  556
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_snapshot_info.py  127
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_connection.py  299
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_domain.py  821
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_domain_info.py  113
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_template_info.py  130
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_vm_info.py  130
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_system_option_info.py  117
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_tag.py  265
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_tag_info.py  158
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_template.py  1086
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_template_info.py  113
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_user.py  178
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_user_info.py  109
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vm.py  2784
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vm_info.py  171
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vm_os_info.py  123
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vmpool.py  491
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vmpool_info.py  111
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vnic_profile.py  330
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vnic_profile_info.py  114
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/test/ovirt_proxied_check.py  46
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/requirements.txt  1
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/README.md  52
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/defaults/main.yml  16
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/examples/cluster_upgrade.yml  26
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/examples/passwords.yml  12
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/cluster_policy.yml  24
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/main.yml  208
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/pinned_vms.yml  16
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/upgrade.yml  100
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/README.md  77
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/defaults/main.yml  37
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/disaster_recovery_vars.yml  109
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/dr_ovirt_setup.yml  11
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/dr_play.yml  8
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/ovirt_passwords.yml  13
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/bcolors.py  21
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/dr.conf  21
-rwxr-xr-x  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/fail_back.py  289
-rwxr-xr-x  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/fail_over.py  244
-rwxr-xr-x  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/generate_mapping.py  445
-rwxr-xr-x  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/generate_vars.py  319
-rwxr-xr-x  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/generate_vars_test.py  38
-rwxr-xr-x  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/ovirt-dr  159
-rwxr-xr-x  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/validator.py  732
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/vault_secret.sh  1
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_disks.yml  10
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_domain.yml  20
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_domain_process.yml  49
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_invalid_filtered_master_domains.yml  20
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_valid_filtered_master_domains.yml  26
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_vms.yml  10
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/shutdown_vm.yml  12
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/shutdown_vms.yml  21
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/update_ovf_store.yml  18
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean_engine.yml  128
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/generate_mapping.yml  6
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/main.yml  33
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_domain.yml  55
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_fcp_domain.yml  30
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_glusterfs_domain.yml  30
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_iscsi_domain.yml  58
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_nfs_domain.yml  26
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_posixfs_domain.yml  31
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/print_info.yml  16
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_template.yml  27
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_templates.yml  21
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_vm.yml  31
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_vms.yml  24
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/report_log_template.j2  24
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/run_vms.yml  21
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover_engine.yml  211
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/run_unregistered_entities.yml  40
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/unregister_entities.yml  60
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/README.md  167
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/defaults/main.yml  39
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/examples/engine-deploy.yml  18
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/examples/engine-upgrade.yml  19
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/examples/passwords.yml  12
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/engine_setup.yml  117
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/install_packages.yml  25
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/main.yml  10
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/pre_install_checks.yml  12
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/restore_engine_from_file.yml  20
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.1_basic.txt.j2  3
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.1_upgrade.txt.j2  1
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.2_basic.txt.j2  14
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.2_upgrade.txt.j2  16
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.3_basic.txt.j2  11
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.3_upgrade.txt.j2  1
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.4_basic.txt.j2  3
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.4_upgrade.txt.j2  1
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/basic_answerfile.txt.j2  64
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/containers-deploy.yml  17
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/engine-deploy.yml  17
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/engine-upgrade.yml  17
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/inventory  4
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/passwords.yml  12
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/requirements.yml  4
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/test-4.2.yml  6
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/test-master.yml  6
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/test-upgrade-4.2-to-master.yml  10
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/vars/main.yml  2
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/README.md  376
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/defaults/main.yml  112
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/hosted_engine_deploy_localhost.yml  8
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/hosted_engine_deploy_remotehost.yml  7
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/iscsi_deployment_remote.json  18
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/nfs_deployment.json  8
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/passwords.yml  13
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/required_networks_fix.yml  56
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_add_host/README.md  3
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_setup/README.md  3
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_setup/add_host_storage_domain.yml  53
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/enginevm_after_engine_setup/README.md  3
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/enginevm_before_engine_setup/README.md  3
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/add_engine_as_ansible_host.yml  24
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/alter_libvirt_default_net_configuration.yml  99
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/apply_openscap_profile.yml  48
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/auth_revoke.yml  6
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/auth_sso.yml  13
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/01_prepare_routing_rules.yml  110
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/02_create_local_vm.yml  144
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/03_engine_initial_tasks.yml  91
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/04_engine_final_tasks.yml  78
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/05_add_host.yml  281
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/clean_local_storage_pools.yml  28
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/clean_localvm_dir.yml  12
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_storage_domain.yml  182
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_target_vm/01_create_target_hosted_engine_vm.yml  186
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_target_vm/02_engine_vm_configuration.yml  91
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_target_vm/03_hosted_engine_final_tasks.yml  431
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/fc_getdevices.yml  11
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/fetch_engine_logs.yml  29
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/fetch_host_ip.yml  47
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/filter_team_devices.yml  35
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/final_clean.yml  11
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/full_execution.yml  73
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/get_local_vm_disk_path.yml  13
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/initial_clean.yml  123
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/install_appliance.yml  36
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/install_packages.yml  9
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/ipv_switch.yml  11
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/iscsi_discover.yml  39
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/iscsi_getdevices.yml  34
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/main.yml  19
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/partial_execution.yml  152
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pause_execution.yml  13
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/001_validate_network_interfaces.yml  79
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/002_validate_hostname_tasks.yml  118
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/define_variables.yml  63
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_data_center_name.yml  15
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_firewalld.yml  14
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_gateway.yml  29
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_mac_address.yml  17
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_memory_size.yml  54
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_network_test.yml  34
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_services_status.yml  17
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_vcpus_count.yml  21
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/restore_backup.yml  108
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/search_available_network_subnet.yaml  60
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/sync_on_engine_machine.yml  9
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/validate_ip_prefix.yml  23
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/broker.conf.j2  8
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/fhanswers.conf.j2  66
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/hosted-engine.conf.j2  44
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/ifcfg-eth0-dhcp.j2  12
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/ifcfg-eth0-static-ipv6.j2  24
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/ifcfg-eth0-static.j2  25
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/meta-data.j2  2
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/network-config-dhcp.j2  11
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/user-data.j2  20
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/version.j2  1
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/vm.conf.j2  17
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/README.md  157
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/defaults/main.yml  21
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/examples/ovirt_image_template.yml  27
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/examples/passwords.yml  12
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/tasks/empty.yml  8
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/tasks/glance_image.yml  60
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/tasks/main.yml  8
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/tasks/qcow2_image.yml  254
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/vars/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/README.md  436
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/defaults/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/examples/ovirt_infra.yml  15
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/examples/ovirt_infra_destroy.yml  43
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/examples/vars/ovirt_infra_vars.yml  108
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/examples/vars/passwords.yml  12
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/aaa_jdbc/README.md  60
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/aaa_jdbc/defaults/main.yml  2
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/aaa_jdbc/tasks/main.yml  88
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/clusters/README.md  106
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/clusters/tasks/main.yml  45
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/clusters/vars/main.yml  27
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/README.md  30
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/defaults/main.yml  2
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/cluster_and_hosts.yml  21
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/datacenter.yml  6
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/disks.yml  16
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/main.yml  39
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/storages_last.yml  11
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/storages_pre.yml  28
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/templates.yml  16
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/vm_pools.yml  15
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/vms.yml  15
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenters/README.md  56
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenters/defaults/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenters/tasks/main.yml  28
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/external_providers/README.md  73
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/external_providers/tasks/main.yml  18
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/hosts/README.md  41
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/hosts/defaults/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/hosts/tasks/main.yml  86
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/mac_pools/README.md  39
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/mac_pools/tasks/main.yml  11
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/networks/README.md  77
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/networks/tasks/main.yml  41
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/permissions/README.md  49
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/permissions/tasks/main.yml  40
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/storages/README.md  65
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/storages/tasks/main.yml  103
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/tasks/create_infra.yml  37
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/tasks/main.yml  48
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/tasks/remove_infra.yml  4
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/README.md  242
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/defaults/main.yml  81
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/examples/cfme.yml  50
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/examples/manageiq.yml  29
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/examples/passwords.yml  12
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/cfme_add_disk.yml  16
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/cfme_identify_disk_device.yml  78
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/deploy_qcow2.yml  86
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/init_cfme.yml  66
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/main.yml  111
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/manage_appliance_roles.yml  33
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/wait_for_api.yml  68
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/templates/add_rhv_provider.j2  43
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/vars/main.yml  10
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/README.md  84
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/defaults/main.yml  13
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/examples/ovirt_repositories_release_rpm.yml  11
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/examples/ovirt_repositories_subscription_manager.yml  22
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/examples/passwords.yml  13
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/tasks/backup-repos.yml  26
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/tasks/main.yml  19
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/tasks/rh-subscription.yml  66
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/tasks/rpm.yml  20
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/default.yml  1
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.1.yml  6
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.2.yml  7
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.3.yml  7
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.4.yml  7
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.1.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.2.yml  4
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.3.yml  4
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.4.yml  7
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.1.yml  2
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.2.yml  2
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.3.yml  2
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.4.yml  2
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/shutdown_env/README.md  53
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/shutdown_env/defaults/main.yml  3
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/shutdown_env/examples/passwords.yml  12
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/shutdown_env/examples/shutdown_env.yml  18
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/shutdown_env/tasks/main.yml  226
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/README.md  324
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/defaults/main.yml  15
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/examples/ovirt_vm_infra.yml  48
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/examples/ovirt_vm_infra_inv.yml  72
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/examples/passwords.yml  12
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/affinity_groups.yml  22
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/affinity_labels.yml  13
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/create_inventory.yml  44
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/create_vms.yml  48
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/main.yml  62
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/manage_state.yml  80
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/vm_state_absent.yml  6
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/vm_state_present.yml  157
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/tests/.gitignore  1
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.10.txt  4
-rw-r--r--  collections-debian-merged/ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.9.txt  4
357 files changed, 39572 insertions, 0 deletions
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/.github/ISSUE_TEMPLATE/bug_report.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 00000000..ed5be98e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,27 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: bug
+assignees: mnecas
+
+---
+
+##### SUMMARY
+
+##### COMPONENT NAME
+
+##### STEPS TO REPRODUCE
+
+```yaml
+
+```
+
+##### EXPECTED RESULTS
+
+
+##### ACTUAL RESULTS
+
+```paste below
+
+```
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/.github/ISSUE_TEMPLATE/feature_request.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 00000000..aed442c8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,22 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: enhancement
+assignees: mnecas
+
+---
+
+##### SUMMARY
+<!--- Describe the new feature/improvement briefly below -->
+
+##### COMPONENT NAME
+<!--- Write the short name of the module -->
+
+##### ADDITIONAL INFORMATION
+<!--- Describe how the feature would be used, why it is needed and what it would solve -->
+
+<!--- Paste example playbooks -->
+```yaml
+
+```
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/.gitignore b/collections-debian-merged/ansible_collections/ovirt/ovirt/.gitignore
new file mode 100644
index 00000000..35fcd721
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/.gitignore
@@ -0,0 +1,9 @@
+*retry
+.tox
+*.tar.gz
+output/
+ovirt-ansible-collection.spec
+exported-artifacts/
+.idea
+
+changelogs/.plugin-cache.yaml
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/CHANGELOG.rst b/collections-debian-merged/ansible_collections/ovirt/ovirt/CHANGELOG.rst
new file mode 100644
index 00000000..31aa9c57
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/CHANGELOG.rst
@@ -0,0 +1,229 @@
+=========================
+ovirt.ovirt Release Notes
+=========================
+
+.. contents:: Topics
+
+
+v1.3.0
+======
+
+Major Changes
+-------------
+
+- ovirt_system_option_info - Add new module (https://github.com/oVirt/ovirt-ansible-collection/pull/206).
+
+Minor Changes
+-------------
+
+- ansible-builder - Update bindep (https://github.com/oVirt/ovirt-ansible-collection/pull/197).
+- hosted_engine_setup - Collect all engine /var/log (https://github.com/oVirt/ovirt-ansible-collection/pull/202).
+- hosted_engine_setup - Use ovirt_system_option_info instead of REST API (https://github.com/oVirt/ovirt-ansible-collection/pull/209).
+- ovirt_disk - Add install warning (https://github.com/oVirt/ovirt-ansible-collection/pull/208).
+- ovirt_info - Fragment add auth suboptions to documentation (https://github.com/oVirt/ovirt-ansible-collection/pull/205).
+
+v1.2.4
+======
+
+Minor Changes
+-------------
+
+- infra - don't require password for user (https://github.com/oVirt/ovirt-ansible-collection/pull/195).
+- inventory - correct os_type name (https://github.com/oVirt/ovirt-ansible-collection/pull/194).
+- ovirt_disk - automatically detect virtual size of qcow image (https://github.com/oVirt/ovirt-ansible-collection/pull/183).
+
+v1.2.3
+======
+
+Minor Changes
+-------------
+
+- engine_setup - Add missing restore task file and vars file (https://github.com/oVirt/ovirt-ansible-collection/pull/180).
+- hosted_engine_setup - Add after_add_host hook (https://github.com/oVirt/ovirt-ansible-collection/pull/181).
+
+v1.2.2
+======
+
+Bugfixes
+--------
+
+- hosted_engine_setup - Clean VNC encryption config (https://github.com/oVirt/ovirt-ansible-collection/pull/175/).
+- inventory plugin - Fix timestamp for Python 2 (https://github.com/oVirt/ovirt-ansible-collection/pull/173).
+
+v1.2.1
+======
+
+Bugfixes
+--------
+
+- disaster_recovery - Fix multiple configuration issues like paths, "~" support, user input messages, etc. (https://github.com/oVirt/ovirt-ansible-collection/pull/160).
+
+v1.2.0
+======
+
+Major Changes
+-------------
+
+- cluster_upgrade - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/94).
+- disaster_recovery - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/134).
+- engine_setup - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/69).
+- hosted_engine_setup - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/106).
+- image_template - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/95).
+- infra - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/92).
+- manageiq - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/97).
+- repositories - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/96).
+- shutdown_env - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/112).
+- vm_infra - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/93).
+
+Minor Changes
+-------------
+
+- Add GPL license (https://github.com/oVirt/ovirt-ansible-collection/pull/101).
+- hosted_engine_setup - Add compatibility_version (https://github.com/oVirt/ovirt-ansible-collection/pull/125).
+- ovirt_disk - ignore move of HE disks (https://github.com/oVirt/ovirt-ansible-collection/pull/162).
+- ovirt_nic - Add template_version (https://github.com/oVirt/ovirt-ansible-collection/pull/145).
+- ovirt_nic_info - Add template (https://github.com/oVirt/ovirt-ansible-collection/pull/146).
+- ovirt_vm_info - Add current_cd (https://github.com/oVirt/ovirt-ansible-collection/pull/144).
+
+Bugfixes
+--------
+
+- 01_create_target_hosted_engine_vm - Force basic authentication (https://github.com/oVirt/ovirt-ansible-collection/pull/131).
+- hosted_engine_setup - Allow uppercase characters in mac address (https://github.com/oVirt/ovirt-ansible-collection/pull/150).
+- hosted_engine_setup - set custom bios type of hosted-engine VM to Q35+SeaBIOS (https://github.com/oVirt/ovirt-ansible-collection/pull/129).
+- hosted_engine_setup - use zcat instead of gzip (https://github.com/oVirt/ovirt-ansible-collection/pull/130).
+- ovirt inventory - Add close of connection at the end (https://github.com/oVirt/ovirt-ansible-collection/pull/122).
+- ovirt_disk - don't move disk when already in storage_domain (https://github.com/oVirt/ovirt-ansible-collection/pull/135).
+- ovirt_disk - fix upload when direct upload fails (https://github.com/oVirt/ovirt-ansible-collection/pull/120).
+- ovirt_vm - Fix template search (https://github.com/oVirt/ovirt-ansible-collection/pull/132).
+- ovirt_vm - Rename q35_sea to q35_sea_bios (https://github.com/oVirt/ovirt-ansible-collection/pull/111).
+
+v1.1.2
+======
+
+v1.1.1
+======
+
+Minor Changes
+-------------
+
+- ovirt_permission - Fix FQCN documentation (https://github.com/oVirt/ovirt-ansible-collection/pull/63).
+
+v1.1.0
+======
+
+Major Changes
+-------------
+
+- ovirt_disk - Add backup (https://github.com/oVirt/ovirt-ansible-collection/pull/57).
+- ovirt_disk - Support direct upload/download (https://github.com/oVirt/ovirt-ansible-collection/pull/35).
+- ovirt_host - Add ssh_port (https://github.com/oVirt/ovirt-ansible-collection/pull/60).
+- ovirt_vm_os_info - Creation of module (https://github.com/oVirt/ovirt-ansible-collection/pull/26).
+
+Minor Changes
+-------------
+
+- ovirt inventory - Add creation_time (https://github.com/oVirt/ovirt-ansible-collection/pull/34).
+- ovirt inventory - Set inventory plugin insecure if no cafile defined (https://github.com/oVirt/ovirt-ansible-collection/pull/58).
+- ovirt_disk - Add upload image warning for correct format (https://github.com/oVirt/ovirt-ansible-collection/pull/22).
+- ovirt_disk - Force wait when uploading disk (https://github.com/oVirt/ovirt-ansible-collection/pull/43).
+- ovirt_disk - Upload_image_path autodetect size (https://github.com/oVirt/ovirt-ansible-collection/pull/19).
+- ovirt_network - Add support of removing vlan_tag (https://github.com/oVirt/ovirt-ansible-collection/pull/21).
+- ovirt_vm - Add documentation for custom_script under sysprep (https://github.com/oVirt/ovirt-ansible-collection/pull/52).
+- ovirt_vm - Hard code nic on_boot to true (https://github.com/oVirt/ovirt-ansible-collection/pull/45).
+
+Bugfixes
+--------
+
+- ovirt_disk - Fix activate (https://github.com/oVirt/ovirt-ansible-collection/pull/61).
+- ovirt_host_network - Fix custom_properties default value (https://github.com/oVirt/ovirt-ansible-collection/pull/65).
+- ovirt_quota - Fix vcpu_limit (https://github.com/oVirt/ovirt-ansible-collection/pull/44).
+- ovirt_vm - Fix cd_iso get all disks from storage domains (https://github.com/oVirt/ovirt-ansible-collection/pull/66).
+- ovirt_vm - Fix cd_iso search by name (https://github.com/oVirt/ovirt-ansible-collection/pull/51).
+
+New Modules
+-----------
+
+- ovirt.ovirt.ovirt_vm_os_info - Retrieve information on all supported oVirt/RHV operating systems
+
+v1.0.0
+======
+
+Minor Changes
+-------------
+
+- ovirt_cluster - Add migration_encrypted option (https://github.com/oVirt/ovirt-ansible-collection/pull/17).
+- ovirt_vm - Add bios_type (https://github.com/oVirt/ovirt-ansible-collection/pull/15).
+
+Bugfixes
+--------
+
+- ovirt_snapshot - Disk id was incorrectly set as disk_snapshot_id (https://github.com/oVirt/ovirt-ansible-collection/pull/5).
+- ovirt_storage_domain - Fix update_check warning_low_space (https://github.com/oVirt/ovirt-ansible-collection/pull/10).
+- ovirt_vm - Remove deprecated warning of boot params (https://github.com/oVirt/ovirt-ansible-collection/pull/3).
+
+New Plugins
+-----------
+
+Inventory
+~~~~~~~~~
+
+- ovirt.ovirt.ovirt - oVirt inventory source
+
+New Modules
+-----------
+
+- ovirt.ovirt.ovirt_affinity_group - Module to manage affinity groups in oVirt/RHV
+- ovirt.ovirt.ovirt_affinity_label - Module to manage affinity labels in oVirt/RHV
+- ovirt.ovirt.ovirt_affinity_label_info - Retrieve information about one or more oVirt/RHV affinity labels
+- ovirt.ovirt.ovirt_api_info - Retrieve information about the oVirt/RHV API
+- ovirt.ovirt.ovirt_auth - Module to manage authentication to oVirt/RHV
+- ovirt.ovirt.ovirt_cluster - Module to manage clusters in oVirt/RHV
+- ovirt.ovirt.ovirt_cluster_info - Retrieve information about one or more oVirt/RHV clusters
+- ovirt.ovirt.ovirt_datacenter - Module to manage data centers in oVirt/RHV
+- ovirt.ovirt.ovirt_datacenter_info - Retrieve information about one or more oVirt/RHV datacenters
+- ovirt.ovirt.ovirt_disk - Module to manage Virtual Machine and floating disks in oVirt/RHV
+- ovirt.ovirt.ovirt_disk_info - Retrieve information about one or more oVirt/RHV disks
+- ovirt.ovirt.ovirt_event - Create or delete an event in oVirt/RHV
+- ovirt.ovirt.ovirt_event_info - This module can be used to retrieve information about one or more oVirt/RHV events
+- ovirt.ovirt.ovirt_external_provider - Module to manage external providers in oVirt/RHV
+- ovirt.ovirt.ovirt_external_provider_info - Retrieve information about one or more oVirt/RHV external providers
+- ovirt.ovirt.ovirt_group - Module to manage groups in oVirt/RHV
+- ovirt.ovirt.ovirt_group_info - Retrieve information about one or more oVirt/RHV groups
+- ovirt.ovirt.ovirt_host - Module to manage hosts in oVirt/RHV
+- ovirt.ovirt.ovirt_host_info - Retrieve information about one or more oVirt/RHV hosts
+- ovirt.ovirt.ovirt_host_network - Module to manage host networks in oVirt/RHV
+- ovirt.ovirt.ovirt_host_pm - Module to manage power management of hosts in oVirt/RHV
+- ovirt.ovirt.ovirt_host_storage_info - Retrieve information about one or more oVirt/RHV HostStorages (applicable only for block storage)
+- ovirt.ovirt.ovirt_instance_type - Module to manage Instance Types in oVirt/RHV
+- ovirt.ovirt.ovirt_job - Module to manage jobs in oVirt/RHV
+- ovirt.ovirt.ovirt_mac_pool - Module to manage MAC pools in oVirt/RHV
+- ovirt.ovirt.ovirt_network - Module to manage logical networks in oVirt/RHV
+- ovirt.ovirt.ovirt_network_info - Retrieve information about one or more oVirt/RHV networks
+- ovirt.ovirt.ovirt_nic - Module to manage network interfaces of Virtual Machines in oVirt/RHV
+- ovirt.ovirt.ovirt_nic_info - Retrieve information about one or more oVirt/RHV virtual machine network interfaces
+- ovirt.ovirt.ovirt_permission - Module to manage permissions of users/groups in oVirt/RHV
+- ovirt.ovirt.ovirt_permission_info - Retrieve information about one or more oVirt/RHV permissions
+- ovirt.ovirt.ovirt_quota - Module to manage datacenter quotas in oVirt/RHV
+- ovirt.ovirt.ovirt_quota_info - Retrieve information about one or more oVirt/RHV quotas
+- ovirt.ovirt.ovirt_role - Module to manage roles in oVirt/RHV
+- ovirt.ovirt.ovirt_scheduling_policy_info - Retrieve information about one or more oVirt scheduling policies
+- ovirt.ovirt.ovirt_snapshot - Module to manage Virtual Machine Snapshots in oVirt/RHV
+- ovirt.ovirt.ovirt_snapshot_info - Retrieve information about one or more oVirt/RHV virtual machine snapshots
+- ovirt.ovirt.ovirt_storage_connection - Module to manage storage connections in oVirt
+- ovirt.ovirt.ovirt_storage_domain - Module to manage storage domains in oVirt/RHV
+- ovirt.ovirt.ovirt_storage_domain_info - Retrieve information about one or more oVirt/RHV storage domains
+- ovirt.ovirt.ovirt_storage_template_info - Retrieve information about one or more oVirt/RHV templates related to a storage domain
+- ovirt.ovirt.ovirt_storage_vm_info - Retrieve information about one or more oVirt/RHV virtual machines related to a storage domain
+- ovirt.ovirt.ovirt_tag - Module to manage tags in oVirt/RHV
+- ovirt.ovirt.ovirt_tag_info - Retrieve information about one or more oVirt/RHV tags
+- ovirt.ovirt.ovirt_template - Module to manage virtual machine templates in oVirt/RHV
+- ovirt.ovirt.ovirt_template_info - Retrieve information about one or more oVirt/RHV templates
+- ovirt.ovirt.ovirt_user - Module to manage users in oVirt/RHV
+- ovirt.ovirt.ovirt_user_info - Retrieve information about one or more oVirt/RHV users
+- ovirt.ovirt.ovirt_vm - Module to manage Virtual Machines in oVirt/RHV
+- ovirt.ovirt.ovirt_vm_info - Retrieve information about one or more oVirt/RHV virtual machines
+- ovirt.ovirt.ovirt_vmpool - Module to manage VM pools in oVirt/RHV
+- ovirt.ovirt.ovirt_vmpool_info - Retrieve information about one or more oVirt/RHV vmpools
+- ovirt.ovirt.ovirt_vnic_profile - Module to manage vNIC profile of network in oVirt/RHV
+- ovirt.ovirt.ovirt_vnic_profile_info - Retrieve information about one or more oVirt/RHV vnic profiles
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/FILES.json b/collections-debian-merged/ansible_collections/ovirt/ovirt/FILES.json
new file mode 100644
index 00000000..8f4266b5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/FILES.json
@@ -0,0 +1,3239 @@
+{
+ "files": [
+ {
+ "name": ".",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/bug_report.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af74c477a29c04dac61f1439666024800f9e657c503b770f3d52d5fa41f7ad35",
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/feature_request.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8b47364a8e844cd65557ba6e5aec0ed5f1823d3722eab751e28ea5f4b9f6207c",
+ "format": 1
+ },
+ {
+ "name": ".gitignore",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "607b2a713229b2851a3204d066179352c92084707efefda0d506bdcd465bf9de",
+ "format": 1
+ },
+ {
+ "name": "CHANGELOG.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf4af969070bf0013dbca817e2c54e80a123bf6043dba4fd0db3935d18323827",
+ "format": 1
+ },
+ {
+ "name": "README-developers.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "81e38bf32f2a201d965eb10891068c1a56cc43e6ffd83c07f3f95442a1ab0e59",
+ "format": 1
+ },
+ {
+ "name": "README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "98d351170c1abbb68f6180cb76dd4a974bc671445cb673465d51b6cd08d4a8c1",
+ "format": 1
+ },
+ {
+ "name": "automation.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a66af564824c2e191d79f94c95a5250b1559b3232667b43c6c49e9afc19b0bc0",
+ "format": 1
+ },
+ {
+ "name": "automation",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "automation/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c29749d822aebf8e458fb5dddef632ad990b77ec16543ba0984589ab53064608",
+ "format": 1
+ },
+ {
+ "name": "automation/build-artifacts-manual.packages",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c82688ccd28cc9fea9edc2f8808d249a8b1c31825555ddad5c2f8df683cd6f31",
+ "format": 1
+ },
+ {
+ "name": "automation/build-artifacts-manual.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54bcf674893e33fdb22a96230997383daa6f35cca6354d80780806ed4ffcd911",
+ "format": 1
+ },
+ {
+ "name": "automation/build-artifacts.packages",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c82688ccd28cc9fea9edc2f8808d249a8b1c31825555ddad5c2f8df683cd6f31",
+ "format": 1
+ },
+ {
+ "name": "automation/build-artifacts.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54bcf674893e33fdb22a96230997383daa6f35cca6354d80780806ed4ffcd911",
+ "format": 1
+ },
+ {
+ "name": "automation/check-patch.packages",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c82688ccd28cc9fea9edc2f8808d249a8b1c31825555ddad5c2f8df683cd6f31",
+ "format": 1
+ },
+ {
+ "name": "automation/check-patch.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54bcf674893e33fdb22a96230997383daa6f35cca6354d80780806ed4ffcd911",
+ "format": 1
+ },
+ {
+ "name": "bindep.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5ed8b55df937ac74f52ef12347af9d99ec34e29cbcf2f1449cb0586470bde99e",
+ "format": 1
+ },
+ {
+ "name": "build.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b43a50bb2e6dd4c30da1aa3557abf50255348ed0611e89c8f830e7a73cf5c606",
+ "format": 1
+ },
+ {
+ "name": "changelogs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ecc6052d7e1c1e65fc3b7667b7db34c0d288da248125ce36cadba74f426a2c54",
+ "format": 1
+ },
+ {
+ "name": "changelogs/changelog.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a9014708c0bb19b8c9d20f79c6938928c4c8a3631cf4d2a30905f8a0dd94ab60",
+ "format": 1
+ },
+ {
+ "name": "changelogs/config.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a9855447b14e048a16cd7877ffeab3bfe07496680c55055a3e8de8c0d2fb64bd",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/.gitignore",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "examples",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "examples/filters",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "examples/filters/ovirtdiff.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "66d5ef341d6075c3bf9671fd5b25f41642ef94127ca295e69901b41da9242e2d",
+ "format": 1
+ },
+ {
+ "name": "examples/filters/vmips.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3c0ede23c25926f83af778160d4b52ea6c11e1dde1e97233dfff27ab8ab835b",
+ "format": 1
+ },
+ {
+ "name": "examples/ovirt_ansible_collections.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e81805423a3ebec7b37b6d380a6fa76732fb3325f3af170eb498c481ddad1873",
+ "format": 1
+ },
+ {
+ "name": "licenses",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "licenses/Apache-license.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6d5b461deb8038ce0e083c9cb7f59859caa04c9b4f72149367393e9b252cf14",
+ "format": 1
+ },
+ {
+ "name": "licenses/GPL-license.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ceb4b9ee5adedde47b31e975c1d90c73ad27b6b165a1dcd80c7c545eb65b903",
+ "format": 1
+ },
+ {
+ "name": "meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "meta/runtime.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "25354b3afabd2b5a0c3e209aeb30b9002752345651a4dbd6e74adcc0291999c2",
+ "format": 1
+ },
+ {
+ "name": "ovirt-ansible-collection.spec.in",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f31d2e4b70249b47fbd8fc4f254f04375864d201a6b3e7cba1961d8f4534c8b1",
+ "format": 1
+ },
+ {
+ "name": "plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/callback",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/stdout.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c5a5a6113c6114dfee3a90c358c0f88cea9e046d6a2f25605c7d6d7409fd5900",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/ovirt.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "969ad2688d121bc449f61608351ed7d344a6803ace1f586da3cbe161a934289b",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/ovirt_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8f146cd225ca2cbb589a39e124c1e86325ffea948d5a96090b93ea6b502974ed",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/ovirtvmip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7512359e1bd62b76559ab193261c6baa2fe9de569f97e30123f30b5147b7556c",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/ovirt.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c49b257259134ecc64d16c6b76f9179f1de4de8273637ebbce4c1e06bd7169dd",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/cloud.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "91470f6d70e6fbb6bf1ccde2113f13262d094147e2ec3113416ea8d613f5bbd3",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/ovirt.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "545a5735cbc99bdf6ac6cf54f00b85807dbdac725490baf0783f8c86fb147537",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_affinity_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "356227080754100c98910c7066f53e8d2f43dfce7a848fb30bbbf370dcbdf45d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_affinity_label.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c612b78a1282cf0ada1e46779879ba79dfcbeec9f6919bfc5c688894513a505b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_affinity_label_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "149dd3db46d6f9c054b296a1fcfe19f99dc9d540e049102d07544dea3e4bf993",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_api_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0fcf3fa080b860a5186fa525a36d0d0f83ee3d502ac26eba7fbb8800b44b8fcd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_auth.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9eb2c9c68b7f5e9ea2c4d93f86c45f9bbf4d9a772e974fab4cc416338ac05cfd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_cluster.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "94eb003ab55cdadc7a2afe758661f470b7e3af84da4687d21202b46a14a3208b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_cluster_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f2a8b7e70aa1afb32978a229a04d10e4eed868fc7cac23c47b4095f973673041",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_datacenter.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "50769ca21aedbb66a2413c9a6991d0f54aebf57cb0b5482fa1d300bf105dccd5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_datacenter_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2f0457a889a93ea6275760176be3dafe90b64765f375ae3c66a142bd6b1780ff",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_disk.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b91e82f7ce6af3c57a285802a6c1ce0580d730330af8c879cbac1cf19a89aea",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_disk_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "626e7e2e5b25830810c396ca24ad625faff53ea0deef990433e74d8fd4c5932a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_event.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0bcad4a0c8c95b81208cb414e09990ec622b1946247fa62bcfadac3db501aeac",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_event_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "213973b1ff4911dfa3dc5585e61bb423bd3f9128f348fb0bae1ed85994190c1c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_external_provider.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2253c69cd894e5d8b1d13b19e374cfbd8290ee683058ef333b6119c16828df7b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_external_provider_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "65d97e66438a52afc3b372cb5f4a8091783d72c0ce8c6e779ae36cf6cda34602",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7cd6538d4e18d19e816ca078ea597702b596cbea738ca4bce9f2cde20745b9fb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_group_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d9330af75d0dee6a98ad2c764af7447c6a8d602e93ff60019e834bf6d47247d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1e41629d0557d414247fa1120f3da81e056993a2e888fdbcc2b64ac60993d115",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_host_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83c19fb621ada95d5142fb4948dd384b8332210dec9eebe5eda623c33a60af39",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_host_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f9f283291e0c66aea729c6f18d9905cd2ba7e0c598ae0636c493f3068a610584",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_host_pm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "51ec49b40fa65d456a38048f325df7982c7debdeb48bd137318c30240f155b29",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_host_storage_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "57f8df610cdb0f56051a4738d905683c9a2bc7d46ced2054728b0f0f9804b2f1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_instance_type.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1856d38726508ff171c3b8d2d5aea7342a2e62c254de87a28cb7ddc1b090752",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_job.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9d92d289cc7309d7304cf5d5601fd0806b9cef7e179fd16bdf4ed45e43912d51",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_mac_pool.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c5822a799199e4d7d6e59ec7c0f67a3e56e5728ac0aa5a217b4746f7d4155dc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "74b1b135135714e20a39b82d4ba80b78a146b57bd7f09ecf8baf7f2b4a29eacc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_network_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9521af5e52a0f83b60b680ff658ff00cce733dfd0d3e2542487e18642b01f415",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_nic.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "35d865a6a853c47b0ea6b5228ad27f8cc680bf48ecc14520e90e9a5cac4c1dfa",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_nic_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0f24d8023f77f8d3a8844ef751780417711fd2d2e11183af54dbd596295ea0e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_permission.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "750a5c9a2478519ede7aa1bbb8b4adc7f3078040b392469fb90521efb8faa311",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_permission_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3cd1913108cdcf57ebf6fca236bb5e1fe6f2d869e47424739611c2c35e3ae547",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_quota.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7560839553cf19fbb2cdab3784f2c19e7dc2ed6beda2dcf9137b57156ea7d004",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_quota_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8faa99b500b8e9636885a1903eb44fd14877272fd254c0054aa59c353b3b361e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_role.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f46687d66c7aa5e4e0242f48c4f27cf5a3ea8adba7c0fe6170ad6f72d0bfade1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_scheduling_policy_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2df2893f226eb74ab4d5f876105de61f190ff55c5ea7eba56ec201d4db8c49c6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_snapshot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e7483683b0badf4f3f962a774d0ee9dc39e15c9a77bf3468856d3ddf4179faf",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_snapshot_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1bb9d7a26818f289c30da690d22858db6fc684f23ed668272eaf2dc85e80328c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_storage_connection.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5252d0c3b3086a4e429807f10d3b7577f2681673f559c363ac297bb31f7f191a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_storage_domain.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5b697a4f9b4284a1a3c8c8ee76e154f8dcb0fa4333bb4c6978da4193e9f5e752",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_storage_domain_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e61f1912371c9489bfa94087c46e80e350c0c1e3875cd97be915a8ca4b57ea6f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_storage_template_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3af9723c461c71f0d3203f75cd4b0e5150987bcbf5d459f9f3221e17b99b143",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_storage_vm_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bdcebaf82ce6a5fbc188f9f7045083dcd66ea2cb394375a0ec3592ae657cde48",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_system_option_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "28bc183c43a51f1f899f55f7b2248cbbc160449894d14e130ac3bad6b4b2560f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_tag.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "41961b31845824591a0691d386e9a8f8ac2c1b85415b522109a8f1f4e798e161",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_tag_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "22970c74cbbed22aae3b5b6ff50f5a8ffcf0c930a417dffea9a42cdc1768ddb5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_template.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8acfa24a7909f92b93fa04464ea5dddc44e832af9b4d759c028e810ecb9bccf4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_template_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "41388c8bfbd0daf9a473b8767fa1a0e70ab674f120bdc78293b3203aa377fa49",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1ade649d12e2e32db210881c5537ea58b7e651630465ae28110324fd91c2ad1f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_user_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "384bd8c82302bed7b8ae17c435879ba41f8d27e4be9e74fc9e1e62e7b5d44af5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_vm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb5353cf334ef987d28df394ff9feb6b913737a9f008df6c5e524efebcb41804",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_vm_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b31ade1b2c0bb0c3761d79b00d01e391043b323352e612fc8b20bc49faa25a2b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_vm_os_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fa981290d946de51a12ff0cb213a21c7197d89328543e5befcf94d334532d478",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_vmpool.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "badbc107526c539e0a22a127931d80a3621caf3ca244a6e336ea38262e08b20c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_vmpool_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "189b64e8319dec7267ad1b94746bb11ce3b17f18b245f1cea74de98e179f61b7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_vnic_profile.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "165440d6bc6f89eccd5b32b2ea80c4990f10406a241b3338f47577a449872125",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_vnic_profile_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c910f2bae4956f47094b445bdb9c30753737439d643e5eedc1137d1d4902d5da",
+ "format": 1
+ },
+ {
+ "name": "plugins/test",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/test/ovirt_proxied_check.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4cd7ecf3b7f7f467865eaafd095af27707d839b7de357d0a9120e21cdde19d88",
+ "format": 1
+ },
+ {
+ "name": "requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5f0d98bb88ceddd3ad1d0628f266b90716aaa52eb7fcef9fb4a65e12c6b2fc9c",
+ "format": 1
+ },
+ {
+ "name": "roles",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4532bd8b9876ad969eb41f48fc63da9d375cb211299fdaf639f80df7e3cdcf21",
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d8ca3c1448aef8b92f96382f9b6706333724dffd426c98eb8212f1fb526cdbcd",
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade/examples",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade/examples/cluster_upgrade.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e5328d9e91b8d6e3a6464baae5fc0398f848cfcbcb75398fbdf659ad1b73a403",
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade/examples/passwords.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c135528dad4a7ec75c51b21ebee33d4a41a0ed73088e828e90f0ee34a9dbd003",
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade/tasks/cluster_policy.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5f224d8fadda60cb6fbb194c55503874a84b78c220b06f1878ef4f0b5eb66ed0",
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "47b192262f55865b95a60bca3c3a376613e08a367a3652771a8d044b9424d644",
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade/tasks/pinned_vms.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "913b4583706ce6b0f3fae66bff7e48e63033194c7d62b7dd4a3601b8ee3b3739",
+ "format": 1
+ },
+ {
+ "name": "roles/cluster_upgrade/tasks/upgrade.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "669193ba86168a88a9102391565dc02f31b4b5ae8979ba114efd2df786ae1ebf",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6646b07417defb12746bb28acf5d4f7dba0d09f5623be42600f8071c82b20a67",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "250d197c20350193f1f3908fc4a512ffed490a02da299c3d4ce8cc3717009e61",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/examples",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/examples/disaster_recovery_vars.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dedf42ad220874caa57771db26926cf3065508beea14093c2ad595413612d06f",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/examples/dr_ovirt_setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6796d067c8ea6241949fce8ef5ec5f35f539542f7163a6c2981f72a2634409f0",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/examples/dr_play.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a76a61f67fc2e56368e660217f140395bfa85d0572aade8298ff9e92b45439a",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/examples/ovirt_passwords.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6368b1291884cbd248e720948fc6a5709f04a1b07bfb78afdb615117a57da0e",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/files/bcolors.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c50fc3bc67ce7e03419432047893b31eb20a1d1fe57df3fda7a62da93b2fced",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/files/dr.conf",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "062309053211751364129fc4d899e4e359d15a8cc78c5fb6d085d388e9551021",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/files/fail_back.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6e5500c2865ea027242f33432e76f5603fd7c089d19cda64bc2c097da94559c1",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/files/fail_over.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2b62927f3c3c643eebcf5c3caf3d6c30f6ad29bcf2fd746a363e2c6e1cb1fa6e",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/files/generate_mapping.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bdcd605745171f37b3049714acc6b830959e31311710aeae02ccb95b831595bc",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/files/generate_vars.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "12026c714066f5e9599a9de7044480992f650c8f6558de123735c09a6483ca7c",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/files/generate_vars_test.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bad4d717ee7c27a022536bacc01a788c7944b36cc96a769155de5477d2bb3b95",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/files/ovirt-dr",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0c0f5f6f4a2fc10225cf8d85f78c1883bcf25f863e82de6bfd63069a78528502",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/files/validator.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "39d62c0cbaf472f1da6524e2b6ff350344bfd73d5303f2b4b94c98cb64257971",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/files/vault_secret.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e5a276f384cac4d78e76145869238a895b959fb123a33ab652b81b830f8378e",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/clean",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/clean/remove_disks.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2f95ea4111d37832d90bf779f7b488396ce8b1484203eeea243369611cf7d69c",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/clean/remove_domain.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9669ad1f9aca5cf8d44773afda961fe38e44109ff8b9a3a4db4375aebe2991c2",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/clean/remove_domain_process.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cdb1add1db74f4e356bbc6b70b1bb35e021e19611b5c993e4a82c83a4f5b8774",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/clean/remove_invalid_filtered_master_domains.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f890531ebb26bc2fd60c6306380a63b946a1b40c17483f38e8c0448e4b27a770",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/clean/remove_valid_filtered_master_domains.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "75901eece54f2bca739edca1aea6664c250ac807a623e2aae5eaa46d9b669dde",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/clean/remove_vms.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73c92eaae18929283041cdf9157b557cd351caad1acd3954290fa7d99733ac0e",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/clean/shutdown_vm.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "972a39bc32f92a88ab339bc9fe37329c7f09c090eec5ce591a1a046bd629d087",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/clean/shutdown_vms.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3214ccd2f3b75945bb2094485c67c8bb01009df7e46c7177668e93c30f00d99a",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/clean/update_ovf_store.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6869581248b71b204a9b00b52d60ce1bf403ffb5177494a24335f3723fa73a1d",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/clean_engine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dcdbaba42714a13dc6bdc2cab32ec65a83493cf4ab45743da24db4371488528b",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/generate_mapping.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "464ba937f2a3d1c1c1558804228ebda45fb21a1ac71d6eda5c00c88231658ecd",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "520f155b552a063eb220cce399f0d970154efc8e786b43648c58b0837326aa2f",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/add_domain.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "55843fdcf3d56fc4fa6a4511cb6424140a990242ea41a3b0477991f961ca03fe",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/add_fcp_domain.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f54947cfe01a43788f77644c3513caa54366d4d756a76c951b93d266a8fcc111",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/add_glusterfs_domain.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a7c15113a493ace2c5d94be0a0e3cb1ba12eb9508eb1d0580f20c73e0dddcf0a",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/add_iscsi_domain.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "57a34b9334330959646b8ec09283dbf2faec32b397dec72d62ed54bd135e0332",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/add_nfs_domain.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d0dd3082713c2f2f6ec1c01d86b2b0013646b68197bc9da65c62fdc39cc61fbd",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/add_posixfs_domain.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c646ef03aa1cb6214973e8cf000d81000651b57997d6ea9f25202fdc233a4b42",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/print_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8c7241e94bb8cfebc06e4931e08ba5614466348784841bff314e7c4a659b41c6",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/register_template.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6fbb90b37cb5d70026c4fbb62b68b5df6adce98cc3de08e8ad1c5ad15f4a6576",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/register_templates.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a9644c03fd9d7689957349e7b064b51531c3df6dc43b30072dca929594ee49c",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/register_vm.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6e90216450fc6fb5085d901fa9d61d75a461294b82697832d4f1350c3bed714b",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/register_vms.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "abdb2edd24a9773ea83ccf93df6d10bd522ac0a1e8937b092f787ee5a10b8a92",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/report_log_template.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4a6b48e869863fab445b983c2d4a4fa43bc3cb76620e3548c25ab8f54c89b91e",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover/run_vms.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eff810c035b57b4187fa74d021f56d0fd75261b7c9a6c5d622d759b153806166",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/recover_engine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "076c6ff3afbe658f322a19a337e078b9a446ff5d0e366866361fdc702494d61f",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/run_unregistered_entities.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bcdf2a81b948d0cd3265f558395fbad9527f8fded1a1813ccacd34baf5b6fc62",
+ "format": 1
+ },
+ {
+ "name": "roles/disaster_recovery/tasks/unregister_entities.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ad031bb3cc53e41bfc5b771faa85e578e81c59679746864d330b1fc9f01f3b5",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "42da4e223f3b1b8b7a3a57914f80d26c9d872873a71718be29b06b65d3569023",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "497c165e63064ed743dcf2ce54f69abcf22406897e61abda4eb1f8d4d19196d1",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/examples",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/examples/engine-deploy.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b844e045ee512b9003f655e8f5ce6231d0e9ce4121255f0ed0c8972f9af052d5",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/examples/engine-upgrade.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4ddca8cd12921dd36298bc8f26c019db5c289ee362f752b077b5373dadfc4a07",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/examples/passwords.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e8fed7a17e985ba5217acda961a8a41c98fa41d56f2b7046a82977da7b3ceea6",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tasks/engine_setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b5453992aa84333007e5b4898085be76cc8d750f5c7b695fea21dece28d40485",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tasks/install_packages.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "950a6fe339d0a81e6a00f54e36c575d1a78ba440ff02ef3cc2223761630b7653",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3fe90b4f82f1d1495c7f1e6d9d9063a02510ad4901437fdb5894f73e64fdd1e5",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tasks/pre_install_checks.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "546afccbd615b9d06b3d329375355fcda58f5c6c1026b5c7f9ca1573da7f521b",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tasks/restore_engine_from_file.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0136bd8bcc2863106ff8e53cb8e718e1d60890c94737335995c6af18b61208db",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/templates/answerfile_4.1_basic.txt.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3d208930a8c1a973a018c7476a10f3a1857d9562ffa3037af797e62d9fe47aa3",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/templates/answerfile_4.1_upgrade.txt.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6295859733ac8997107fe0644a1788ba0b6f88e729aa57e67c9de1d0fb1e2bf4",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/templates/answerfile_4.2_basic.txt.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e563077a2da3efc212864c7f23639af0f0271a39e21fb38a27bf9ba53174f86a",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/templates/answerfile_4.2_upgrade.txt.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "674c9ed2cda7dca01bbedd549f17ab1ea4d24db6d7cc4308a8683bdb20d00a55",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/templates/answerfile_4.3_basic.txt.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a3e56da1f79927e222e08a8814165104272951dba6e3da64cd50c67280493165",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/templates/answerfile_4.3_upgrade.txt.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4691c06ff6c2c79f7754ed23337bd5c425b1349eef196b58aec360676df57041",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/templates/answerfile_4.4_basic.txt.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "04d0026ef4e34dd78e375262d80e84e0ae95c6c1f9d38f40c7c5e810bd98fde1",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/templates/answerfile_4.4_upgrade.txt.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "64170e548cbe04147f375cf9d1af8fcef33599a23384a46d81506683dc618a43",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/templates/basic_answerfile.txt.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "59936bcb7c900b9779ecfed684759145e85eafac01dc7602440e844a8c55c73f",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tests/containers-deploy.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c3c2c21cb3d0aa75d7bfeab5a56a1bdbcac587e7d9c09656bb30f3f8f352ece",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tests/engine-deploy.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6e898c8c0035f4be61a893e691998bf17cace4ddd4628c3d3f73230b1a8663b2",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tests/engine-upgrade.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d5698a32c3605fc3c97282a9564a47679af7a23776203c5ff2a9cb349b28d12",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tests/inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "669dea0f087198b19e89c45cb705e8439f9d1139a29d63264be472ef47b33b9e",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tests/passwords.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e8fed7a17e985ba5217acda961a8a41c98fa41d56f2b7046a82977da7b3ceea6",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tests/requirements.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc296c4e43917486b4a713e2f50b075b88fff850a6f0901081368428686ea431",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tests/test-4.2.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ddbeb082e3e49d7265a4a6b4844009c2ba7761add8c6bbdd93e2c83cbcbe0b75",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tests/test-master.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4fe629d42f263e313fa104726b7ed911a69192df33bf20552ce89185c2c8425",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/tests/test-upgrade-4.2-to-master.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0eb240194881bb664e0adbad326c5c51c7dd2ae7dccd26867f36bc3b40719dc1",
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/engine_setup/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d162b680ab1278e0deba1007812e444312bde31dc96150507df65122b3ff9cf",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ffa4460cad2b12ac8444d2af6e44e3b33bf1db7a8815d215e9072cd2e013493",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "deab96840dfa0ccf5f57f7f51f49b0efd89a643cd10b0e92685f7ba419bb9a77",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/examples",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/examples/hosted_engine_deploy_localhost.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "67398b5ebb07bcbc56ba3269c3ab11672bdad44ef34eaf5a54d6d834ff1fb05e",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/examples/hosted_engine_deploy_remotehost.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e2d43b9deaf49874e8c0b3303a466cea9b2d5848353da5feb5cd2080347863c9",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/examples/iscsi_deployment_remote.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fd684ba2bf2c243ea49fcc3f724f80c11f3bff5aec833c539b363e2d8a664029",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/examples/nfs_deployment.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e502f3e646feaf3043835aafafba260ae5dd804901ab11254ef8622a6058689b",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/examples/passwords.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d743b2921acb5121edb77586f3357245704891451720da05f5f7677230f8a94",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/examples/required_networks_fix.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c4683c00dfebb6ccb179d892c324dd3f01990812da38b9d2049e1775570ed8a6",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/hooks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/hooks/after_add_host",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/hooks/after_add_host/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "99f37c2644368879fc3f47df1673128068861c0c551488f3dd1d6c0ef930b943",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/hooks/after_setup",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/hooks/after_setup/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "21b644ac87bacf4df09f35510e7e47c43e179e5e4c4c297ac381d892e3c101eb",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/hooks/after_setup/add_host_storage_domain.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e6054612f16dcc287cd6d0230bb0548156d99cafe8bbaeada90cba8a9db89a76",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/hooks/enginevm_after_engine_setup",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/hooks/enginevm_after_engine_setup/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab33f419e16208c32590d47c90d428f23dad1528151b994d4ce4dd07ba0955d3",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/hooks/enginevm_before_engine_setup",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/hooks/enginevm_before_engine_setup/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "34769789fabe1aa2e5637fb5242e429ff8dc5fc9614d816165787937f767ecff",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/add_engine_as_ansible_host.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6fba8098bc9cdae2d437892b0486ef9527caa8e53b9f4a3c867cc5b94d7053e3",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/alter_libvirt_default_net_configuration.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "330c83afd90cfa40110931e0b4338bbb46b42af38d2b851bcfe0a80289801cb5",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/apply_openscap_profile.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cd648ff188d16c8668aa894f29fb4a66c789f0b5b3f9e9ef6a5d4ba06307f1fd",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/auth_revoke.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "955dc9b93052b819293777818a4efff63ef88670c16379efea781df9eccb916d",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/auth_sso.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5ffd88d9555c665edae9c9b9951f59b64371d723583499a7b8c2f1a34ec42fad",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/bootstrap_local_vm",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/bootstrap_local_vm/01_prepare_routing_rules.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f491e9ce3aa3e44c47eb5e53b96655a4923855b925e79c6eda77f603ea16c410",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/bootstrap_local_vm/02_create_local_vm.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "79ddea7c5611e4b38d8243d13f86a650a425dedd768b93feebb37113abe861b9",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/bootstrap_local_vm/03_engine_initial_tasks.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "469bf3b150f4c212a47953b9d874587f65c5744633412442bb7ebce5ebd3edb8",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/bootstrap_local_vm/04_engine_final_tasks.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ed8cf23c4fdf64a25c420271e5402881477ac5ee286ea69d51e82b5722fb553",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/bootstrap_local_vm/05_add_host.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "631388a11170620f9c6f1f04720b2bb50534077a64016855db96f4d07f0120d7",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/clean_local_storage_pools.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b46063faa778f6949b4a831d541fcf95b9094dd6f3799f25565b0a08eb4bd71b",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/clean_localvm_dir.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8cb464328e0e376d76d888649689a614c73f3147f9a0c45f551097a0ca980473",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/create_storage_domain.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e273425ad15e84fdbe0f926f0ef0012ebe2e020d65667908f5b8d9ee286d656a",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/create_target_vm",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/create_target_vm/01_create_target_hosted_engine_vm.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d2490c31015e7041d0ca54bc9ec9fee695ca19374acf3596617dafae8571ef4c",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/create_target_vm/02_engine_vm_configuration.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed4a9253f141ee4a84889817578b8bc6bc2cb44c6b2cf4fbd2166536471779ba",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/create_target_vm/03_hosted_engine_final_tasks.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c572920c4eaa867b268081e0787303bde83518dd9d0300e1f997837639f54cef",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/fc_getdevices.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "59380afb1000799ceec2f8b5ba2835af73b3f4556f85e161af16fe483eb429af",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/fetch_engine_logs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eefed2e0587afa96d3f42b66e2e569b5a600a0988534381b548c606d9bdfe5a1",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/fetch_host_ip.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "670c68966b3bed93ed3be9f184baa93997014beb64ca6c70b0525d681b399623",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/filter_team_devices.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "534982c802907226b4d9c87101982d44b1ff972d6c163126a74575c1b3cfe7ac",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/final_clean.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f34840bd6ad16a16f22687d9deee369eef7f53d72f0d172a0a63d89cb5dcbbf5",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/full_execution.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fd15adf36f65e0f71ad65b99234f1480fef627d379524d0eeaf720c6494a3d49",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/get_local_vm_disk_path.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9a07247ee6f9185c3fb2fcda1236e73c6dee4d83a9927192e3ff30eba32318bb",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/initial_clean.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "03e59ab27b60555f77004c2619db393412243073d3fb5a13a8ede7274a689feb",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/install_appliance.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "05df7908cb07a1af5525934e74e8caa0c415a49550d386b81c032b79d6f0d4bc",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/install_packages.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "225a2319ba94d5b1281ef1105bca0f8c374976e3a002a0bfa4e19aa0a6cf8058",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/ipv_switch.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d00920e9ded434202a906c771fd07d735507d50bbadd4f8a3bcae2890cf6715",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/iscsi_discover.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8aeaad54e9801c6486d53e3dc9d744f2d7011ceee7c5dafc2f173eda12e8fe70",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/iscsi_getdevices.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b89edf4cc07b7275fda6e45bb32f6e06bdc96c19d06b6d5fd6b6e04eb4693f1a",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "27fc692c3676b70eb4d3151dc56485fa1327697b55da5796510f5383e5103c93",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/partial_execution.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d8360f8fa2905af647295fbfe86aa152c35486065f08119ecad1ae2e9792ea05",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pause_execution.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5608c88be67b2b6af7ff7a4ed5228ba07449be2fa3706d1af6d38ad75375b30b",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pre_checks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pre_checks/001_validate_network_interfaces.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c7fd55672128ec1aab5b9cfa0de598e85a2bb9380974d13c85620a7205c653cf",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pre_checks/002_validate_hostname_tasks.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13d22794d28ba4b5b0f6566b31a14f8de449dfdf30b7179128d6b0e280fc68e9",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pre_checks/define_variables.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "accf8a364e7cda4eeddf7756edcf3b4957b960a7da7ad16c7b033fcc87bf96aa",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pre_checks/validate_data_center_name.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a58d58be85b6873dd2bb5597d3fac7e4a6e3e84bf770852d3e2df34595088289",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pre_checks/validate_firewalld.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0768344afbaad2d9849edf72025ccb23e15797435d7f38be4988742586af789c",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pre_checks/validate_gateway.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "90b4d76f7ed81dfcdeb8095aea4a521e0208ddc78b4951c112bea07b163efde1",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pre_checks/validate_mac_address.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d63d1aaa6abeb2d77e883047c86680b46aa991733062078e37dbcb2ff079181",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pre_checks/validate_memory_size.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3effb146d54747e76cda458f20ac8e421258d559bea54b029f02b001dc993dfb",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pre_checks/validate_network_test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f31a744391c46fcd5385c2b31ed7807bf02175ff5f8276980d1503922d4416d8",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pre_checks/validate_services_status.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "23cdde23e08e783da4ae7d30f13aaef45737c5305b6afe7c592f344ddf9bdd89",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/pre_checks/validate_vcpus_count.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b9f664d3f1d078f236ca44bd9cbcfeccde4eea8fa14a69272acb3fe759516b3f",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/restore_backup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a8b12cf5a6b697b7cad2874b598f560e20adce6828002dbed1c26a23cfedde3",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/search_available_network_subnet.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "93ca3e7532532c6d56a609ce71f674aff274614efc76b416119da84060af5a7b",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/sync_on_engine_machine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb7fac356ead6ff30722a722269d816171e6b870457ea3b903fd75d41bdb5f30",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/tasks/validate_ip_prefix.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb11f7de410178f7da0b4d1235835311cbf1a9fb1a415334f8ba3bd5e4a430f",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates/broker.conf.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d5d0691008ba803c77c10312be55b10e47ca2e9d04049c8751c12aac2c63452a",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates/fhanswers.conf.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd814bc72fb77a283864d2d2bcc729cd1e1424e55d96d3e3c52ac7fe86c4ed6e",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates/hosted-engine.conf.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1cda514d7c23c002f7663a9ce16ad114fd1fe6e715b9d9f35d4e6474d7b7be62",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates/ifcfg-eth0-dhcp.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "47f188d5d7f0c676a3bb4cdcd10eade0d329f2b22e898ee2865b5a99958f0f28",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates/ifcfg-eth0-static-ipv6.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3162684e45d39d03fdf30352bd4ad0b28a98f3314fc0c08f4efcdedfd50cd2e",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates/ifcfg-eth0-static.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0c5713af9904015a96eb5c85156013c86d6262978329a2ecb098d3f2157632aa",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates/meta-data.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4d1e14ea63ecadccab3ad35809d1126103ac83c0b8348af1c7ec1f9eeb5356d",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates/network-config-dhcp.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2f3c4a0cbfd45674a49a269d7be2060fbc9debe94243ba095f018b95f2aae88d",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates/user-data.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee53560828d127e697b6fac3e706af225214050da817ef0ba474619233a18f56",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates/version.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "df9d6e828900b2b252f9222428edb7aa98e0a96f4885c6626397c4281aa0b01c",
+ "format": 1
+ },
+ {
+ "name": "roles/hosted_engine_setup/templates/vm.conf.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c03f85871038cf94350c5146ce76095a0d4b4b3859a0ced4de9d53d4c63927f4",
+ "format": 1
+ },
+ {
+ "name": "roles/image_template",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "423b64e85fcb2709a5c3cfcbd62715625ec8d88a51fed1a816639f07e89a400f",
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "067ecccb5371a364a0f3addff4870bf1bf8a8985b8bd39dfebc75db057005e77",
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/examples",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/examples/ovirt_image_template.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "95d7791b3e7a66e70826f0f853dec13407ff2f5f434daa01c023774d86e6128d",
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/examples/passwords.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c135528dad4a7ec75c51b21ebee33d4a41a0ed73088e828e90f0ee34a9dbd003",
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/tasks/empty.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b816d1d0805c1fa6b117c44c2c1e5f7f09eca24dfd561ce2dbab43696b5ab70b",
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/tasks/glance_image.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "30a5f841040744fda61283a0e868ab6159762632710504fbde63c1385fcc83ac",
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "80a9b0eb9af97bc7a9240cd20ac1f6c1c1e7349ec4977c4874af33b480152c74",
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/tasks/qcow2_image.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1c9798603d9493e915a1f6db2fea1556d255ab1d6b44cd0db02c483e4e18d028",
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/image_template/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f1785c9a8c3028367d2ba75256fa7260c079392212151d682acd55cab7750fbc",
+ "format": 1
+ },
+ {
+ "name": "roles/infra",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "687926581811bf17a92b05d82de0b1f1d1d8937ac2e39ae087a86ad4fc270196",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43cbf9c83626b92441ab813a6ed0967521abd594700db7f4a74afb10fb869634",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/examples",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/examples/ovirt_infra.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a33e886ee1863693e8596d4e97aa28cdd0f774ab57f766699d3b80dd5ae7bdce",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/examples/ovirt_infra_destroy.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec9b972a04e5e16cb267b9c492935300023dd2751d530b606c22852d7eb6eaee",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/examples/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/examples/vars/ovirt_infra_vars.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ea7382f07df13bb80695ee19c38c52aaa48138f85a5fd2a9c7a78efaf6f19411",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/examples/vars/passwords.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c135528dad4a7ec75c51b21ebee33d4a41a0ed73088e828e90f0ee34a9dbd003",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/aaa_jdbc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/aaa_jdbc/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4375cfc057fb42b16a8b0fbeb7715712355cac4d03e440b396608b1bf4fa27cc",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/aaa_jdbc/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/aaa_jdbc/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "17d0abc72b21f8c4705c7f3a2685e127b5a35fd0fe7b7e8fd1d7fcf70ba00de3",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/aaa_jdbc/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/aaa_jdbc/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ad219fb1800820f9b63afb3e9299537b6b241be8387709104521aa9e2170a58",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/clusters",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/clusters/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8f7e8ddf2319f57e14f63216f2579e695b3f51fe9f1db47442ca7d9e3fd60846",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/clusters/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/clusters/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "625b705612a5fb64864c28bd8bd33ba125811047c571ab8446066af968236b35",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/clusters/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/clusters/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3dd97b72b356fb9fda8d10d8c5877c5a9ad8db4b42cf17018d983ab56cbee10a",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f50289bf733588f37db2429f37781ded326c0d74e18c697d79515022e5f38657",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "30ac2342a8f199951885187e8974c55a3c5a4bc0283746e430a5e34804d4f895",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/tasks/cluster_and_hosts.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eaec02db023b502b330e5400406153522e0f70441d013e934c0b4db72fa9e45f",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/tasks/datacenter.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e569a2c53d245cf4fd7dc8a61a5cc6818ef35c9ecc1f00f9347985b1f0862f1",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/tasks/disks.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "356a72a24f1fc2190b8f13ac1a9b51422a444e221f3fe2a7085f91c22ac7f9bb",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e04a79449a131fc5fed5c509b2c8a268cef7c167cbb7443ad13f2d402c02a48",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/tasks/storages_last.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "56f222731a7c058905e9d003ecda5bf9de0ce94f9d71b7650157552919504052",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/tasks/storages_pre.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "570a2b8e93e98cc0bfbd78430959a0db65d500f54a39308db3a4f84394a618c6",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/tasks/templates.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "42535d73df3a7563605686e07752af0f911435b25a80a04ab9241f5b0c65c386",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/tasks/vm_pools.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "869addec6825d3ef8aef24b6ecdd96d759af75ecc99e9ce5f7108b2fe33b69fa",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenter_cleanup/tasks/vms.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d82a8266fb68387223bf4a821cb6c9abe17a51a2567eb14e0e718d5558fecb12",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenters",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenters/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "214b779ef23478dac5fbca69527d16ef268ef69a0d21ecb90c9299b05b901599",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenters/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenters/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ddb8e440f777516ca7dc411535887948bcbe53246b4181b3cb198f80dc472da3",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenters/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/datacenters/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e1a6314e294b65a6eb18eebfda9022d847f32e1a6b46a65f4346e16b5bf64b4",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/external_providers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/external_providers/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a02ecce75eb99b2607ea7fd89cfbc6d3a3078d99d84f3156ea383fd3bc0cc6f2",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/external_providers/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/external_providers/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5bfafe45e58d4d79d867d3f0b330d824f16b5635dc7a5891846fddc1574f82cb",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/hosts",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/hosts/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "96032aa0f341255e170ed1f3acd3ad94c7ab33fc2b3d0d1d3531ff19b90e82e9",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/hosts/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/hosts/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5ffe29f2b3b80f58c6e9b98baee20af1af525b30a5e1eea75348e3e2e1a554ce",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/hosts/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/hosts/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "75c0b24a0866825f6011c9b20897c8f810c008ce31f03e2fd3569007ef16de08",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/mac_pools",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/mac_pools/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c4e597a6aff75657e0a3d56d5d1624f0a5a19ff7e351f120fc1fb4b7d0210923",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/mac_pools/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/mac_pools/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "668356454fedd30e240952f68f64a3b591331c225b51c4d22aa361cc976ebbd4",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/networks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/networks/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "69ba323cfd973321a9e768aace91dbbbb5983dd2814598e77fb1d55ccf6b6fd3",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/networks/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/networks/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ce7f9d11412eeb36ccb417eaffa32a0d21ee3608dbc5ebcfa9c648747b66c42",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/permissions",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/permissions/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "373fe54a49924231191bba8d6f2b1a6eff00a0bcff5b73f73eef3fbc880e1f59",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/permissions/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/permissions/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54d4344381fee5124b217646ed15a91e0553ef9823f6d9a8bc5dc37702c27703",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/storages",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/storages/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0222d631ed65ab1854640e857d65de182d127649043859d746419670b5b32bd4",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/storages/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/roles/storages/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a25d90f905275255f42df1985350970460cdb8818593c238c12bf768e31a10ef",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/infra/tasks/create_infra.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb1a2b6ff26a441f52736f81f952b5170d627352af0d481f30b0ab5f97ef9f7b",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d2ecc55afee3c76c2713d4f8b6cf2da734bc6b9d010d86d91dce3c8aeb1f1239",
+ "format": 1
+ },
+ {
+ "name": "roles/infra/tasks/remove_infra.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a4a52e50d98c1e806c06007dad449c6a327dc88ce70399eae45a0aaa3b569667",
+ "format": 1
+ },
+ {
+ "name": "roles/manageiq",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/manageiq/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ae5c08ef20d2883d130234e0571836c75c80849027ceb74527c40e8f355f5d5",
+ "format": 1
+ },
+ {
+ "name": "roles/manageiq/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/manageiq/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2708faedc9f936a3864e36e46c7a9eea2447fd264c2c27914d5aa913b8583a25",
+ "format": 1
+ },
+ {
+ "name": "roles/manageiq/examples",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/manageiq/examples/cfme.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eea3955244fa9a457115b9413bb4c1d734ac1001333c353ca451c54440130279",
+ "format": 1
+ },
+ {
+ "name": "roles/manageiq/examples/manageiq.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "25651160e250b7476a75b5365192b3d36454d15074bb9864b17ea3959975a7b7",
+ "format": 1
+ },
+ {
+ "name": "roles/manageiq/examples/passwords.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c135528dad4a7ec75c51b21ebee33d4a41a0ed73088e828e90f0ee34a9dbd003",
+ "format": 1
+ },
+ {
+ "name": "roles/manageiq/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/manageiq/tasks/cfme_add_disk.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "79ac34f0ca3f58b9d61a4d9ceb216434cfa948648916104b5efb84a91ad06d86",
+ "format": 1
+ },
+ {
+ "name": "roles/manageiq/tasks/cfme_identify_disk_device.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e187547936ccd37baf00fad926bdc715b7601896680a885eeb27b81e0e6e124",
+ "format": 1
+ },
+ {
+ "name": "roles/manageiq/tasks/deploy_qcow2.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "984ec534a8e685c9855e6ce10251c188c6b0d84d08b145c8c38c1aa1c10c3e0c",
+ "format": 1
+ },
+ {
+ "name": "roles/manageiq/tasks/init_cfme.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a63e332775465c290ab4a26c18f0d1415d5ab0eda8b5c78b1e639fd32a4d1fd9",
+ "format": 1
+ },
+ {
+ "name": "roles/manageiq/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a50b2926198860d40c8d2a403814f90ab01bd05e18494f5ff06526cb623d5bc7",
+ "format": 1
+ },
+ {
+ "name": "roles/manageiq/tasks/manage_appliance_roles.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "10373b8fb2b9a826e6a3f7ffd24bceffd6496a998585d54678182939e4dd23ea",
+ "format": 1
+ },
+ {
+ "name": "roles/manageiq/tasks/wait_for_api.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3403901595b39b67c0feea474773d6c087e376f300644eaa7e84bb2cdada6a7",
+ "format": 1
+ },
+ {
+ "name": "roles/manageiq/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/manageiq/templates/add_rhv_provider.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "547fc71fea2d2cba194150e042560304e0ae2b025e56cbf2864bd6633b40ee68",
+ "format": 1
+ },
+ {
+ "name": "roles/manageiq/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/manageiq/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8cf03c72d4549809d717cacd28eef427cf12160e8ff37cf67c51b161458969dc",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d9d31db6d778a264d482ed0907182ac8478f40e046beda0567bf37a574018577",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ececa034fc3f1aa227a32b5a82ffd29c7d934e887c40a40cf350d6ef186d9c00",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/examples",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/examples/ovirt_repositories_release_rpm.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "16f013b459194303f4a4b16485a9ded42c866ae244c024ef6bca5e544e1779cd",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/examples/ovirt_repositories_subscription_manager.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bb5e84201ed0b91de44f606f4b2a930ce07065de4eb98ce137d41256399e1266",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/examples/passwords.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7baec1da55ec214cdeaf66cb5fbdce88498268997b2a4bb5b6a3fc5a093e4e06",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/tasks/backup-repos.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dc0c7fff9ee827579799d2a124097a63ee6417346f6d2eb30a7159d7e909e648",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eba10755453cca657d22bfb26b79892276621539453ade25d6fc548f14c001ca",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/tasks/rh-subscription.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b48aadd45e9352a00258e55612b08ed6956b4d242ebf35a14fc4481af8077251",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/tasks/rpm.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4881f1c819179810d8dd23978bbfae42785ca2e6cf85039e7f82c1bffd82505b",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8f3190dd83d2c27e2cd4b4cc36b9f574075ac41cd8a62823a7a9c119c0bae624",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/engine_4.1.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f01ec6b4fcfc630b4f8a139b4f30123e7805ed2976cba75dc9a3e3c380fc5db1",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/engine_4.2.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ced8a355735ce4d3636dc76bc7d63a6a71834064155399f971d9cb37da3237c1",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/engine_4.3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6936324dbf3686dab7f3a0fd7586a7f1db9d56e1fcc60c8033b94522d393997e",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/engine_4.4.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "124181b3aecad8b00cb19a063efb68fc5dc282d374a72a4f2a707035db88161e",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/host_4.1.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b3171ba133adc54ba539e763246251b0f833dc8603d5a46243b55d82fbb80490",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/host_4.2.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8a97eeb8025db4ed4a5c88bf2a652f41982f48a2ce195e3c47b0990897873cd6",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/host_4.3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec3616b3d9433ef599822a6131e7d3168d5b5bb75712f0b69a1c822459cd6145",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/host_4.4.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "12558a697a00356b11dca5fb9c21b509a3f78f2af049b74d716e9440f867d74c",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/rhvh_4.1.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cbc95494cc017f3b7ccf608dc59b77394847929474531547fe5a6448d71d8b16",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/rhvh_4.2.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cbc95494cc017f3b7ccf608dc59b77394847929474531547fe5a6448d71d8b16",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/rhvh_4.3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cbc95494cc017f3b7ccf608dc59b77394847929474531547fe5a6448d71d8b16",
+ "format": 1
+ },
+ {
+ "name": "roles/repositories/vars/rhvh_4.4.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fe7220fb776160b30f86fe7f9b70c41ae4d26e774d14a80951bf9b91aaacaffb",
+ "format": 1
+ },
+ {
+ "name": "roles/shutdown_env",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/shutdown_env/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d1bb8523fef9d1dc2ccd7202761c9085edb675f01d3205401117be6311cd1e0e",
+ "format": 1
+ },
+ {
+ "name": "roles/shutdown_env/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/shutdown_env/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "23ee730fc457add36b19f314667fcea6891341e5e8ce982cd64f47773b7621fe",
+ "format": 1
+ },
+ {
+ "name": "roles/shutdown_env/examples",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/shutdown_env/examples/passwords.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c135528dad4a7ec75c51b21ebee33d4a41a0ed73088e828e90f0ee34a9dbd003",
+ "format": 1
+ },
+ {
+ "name": "roles/shutdown_env/examples/shutdown_env.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "845381aee5af25a91b98ae136d2b68fe217c686e21caa74b2016405c98194d5f",
+ "format": 1
+ },
+ {
+ "name": "roles/shutdown_env/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/shutdown_env/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e05605915875635bb9499fefe682bf38b47dc1a5585becebd16e2eb522b95e53",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c8c6eebe73201d32862598f55e691204600b3b3d060e61cc233ff809b19ee3c1",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fda4d84547c6f317c7048ce557138ac6f73bcbc3a8c19bcdd31e02282eda0b0e",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/examples",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/examples/ovirt_vm_infra.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb28b0719be25e8690506c0f038edd9a520b8d82389ea4691203425d558afab2",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/examples/ovirt_vm_infra_inv.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "800510a76705a8b1ac6a8b94f31826c2f303aa74730e151cdfe0b3984eaa6eb7",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/examples/passwords.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c135528dad4a7ec75c51b21ebee33d4a41a0ed73088e828e90f0ee34a9dbd003",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/tasks/affinity_groups.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "99de64bdc087e561bccb1adacf980271a66654f63ce536678cade94a8b6e9ca2",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/tasks/affinity_labels.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1302469d26a335dab3169677c35ae70d96b84272e586c27786aabf7f06a5468e",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/tasks/create_inventory.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "350975f10b5f73248d8ff8ec0897fce8e1eca09c1d8314bfc8314ab211930250",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/tasks/create_vms.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fa50bd4eb9a4dd934dfa935f72ffe197052961bd84918d8db0fb6fbac2b6202a",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "56cf6bce2af5e5c5611099563d47a352e01de84f32dc7ceef2603b9d2e082ca5",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/tasks/manage_state.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a7a7ffb714d8ed52d2e4627f38dba3b0f073c7e76dcf57debffe9a5c3b78b5d",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/tasks/vm_state_absent.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2d5d08e9ac19af8523d7e8b0330294810e91d5ad77d4d4b67e1ccd61388ddda4",
+ "format": 1
+ },
+ {
+ "name": "roles/vm_infra/tasks/vm_state_present.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d53ec50ab2525209a005f810ded3cc4bd92eb8d4ffc1bc8814edd5b447c7266",
+ "format": 1
+ },
+ {
+ "name": "tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/.gitignore",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b5726d3ec9335a09c124469eca039523847a6b0f08a083efaefd002b83326600",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.10.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "35416a0e4c824db47f89daab44b368c2dc48ed010ce44b952f46455eb4cc5fd9",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.9.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "35416a0e4c824db47f89daab44b368c2dc48ed010ce44b952f46455eb4cc5fd9",
+ "format": 1
+ },
+ {
+ "name": "ovirt-ansible-collection.spec",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4efd92e90a5407ca64145ebdd534fb744447ad05a863c46dd48e7a1fe9c2c7fb",
+ "format": 1
+ }
+ ],
+ "format": 1
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/MANIFEST.json b/collections-debian-merged/ansible_collections/ovirt/ovirt/MANIFEST.json
new file mode 100644
index 00000000..dafb665e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/MANIFEST.json
@@ -0,0 +1,33 @@
+{
+ "collection_info": {
+ "namespace": "ovirt",
+ "name": "ovirt",
+ "version": "1.3.0",
+ "authors": [
+ "Martin Necas <mnecas@redhat.com>"
+ ],
+ "readme": "README.md",
+ "tags": [
+ "collection"
+ ],
+ "description": "The oVirt Ansible Collection.",
+ "license": [
+ "Apache-2.0",
+ "GPL-3.0-or-later"
+ ],
+ "license_file": null,
+ "dependencies": {},
+ "repository": "https://github.com/ovirt/ovirt-ansible-collection",
+ "documentation": null,
+ "homepage": "https://www.ovirt.org/",
+ "issues": null
+ },
+ "file_manifest_file": {
+ "name": "FILES.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "794c200b7f2091803748928ca8ce3749a326a5f5500c22e7b9a369e4a6219913",
+ "format": 1
+ },
+ "format": 1
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/README-developers.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/README-developers.md
new file mode 100644
index 00000000..21a4c200
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/README-developers.md
@@ -0,0 +1,4 @@
+README for developers
+====================================
+
+You can find all the information about [development](https://github.com/oVirt/ovirt-ansible-collection/wiki/Development) and [project structure](https://github.com/oVirt/ovirt-ansible-collection/wiki/Project-structure) in the [wiki](https://github.com/oVirt/ovirt-ansible-collection/wiki).
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/README.md
new file mode 100644
index 00000000..bf0e68d1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/README.md
@@ -0,0 +1,89 @@
+[![Build Status](https://jenkins.ovirt.org/job/oVirt_ovirt-ansible-collection_standard-check-pr/badge/icon)](https://jenkins.ovirt.org/job/oVirt_ovirt-ansible-collection_standard-check-pr/)
+[![Build Status](https://img.shields.io/badge/docs-latest-blue.svg)](https://docs.ansible.com/ansible/2.10/collections/ovirt/ovirt/index.html)
+
+oVirt Ansible Collection
+====================================
+
+The `ovirt.ovirt` collection provides all oVirt Ansible modules.
+
+Installation from PyPI is no longer supported. If you want to
+install all dependencies, either do it manually or install the
+collection from RPM, which pulls them in automatically.
+
+Note
+----
+Please note that when installing this collection from Ansible Galaxy you are instructed to run the following command:
+
+```bash
+$ ansible-galaxy collection install ovirt.ovirt
+```
+
+Requirements
+------------
+
+ * Ansible version 2.9.11 or higher
+ * Python SDK version 4.4 or higher
+ * Python netaddr library on the ansible controller node
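+
+For example, assuming a pip-based controller setup (the SDK is published on
+PyPI as `ovirt-engine-sdk-python`), the Python dependencies can be installed
+manually like this:
+
+```bash
+# Sketch: install the oVirt Python SDK and netaddr on the controller node.
+pip3 install "ovirt-engine-sdk-python>=4.4.0" netaddr
+```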
+
+Content of the collection
+----------------
+
+* modules:
+ * ovirt_* - Modules to manage objects in the oVirt engine
+ * ovirt_*_info - Modules to gather information about objects in the oVirt engine
+* roles:
+ * cluster_upgrade
+ * engine_setup
+ * hosted_engine_setup
+ * image_template
+ * infra
+ * manageiq
+ * repositories
+ * shutdown_env
+ * vm_infra
+ * disaster_recovery
+* inventory plugin (see the usage sketch below)
+
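+A minimal sketch of using the inventory plugin from the shell (the URL and
+credentials are placeholders; the file name must end in `ovirt.yml` for the
+plugin to pick it up):
+
+```bash
+# Write an inventory source for the ovirt plugin and dump what it returns.
+cat > ovirt.yml <<'EOF'
+plugin: ovirt.ovirt.ovirt
+ovirt_url: https://ovirt.example.com/ovirt-engine/api
+ovirt_username: admin@internal
+ovirt_password: secret   # vault-encrypt this in real use
+ovirt_insecure: true
+EOF
+ansible-inventory -i ovirt.yml --list
+```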
+
+Example Playbook
+----------------
+
+```yaml
+---
+- name: ovirt ansible collection
+ hosts: localhost
+ connection: local
+ vars_files:
+ # Contains the `ovirt_password` variable, encrypted with ansible-vault
+ - passwords.yml
+ tasks:
+ - block:
+ # The ovirt.ovirt prefix on ovirt_auth verifies that the collection is loaded correctly
+ - name: Obtain SSO token using username/password credentials
+ ovirt.ovirt.ovirt_auth:
+ url: https://ovirt.example.com/ovirt-engine/api
+ username: admin@internal
+ ca_file: ca.pem
+ password: "{{ ovirt_password }}"
+
+ # The previous task generated the `ovirt_auth` fact, which you can later
+ # use in other modules as follows:
+ - ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ state: absent
+ name: myvm
+
+ always:
+ - name: Always revoke the SSO token
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
+ collections:
+ - ovirt.ovirt
+```
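+
+Assuming the playbook above is saved as `ovirt_vm.yml` (an illustrative
+name) next to the vaulted `passwords.yml`, it can be run with:
+
+```bash
+ansible-playbook ovirt_vm.yml --ask-vault-pass
+```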
+
+Licenses
+-------
+
+- Apache License 2.0
+- GNU General Public License 3.0
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/automation.yaml b/collections-debian-merged/ansible_collections/ovirt/ovirt/automation.yaml
new file mode 100644
index 00000000..72847042
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/automation.yaml
@@ -0,0 +1,4 @@
+distros:
+ - el8
+release_branches:
+ master: [ "ovirt-master" ]
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/automation/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/automation/README.md
new file mode 100644
index 00000000..1b6a3997
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/automation/README.md
@@ -0,0 +1,8 @@
+Continuous Integration Scripts
+==============================
+
+This directory contains Continuous Integration scripts for the
+[oVirt Jenkins](http://jenkins.ovirt.org/)
+system; they follow the standard defined in the
+[Build and test standards](http://www.ovirt.org/CI/Build_and_test_standards)
+wiki page.
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/automation/build-artifacts-manual.packages b/collections-debian-merged/ansible_collections/ovirt/ovirt/automation/build-artifacts-manual.packages
new file mode 100644
index 00000000..4f6739c5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/automation/build-artifacts-manual.packages
@@ -0,0 +1,8 @@
+dnf-utils
+git
+ansible
+python3-voluptuous
+python3-pycodestyle
+python3-pylint
+yamllint
+glibc-langpack-en
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/automation/build-artifacts-manual.sh b/collections-debian-merged/ansible_collections/ovirt/ovirt/automation/build-artifacts-manual.sh
new file mode 100755
index 00000000..ed87c79f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/automation/build-artifacts-manual.sh
@@ -0,0 +1,77 @@
+#!/bin/bash -xe
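+# Manually triggered CI build: produces the ovirt.ovirt (Galaxy) and
+# redhat.rhv (Automation Hub) collection tarballs plus RPMs, then runs
+# sanity and lint checks against the installed collection.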
+
+ROOT_PATH=$PWD
+
+# remove any previous artifacts
+rm -rf ../ansible_collections
+rm -f ./*tar.gz
+
+# Create exported-artifacts
+[[ -d exported-artifacts ]] || mkdir -p $ROOT_PATH/exported-artifacts
+
+# Create builds
+
+./build.sh build ovirt $ROOT_PATH
+./build.sh build rhv $ROOT_PATH
+
+OVIRT_BUILD=$ROOT_PATH/ansible_collections/ovirt/ovirt/
+RHV_BUILD=$ROOT_PATH/ansible_collections/redhat/rhv
+
+cd $OVIRT_BUILD
+# create the src.rpm
+rpmbuild \
+ -D "_srcrpmdir $ROOT_PATH/output" \
+ -D "_topmdir $ROOT_PATH/rpmbuild" \
+ -ts ./*.gz
+
+# install any build requirements
+yum-builddep $ROOT_PATH/output/*src.rpm
+
+# Remove the tarball so it will not be included in galaxy build
+mv ./*.gz $ROOT_PATH/exported-artifacts/
+
+# Overwrite the GitHub README with the dynamically generated one
+mv ./README.md.in ./README.md
+
+# create tar for galaxy
+ansible-galaxy collection build
+
+# create the rpms
+rpmbuild \
+ -D "_rpmdir $ROOT_PATH/output" \
+ -D "_topmdir $ROOT_PATH/rpmbuild" \
+ --rebuild $ROOT_PATH/output/*.src.rpm
+
+cd $RHV_BUILD
+
+# Remove the tarball so it will not be included in automation hub build
+rm -rf *.gz
+
+# Overwrite the GitHub README with the dynamically generated one
+mv ./README.md.in ./README.md
+
+# create tar for automation hub
+ansible-galaxy collection build
+
+# Store any relevant artifacts in exported-artifacts for the ci system to
+# archive
+find $ROOT_PATH/output -iname \*rpm -exec mv "{}" $ROOT_PATH/exported-artifacts/ \;
+
+# Export build for Ansible Galaxy
+mv $OVIRT_BUILD/*tar.gz $ROOT_PATH/exported-artifacts/
+# Export build for Automation Hub
+mv $RHV_BUILD/*tar.gz $ROOT_PATH/exported-artifacts/
+
+COLLECTION_DIR="/usr/local/share/ansible/collections/ansible_collections/ovirt/ovirt"
+export ANSIBLE_LIBRARY="$COLLECTION_DIR/plugins/modules"
+mkdir -p $COLLECTION_DIR
+cp -r $OVIRT_BUILD/* $COLLECTION_DIR
+cd $COLLECTION_DIR
+
+pip3 install rstcheck antsibull-changelog ansible-lint
+
+ansible-test sanity
+/usr/local/bin/antsibull-changelog lint
+/usr/local/bin/ansible-lint roles/* -x 204
+
+cd $ROOT_PATH
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/automation/build-artifacts.packages b/collections-debian-merged/ansible_collections/ovirt/ovirt/automation/build-artifacts.packages
new file mode 100644
index 00000000..4f6739c5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/automation/build-artifacts.packages
@@ -0,0 +1,8 @@
+dnf-utils
+git
+ansible
+python3-voluptuous
+python3-pycodestyle
+python3-pylint
+yamllint
+glibc-langpack-en
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/automation/build-artifacts.sh b/collections-debian-merged/ansible_collections/ovirt/ovirt/automation/build-artifacts.sh
new file mode 100755
index 00000000..ed87c79f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/automation/build-artifacts.sh
@@ -0,0 +1,77 @@
+#!/bin/bash -xe
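+# CI build job: produces the ovirt.ovirt (Galaxy) and redhat.rhv
+# (Automation Hub) collection tarballs plus RPMs, then runs sanity and
+# lint checks against the installed collection.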
+
+ROOT_PATH=$PWD
+
+# remove any previous artifacts
+rm -rf ../ansible_collections
+rm -f ./*tar.gz
+
+# Create exported-artifacts
+[[ -d exported-artifacts ]] || mkdir -p $ROOT_PATH/exported-artifacts
+
+# Create builds
+
+./build.sh build ovirt $ROOT_PATH
+./build.sh build rhv $ROOT_PATH
+
+OVIRT_BUILD=$ROOT_PATH/ansible_collections/ovirt/ovirt/
+RHV_BUILD=$ROOT_PATH/ansible_collections/redhat/rhv
+
+cd $OVIRT_BUILD
+# create the src.rpm
+rpmbuild \
+ -D "_srcrpmdir $ROOT_PATH/output" \
+ -D "_topmdir $ROOT_PATH/rpmbuild" \
+ -ts ./*.gz
+
+# install any build requirements
+yum-builddep $ROOT_PATH/output/*src.rpm
+
+# Remove the tarball so it will not be included in galaxy build
+mv ./*.gz $ROOT_PATH/exported-artifacts/
+
+# Overwrite the GitHub README with the dynamically generated one
+mv ./README.md.in ./README.md
+
+# create tar for galaxy
+ansible-galaxy collection build
+
+# create the rpms
+rpmbuild \
+ -D "_rpmdir $ROOT_PATH/output" \
+ -D "_topmdir $ROOT_PATH/rpmbuild" \
+ --rebuild $ROOT_PATH/output/*.src.rpm
+
+cd $RHV_BUILD
+
+# Remove the tarball so it will not be included in automation hub build
+rm -rf *.gz
+
+# Overwrite the GitHub README with the dynamically generated one
+mv ./README.md.in ./README.md
+
+# create tar for automation hub
+ansible-galaxy collection build
+
+# Store any relevant artifacts in exported-artifacts for the ci system to
+# archive
+find $ROOT_PATH/output -iname \*rpm -exec mv "{}" $ROOT_PATH/exported-artifacts/ \;
+
+# Export build for Ansible Galaxy
+mv $OVIRT_BUILD/*tar.gz $ROOT_PATH/exported-artifacts/
+# Export build for Automation Hub
+mv $RHV_BUILD/*tar.gz $ROOT_PATH/exported-artifacts/
+
+COLLECTION_DIR="/usr/local/share/ansible/collections/ansible_collections/ovirt/ovirt"
+export ANSIBLE_LIBRARY="$COLLECTION_DIR/plugins/modules"
+mkdir -p $COLLECTION_DIR
+cp -r $OVIRT_BUILD/* $COLLECTION_DIR
+cd $COLLECTION_DIR
+
+pip3 install rstcheck antsibull-changelog ansible-lint
+
+ansible-test sanity
+/usr/local/bin/antsibull-changelog lint
+/usr/local/bin/ansible-lint roles/* -x 204
+
+cd $ROOT_PATH
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/automation/check-patch.packages b/collections-debian-merged/ansible_collections/ovirt/ovirt/automation/check-patch.packages
new file mode 100644
index 00000000..4f6739c5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/automation/check-patch.packages
@@ -0,0 +1,8 @@
+dnf-utils
+git
+ansible
+python3-voluptuous
+python3-pycodestyle
+python3-pylint
+yamllint
+glibc-langpack-en
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/automation/check-patch.sh b/collections-debian-merged/ansible_collections/ovirt/ovirt/automation/check-patch.sh
new file mode 100755
index 00000000..ed87c79f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/automation/check-patch.sh
@@ -0,0 +1,77 @@
+#!/bin/bash -xe
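+# Per-patch CI check: rebuilds all collection artifacts and runs
+# ansible-test sanity, changelog lint, and ansible-lint on the result.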
+
+ROOT_PATH=$PWD
+
+# remove any previous artifacts
+rm -rf ../ansible_collections
+rm -f ./*tar.gz
+
+# Create exported-artifacts
+[[ -d exported-artifacts ]] || mkdir -p $ROOT_PATH/exported-artifacts
+
+# Create builds
+
+./build.sh build ovirt $ROOT_PATH
+./build.sh build rhv $ROOT_PATH
+
+OVIRT_BUILD=$ROOT_PATH/ansible_collections/ovirt/ovirt/
+RHV_BUILD=$ROOT_PATH/ansible_collections/redhat/rhv
+
+cd $OVIRT_BUILD
+# create the src.rpm
+rpmbuild \
+ -D "_srcrpmdir $ROOT_PATH/output" \
+ -D "_topmdir $ROOT_PATH/rpmbuild" \
+ -ts ./*.gz
+
+# install any build requirements
+yum-builddep $ROOT_PATH/output/*src.rpm
+
+# Remove the tarball so it will not be included in galaxy build
+mv ./*.gz $ROOT_PATH/exported-artifacts/
+
+# Overwrite the GitHub README with the dynamically generated one
+mv ./README.md.in ./README.md
+
+# create tar for galaxy
+ansible-galaxy collection build
+
+# create the rpms
+rpmbuild \
+ -D "_rpmdir $ROOT_PATH/output" \
+ -D "_topmdir $ROOT_PATH/rpmbuild" \
+ --rebuild $ROOT_PATH/output/*.src.rpm
+
+cd $RHV_BUILD
+
+# Remove the tarball so it will not be included in automation hub build
+rm -rf *.gz
+
+# Overwrite the GitHub README with the dynamically generated one
+mv ./README.md.in ./README.md
+
+# create tar for automation hub
+ansible-galaxy collection build
+
+# Store any relevant artifacts in exported-artifacts for the ci system to
+# archive
+find $ROOT_PATH/output -iname \*rpm -exec mv "{}" $ROOT_PATH/exported-artifacts/ \;
+
+# Export build for Ansible Galaxy
+mv $OVIRT_BUILD/*tar.gz $ROOT_PATH/exported-artifacts/
+# Export build for Automation Hub
+mv $RHV_BUILD/*tar.gz $ROOT_PATH/exported-artifacts/
+
+COLLECTION_DIR="/usr/local/share/ansible/collections/ansible_collections/ovirt/ovirt"
+export ANSIBLE_LIBRARY="$COLLECTION_DIR/plugins/modules"
+mkdir -p $COLLECTION_DIR
+cp -r $OVIRT_BUILD/* $COLLECTION_DIR
+cd $COLLECTION_DIR
+
+pip3 install rstcheck antsibull-changelog ansible-lint
+
+ansible-test sanity
+/usr/local/bin/antsibull-changelog lint
+/usr/local/bin/ansible-lint roles/* -x 204
+
+cd $ROOT_PATH
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/bindep.txt b/collections-debian-merged/ansible_collections/ovirt/ovirt/bindep.txt
new file mode 100644
index 00000000..cd4fa528
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/bindep.txt
@@ -0,0 +1,9 @@
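+# Distro packages consumed by the bindep tool; the bracketed selectors
+# scope each entry to a platform family and an optional profile.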
+openssl-devel [platform:rpm compile]
+gcc [platform:rpm compile]
+libcurl-devel [platform:rpm compile]
+libxml2-devel [platform:rpm compile]
+python3-pycurl [platform:rpm]
+python3-netaddr [platform:rpm]
+python3-jmespath [platform:rpm]
+python3-passlib [platform:rpm epel]
+qemu-img [platform:rpm]
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/build.sh b/collections-debian-merged/ansible_collections/ovirt/ovirt/build.sh
new file mode 100755
index 00000000..ad198549
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/build.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+
+VERSION="1.3.0"
+MILESTONE=
+RPM_RELEASE="1"
+
+BUILD_TYPE=$2
+BUILD_PATH=$3
+
+if [[ $BUILD_TYPE = "rhv" ]]; then
+COLLECTION_NAMESPACE="redhat"
+COLLECTION_NAME="rhv"
+else
+COLLECTION_NAMESPACE="ovirt"
+COLLECTION_NAME="ovirt"
+fi
+PACKAGE_NAME="ovirt-ansible-collection"
+PREFIX=/usr/local
+DATAROOT_DIR=$PREFIX/share
+COLLECTIONS_DATAROOT_DIR=$DATAROOT_DIR/ansible/collections/ansible_collections
+DOC_DIR=$DATAROOT_DIR/doc
+PKG_DATA_DIR=${PKG_DATA_DIR:-$COLLECTIONS_DATAROOT_DIR}
+PKG_DATA_DIR_ORIG=${PKG_DATA_DIR_ORIG:-$PKG_DATA_DIR}
+PKG_DOC_DIR=${PKG_DOC_DIR:-$DOC_DIR/$PACKAGE_NAME}
+
+RPM_VERSION=$VERSION
+PACKAGE_VERSION=$VERSION
+[ -n "$MILESTONE" ] && PACKAGE_VERSION+="_$MILESTONE"
+DISPLAY_VERSION=$PACKAGE_VERSION
+
+TARBALL="$PACKAGE_NAME-$PACKAGE_VERSION.tar.gz"
+
+dist() {
+ echo "Creating tar archive '$TARBALL' ... "
+ sed \
+ -e "s|@RPM_VERSION@|$RPM_VERSION|g" \
+ -e "s|@RPM_RELEASE@|$RPM_RELEASE|g" \
+ -e "s|@PACKAGE_NAME@|$PACKAGE_NAME|g" \
+ -e "s|@PACKAGE_VERSION@|$PACKAGE_VERSION|g" \
+ < ovirt-ansible-collection.spec.in > ovirt-ansible-collection.spec
+
+ find ./* -not -name '*.spec' -type f | tar --files-from /proc/self/fd/0 -czf "$TARBALL" ovirt-ansible-collection.spec
+ echo "tar archive '$TARBALL' created."
+}
+
+install() {
+ echo "Installing data..."
+ mkdir -p $PKG_DATA_DIR/$COLLECTION_NAMESPACE/$COLLECTION_NAME
+ mkdir -p $PKG_DOC_DIR
+
+ cp -pR plugins/ roles/ $PKG_DATA_DIR/$COLLECTION_NAMESPACE/$COLLECTION_NAME
+
+ if [[ $BUILD_TYPE = "rhv" ]]; then
+ echo "Creating link to ovirt.ovirt"
+ mkdir -p $PKG_DATA_DIR/ovirt
+ ln -f -s $PKG_DATA_DIR_ORIG/redhat/rhv $PKG_DATA_DIR/ovirt/ovirt
+ fi
+ echo "Installation done."
+}
+
+rename() {
+ echo "Renaming ovirt to $COLLECTION_NAMESPACE and ovirt to $COLLECTION_NAME"
+ for file in $(find ./* -type f)
+ do
+ sed -i -e "s/ovirt/$COLLECTION_NAMESPACE/g" -e "s/ovirt/$COLLECTION_NAME/g" $file
+ done
+}
+
+build() {
+ if [[ $BUILD_PATH ]]; then
+ BUILD_PATH=$BUILD_PATH/ansible_collections/$COLLECTION_NAMESPACE/$COLLECTION_NAME/
+ mkdir -p $BUILD_PATH
+ echo "The copying files to $BUILD_PATH"
+ cp --parents $(git ls-files) $BUILD_PATH
+ cd $BUILD_PATH
+ rename
+ dist
+ fi
+}
+
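+# Dispatch: the first argument names the function to run, e.g.
+# "./build.sh dist" or "./build.sh build rhv /path/to/output".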
+$1
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/changelogs/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/changelogs/README.md
new file mode 100644
index 00000000..d14345ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/changelogs/README.md
@@ -0,0 +1,28 @@
+# Fragments
+
+## Content of a fragment
+
+### Example
+
+```yaml
+---
+minor_changes:
+ - ovirt_disk - Add backup (https://github.com/oVirt/ovirt-ansible-collection/pull/57).
+
+```
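+
+A fragment is a small YAML file dropped into `changelogs/fragments/`; for
+instance, one could be created from the shell like this (the file name and
+PR link are illustrative):
+
+```bash
+# Sketch: add a new changelog fragment, to be linted with the commands below.
+cat > changelogs/fragments/99-ovirt_vm-fix-example.yaml <<'EOF'
+---
+bugfixes:
+  - ovirt_vm - Fix example issue (https://github.com/oVirt/ovirt-ansible-collection/pull/99).
+EOF
+```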
+
+### Types
+
+- major_changes
+- minor_changes
+- bugfixes
+- breaking_changes
+- deprecated_features
+- removed_features
+- security_fixes
+
+## Commands
+
+`antsibull-changelog lint`
+
+`antsibull-changelog release` \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/changelogs/changelog.yaml b/collections-debian-merged/ansible_collections/ovirt/ovirt/changelogs/changelog.yaml
new file mode 100644
index 00000000..dad83523
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/changelogs/changelog.yaml
@@ -0,0 +1,357 @@
+ancestor: null
+releases:
+ 1.0.0:
+ changes:
+ bugfixes:
+ - ovirt_snapshot - Disk id was incorrectly set as disk_snapshot_id (https://github.com/oVirt/ovirt-ansible-collection/pull/5).
+ - ovirt_storage_domain - Fix update_check warning_low_space (https://github.com/oVirt/ovirt-ansible-collection/pull/10).
+ - ovirt_vm - Remove deprecated warning of boot params (https://github.com/oVirt/ovirt-ansible-collection/pull/3).
+ minor_changes:
+ - ovirt_cluster - Add migration_encrypted option (https://github.com/oVirt/ovirt-ansible-collection/pull/17).
+ - ovirt_vm - Add bios_type (https://github.com/oVirt/ovirt-ansible-collection/pull/15).
+ fragments:
+ - 10-ovirt_storage_domain-fix-update_check-warning_low_space.yaml
+ - 15-ovirt_vm-add-bios_type.yaml
+ - 17-ovirt_cluster-add-migration_encrypted.yaml
+ - 3-ovirt_vm-remove-deprecated-warning-boot-params.yaml
+ - 5-ovirt_snapshot-disk id-was-incorrectly set-as-disk_snapshot_id.yaml
+ modules:
+ - description: Module to manage affinity groups in oVirt/RHV
+ name: ovirt_affinity_group
+ namespace: ''
+ - description: Module to manage affinity labels in oVirt/RHV
+ name: ovirt_affinity_label
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV affinity labels
+ name: ovirt_affinity_label_info
+ namespace: ''
+ - description: Retrieve information about the oVirt/RHV API
+ name: ovirt_api_info
+ namespace: ''
+ - description: Module to manage authentication to oVirt/RHV
+ name: ovirt_auth
+ namespace: ''
+ - description: Module to manage clusters in oVirt/RHV
+ name: ovirt_cluster
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV clusters
+ name: ovirt_cluster_info
+ namespace: ''
+ - description: Module to manage data centers in oVirt/RHV
+ name: ovirt_datacenter
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV datacenters
+ name: ovirt_datacenter_info
+ namespace: ''
+ - description: Module to manage Virtual Machine and floating disks in oVirt/RHV
+ name: ovirt_disk
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV disks
+ name: ovirt_disk_info
+ namespace: ''
+ - description: Create or delete an event in oVirt/RHV
+ name: ovirt_event
+ namespace: ''
+ - description: This module can be used to retrieve information about one or more
+ oVirt/RHV events
+ name: ovirt_event_info
+ namespace: ''
+ - description: Module to manage external providers in oVirt/RHV
+ name: ovirt_external_provider
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV external providers
+ name: ovirt_external_provider_info
+ namespace: ''
+ - description: Module to manage groups in oVirt/RHV
+ name: ovirt_group
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV groups
+ name: ovirt_group_info
+ namespace: ''
+ - description: Module to manage hosts in oVirt/RHV
+ name: ovirt_host
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV hosts
+ name: ovirt_host_info
+ namespace: ''
+ - description: Module to manage host networks in oVirt/RHV
+ name: ovirt_host_network
+ namespace: ''
+ - description: Module to manage power management of hosts in oVirt/RHV
+ name: ovirt_host_pm
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV HostStorages (applicable
+ only for block storage)
+ name: ovirt_host_storage_info
+ namespace: ''
+ - description: Module to manage Instance Types in oVirt/RHV
+ name: ovirt_instance_type
+ namespace: ''
+ - description: Module to manage jobs in oVirt/RHV
+ name: ovirt_job
+ namespace: ''
+ - description: Module to manage MAC pools in oVirt/RHV
+ name: ovirt_mac_pool
+ namespace: ''
+ - description: Module to manage logical networks in oVirt/RHV
+ name: ovirt_network
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV networks
+ name: ovirt_network_info
+ namespace: ''
+ - description: Module to manage network interfaces of Virtual Machines in oVirt/RHV
+ name: ovirt_nic
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV virtual machine
+ network interfaces
+ name: ovirt_nic_info
+ namespace: ''
+ - description: Module to manage permissions of users/groups in oVirt/RHV
+ name: ovirt_permission
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV permissions
+ name: ovirt_permission_info
+ namespace: ''
+ - description: Module to manage datacenter quotas in oVirt/RHV
+ name: ovirt_quota
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV quotas
+ name: ovirt_quota_info
+ namespace: ''
+ - description: Module to manage roles in oVirt/RHV
+ name: ovirt_role
+ namespace: ''
+ - description: Retrieve information about one or more oVirt scheduling policies
+ name: ovirt_scheduling_policy_info
+ namespace: ''
+ - description: Module to manage Virtual Machine Snapshots in oVirt/RHV
+ name: ovirt_snapshot
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV virtual machine
+ snapshots
+ name: ovirt_snapshot_info
+ namespace: ''
+ - description: Module to manage storage connections in oVirt
+ name: ovirt_storage_connection
+ namespace: ''
+ - description: Module to manage storage domains in oVirt/RHV
+ name: ovirt_storage_domain
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV storage domains
+ name: ovirt_storage_domain_info
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV templates related
+ to a storage domain.
+ name: ovirt_storage_template_info
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV virtual machines
+ related to a storage domain.
+ name: ovirt_storage_vm_info
+ namespace: ''
+ - description: Module to manage tags in oVirt/RHV
+ name: ovirt_tag
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV tags
+ name: ovirt_tag_info
+ namespace: ''
+ - description: Module to manage virtual machine templates in oVirt/RHV
+ name: ovirt_template
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV templates
+ name: ovirt_template_info
+ namespace: ''
+ - description: Module to manage users in oVirt/RHV
+ name: ovirt_user
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV users
+ name: ovirt_user_info
+ namespace: ''
+ - description: Module to manage Virtual Machines in oVirt/RHV
+ name: ovirt_vm
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV virtual machines
+ name: ovirt_vm_info
+ namespace: ''
+ - description: Module to manage VM pools in oVirt/RHV
+ name: ovirt_vmpool
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV VM pools
+ name: ovirt_vmpool_info
+ namespace: ''
+ - description: Module to manage vNIC profile of network in oVirt/RHV
+ name: ovirt_vnic_profile
+ namespace: ''
+ - description: Retrieve information about one or more oVirt/RHV vNIC profiles
+ name: ovirt_vnic_profile_info
+ namespace: ''
+ plugins:
+ inventory:
+ - description: oVirt inventory source
+ name: ovirt
+ namespace: null
+ release_date: '2020-04-09'
+ 1.1.0:
+ changes:
+ bugfixes:
+ - ovirt_disk - Fix activate (https://github.com/oVirt/ovirt-ansible-collection/pull/61).
+ - ovirt_host_network - Fix custom_properties default value (https://github.com/oVirt/ovirt-ansible-collection/pull/65).
+ - ovirt_quota - Fix vcpu_limit (https://github.com/oVirt/ovirt-ansible-collection/pull/44).
+ - ovirt_vm - Fix cd_iso get all disks from storage domains (https://github.com/oVirt/ovirt-ansible-collection/pull/66).
+ - ovirt_vm - Fix cd_iso search by name (https://github.com/oVirt/ovirt-ansible-collection/pull/51).
+ major_changes:
+ - ovirt_disk - Add backup (https://github.com/oVirt/ovirt-ansible-collection/pull/57).
+ - ovirt_disk - Support direct upload/download (https://github.com/oVirt/ovirt-ansible-collection/pull/35).
+ - ovirt_host - Add ssh_port (https://github.com/oVirt/ovirt-ansible-collection/pull/60).
+ - ovirt_vm_os_info - Creation of module (https://github.com/oVirt/ovirt-ansible-collection/pull/26).
+ minor_changes:
+ - ovirt inventory - Add creation_time (https://github.com/oVirt/ovirt-ansible-collection/pull/34).
+ - ovirt inventory - Set inventory plugin insecure if no cafile defined (https://github.com/oVirt/ovirt-ansible-collection/pull/58).
+ - ovirt_disk - Add upload image warning for correct format (https://github.com/oVirt/ovirt-ansible-collection/pull/22).
+ - ovirt_disk - Force wait when uploading disk (https://github.com/oVirt/ovirt-ansible-collection/pull/43).
+ - ovirt_disk - Upload_image_path autodetect size (https://github.com/oVirt/ovirt-ansible-collection/pull/19).
+ - ovirt_network - Add support of removing vlan_tag (https://github.com/oVirt/ovirt-ansible-collection/pull/21).
+ - ovirt_vm - Add documentation for custom_script under sysprep (https://github.com/oVirt/ovirt-ansible-collection/pull/52).
+ - ovirt_vm - Hard code nic on_boot to true (https://github.com/oVirt/ovirt-ansible-collection/pull/45).
+ fragments:
+ - 19-ovirt_disk-upload_image_path-autodetect-size.yaml
+ - 21-ovirt_network-add-support-of-removing-vlan_tag.yaml
+ - 22-ovirt_disk-add-upload-image-warning-for-correct-format.yaml
+ - 26-add-ovirt_vm_os_info.yaml
+ - 34-ovirt-inventory-add-creation_time.yaml
+ - 35-ovirt_disk-support-direct-upload-download.yaml
+ - 43-ovirt_disk-force-wait-when-uploading-disk.yaml
+ - 44-ovirt_quota-fix-vcpu_limit-type.yaml
+ - 45-ovirt_vm-hard-code-nic-on_boot-to-true.yaml
+ - 51-ovirt_vm-fix-cd_iso-search-by-name.yaml
+ - 52-ovirt_vm-add-documentation-for-custom_script-under-sysprep.yaml
+ - 57-ovirt_disk-add-backup.yaml
+ - 58-ovirt-inventory-insecure-if-no-cafile-defined.yaml
+ - 60-ovirt_host-add-ssh_port.yaml
+ - 61-ovirt_disk-fix-activate.yaml
+ - 65-ovirt_host_network-fix-custom_properties-default-value.yaml
+ - 66-ovirt_vm-fix-cd_iso-get-all-disks-from-storage-domains.yaml
+ modules:
+ - description: Retrieve information on all supported oVirt/RHV operating systems
+ name: ovirt_vm_os_info
+ namespace: ''
+ release_date: '2020-08-12'
+ 1.1.1:
+ changes:
+ minor_changes:
+ - ovirt_permission - Fix FQCN documentation (https://github.com/oVirt/ovirt-ansible-collection/pull/63).
+ release_date: '2020-08-12'
+ 1.1.2:
+ release_date: '2020-08-17'
+ 1.2.0:
+ changes:
+ bugfixes:
+ - 01_create_target_hosted_engine_vm - Force basic authentication (https://github.com/oVirt/ovirt-ansible-collection/pull/131).
+ - hosted_engine_setup - Allow uppercase characters in mac address (https://github.com/oVirt/ovirt-ansible-collection/pull/150).
+ - hosted_engine_setup - set custom bios type of hosted-engine VM to Q35+SeaBIOS
+ (https://github.com/oVirt/ovirt-ansible-collection/pull/129).
+ - hosted_engine_setup - use zcat instead of gzip (https://github.com/oVirt/ovirt-ansible-collection/pull/130).
+ - ovirt inventory - Add close of connection at the end (https://github.com/oVirt/ovirt-ansible-collection/pull/122).
+ - ovirt_disk - don't move disk when already in storage_domain (https://github.com/oVirt/ovirt-ansible-collection/pull/135).
+ - ovirt_disk - fix upload when direct upload fails (https://github.com/oVirt/ovirt-ansible-collection/pull/120).
+ - ovirt_vm - Fix template search (https://github.com/oVirt/ovirt-ansible-collection/pull/132).
+ - ovirt_vm - Rename q35_sea to q35_sea_bios (https://github.com/oVirt/ovirt-ansible-collection/pull/111).
+ major_changes:
+ - cluster_upgrade - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/94).
+ - disaster_recovery - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/134).
+ - engine_setup - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/69).
+ - hosted_engine_setup - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/106).
+ - image_template - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/95).
+ - infra - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/92).
+ - manageiq - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/97).
+ - repositories - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/96).
+ - shutdown_env - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/112).
+ - vm_infra - Migrate role (https://github.com/oVirt/ovirt-ansible-collection/pull/93).
+ minor_changes:
+ - Add GPL license (https://github.com/oVirt/ovirt-ansible-collection/pull/101).
+ - hosted_engine_setup - Add compatibility_version (https://github.com/oVirt/ovirt-ansible-collection/pull/125).
+ - ovirt_disk - ignore move of HE disks (https://github.com/oVirt/ovirt-ansible-collection/pull/162).
+ - ovirt_nic - Add template_version (https://github.com/oVirt/ovirt-ansible-collection/pull/145).
+ - ovirt_nic_info - Add template (https://github.com/oVirt/ovirt-ansible-collection/pull/146).
+ - ovirt_vm_info - Add current_cd (https://github.com/oVirt/ovirt-ansible-collection/pull/144).
+ fragments:
+ - add-cluster_upgrade-role.yml
+ - add-disaster_recovery-role.yml
+ - add-engine_setup-role.yml
+ - add-gpl-license.yml
+ - add-hosted_engine_setup-role.yml
+ - add-image_template-role.yml
+ - add-infra-role.yml
+ - add-manageiq-role.yml
+ - add-repositories-role.yml
+ - add-shutdown_env-role.yml
+ - add-vm_infra-role.yml
+ - basic_auth-fix_create_target_hosted_engine_vm.yml
+ - hosted_engine_setup-add-compatibility_version.yml
+ - hosted_engine_setup-allow-uppercase-in-mac-address.yml
+ - hosted_engine_setup-set-custom-bios-type.yml
+ - hosted_engine_setup-use-zcat-instead-of-gzip.yml
+ - ovirt-inventory-add-connection-close.yml
+ - ovirt_disk-fix-move.yml
+ - ovirt_disk-fix-upload-when-direct-upload-fails.yml
+ - ovirt_disk-ignore-he-disk-move.yml
+ - ovirt_nic-add-template_version.yml
+ - ovirt_nic_info-add-template.yml
+ - ovirt_vm-fix-template-search.yml
+ - ovirt_vm-rename-q35_sea.yml
+ - ovirt_vm_info-add-current_cd.yml
+ release_date: '2020-10-27'
+ 1.2.1:
+ changes:
+ bugfixes:
+ - disaster_recovery - Fix multiple configuration issues like paths, "~" support,
+ user input messages, etc. (https://github.com/oVirt/ovirt-ansible-collection/pull/160).
+ fragments:
+ - disaster_recovery-fix-configuration-issues.yml
+ release_date: '2020-11-02'
+ 1.2.2:
+ changes:
+ bugfixes:
+ - hosted_engine_setup - Clean VNC encryption config (https://github.com/oVirt/ovirt-ansible-collection/pull/175/).
+ - inventory plugin - Fix timestamp for Python 2 (https://github.com/oVirt/ovirt-ansible-collection/pull/173).
+ fragments:
+ - hosted_engine_setup-clean-vnc-encryption-config.yml
+ - inventory-plugin-fix-python2-timestamp-issue.yml
+ release_date: '2020-11-12'
+ 1.2.3:
+ changes:
+ minor_changes:
+ - engine_setup - Add missing restore task file and vars file (https://github.com/oVirt/ovirt-ansible-collection/pull/180).
+ - hosted_engine_setup - Add after_add_host hook (https://github.com/oVirt/ovirt-ansible-collection/pull/181).
+ fragments:
+ - engine_setup-add-missing-restore-file.yml
+ - he_add-after_add_host-hook.yml
+ release_date: '2020-11-30'
+ 1.2.4:
+ changes:
+ minor_changes:
+ - infra - don't require password for user (https://github.com/oVirt/ovirt-ansible-collection/pull/195).
+ - inventory - correct os_type name (https://github.com/oVirt/ovirt-ansible-collection/pull/194).
+ - ovirt_disk - automatically detect virtual size of qcow image (https://github.com/oVirt/ovirt-ansible-collection/pull/183).
+ fragments:
+ - 183-ovirt_disk-fix-upload-detection.yml
+ - 194-inventory-correct-name-of-os_type.yml
+ - 195-infra-dont-require-password-for-user.yml
+ release_date: '2020-12-14'
+ 1.3.0:
+ changes:
+ major_changes:
+ - ovirt_system_option_info - Add new module (https://github.com/oVirt/ovirt-ansible-collection/pull/206).
+ minor_changes:
+ - ansible-builder - Update bindep (https://github.com/oVirt/ovirt-ansible-collection/pull/197).
+ - hosted_engine_setup - Collect all engine /var/log (https://github.com/oVirt/ovirt-ansible-collection/pull/202).
+ - hosted_engine_setup - Use ovirt_system_option_info instead of REST API (https://github.com/oVirt/ovirt-ansible-collection/pull/209).
+ - ovirt_disk - Add install warning (https://github.com/oVirt/ovirt-ansible-collection/pull/208).
+ - ovirt_info - Add auth suboptions to the documentation fragment (https://github.com/oVirt/ovirt-ansible-collection/pull/205).
+ fragments:
+ - 197-update-bindep.yml
+ - 202-hosted_engine_setup-collect-all-engine-log.yml
+ - 205-ovirt_info-fragment-add-auth-suboptions-docs.yml
+ - 206-add-ovirt_system_option_info.yml
+ - 208-ovirt_disk-add-install-warning.yml
+ - 209-hosted_engine_setup-use-ovirt_system_option_info.yml
+ release_date: '2021-01-28'
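Each name under `fragments:` above refers to a small YAML file under changelogs/fragments/ that antsibull-changelog consumes when assembling a release. A minimal sketch of one such fragment, reconstructed here from the bugfix text above rather than copied from the repository (61-ovirt_disk-fix-activate.yaml):

bugfixes:
  - ovirt_disk - Fix activate (https://github.com/oVirt/ovirt-ansible-collection/pull/61).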
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/changelogs/config.yaml b/collections-debian-merged/ansible_collections/ovirt/ovirt/changelogs/config.yaml
new file mode 100644
index 00000000..929a0400
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/changelogs/config.yaml
@@ -0,0 +1,31 @@
+changelog_filename_template: ../CHANGELOG.rst
+changelog_filename_version_depth: 0
+changes_file: changelog.yaml
+changes_format: combined
+ignore_other_fragment_extensions: true
+keep_fragments: false
+mention_ancestor: true
+new_plugins_after_name: removed_features
+notesdir: fragments
+prelude_section_name: release_summary
+prelude_section_title: Release Summary
+sections:
+- - major_changes
+ - Major Changes
+- - minor_changes
+ - Minor Changes
+- - breaking_changes
+ - Breaking Changes / Porting Guide
+- - deprecated_features
+ - Deprecated Features
+- - removed_features
+ - Removed Features (previously deprecated)
+- - security_fixes
+ - Security Fixes
+- - bugfixes
+ - Bugfixes
+- - known_issues
+ - Known Issues
+title: ovirt.ovirt
+trivial_section_name: trivial
+use_fqcn: true
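With `prelude_section_name: release_summary`, a release prelude is supplied the same way as any other section: a fragment whose top-level key is `release_summary`. A minimal sketch, with placeholder wording:

release_summary: |
  Minor release of the ovirt.ovirt collection.

Because `keep_fragments: false`, such fragments are removed from the `fragments` notesdir once they have been rolled into changelog.yaml.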
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/changelogs/fragments/.gitignore b/collections-debian-merged/ansible_collections/ovirt/ovirt/changelogs/fragments/.gitignore
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/changelogs/fragments/.gitignore
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/examples/filters/ovirtdiff.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/examples/filters/ovirtdiff.yml
new file mode 100644
index 00000000..bd9c1ffd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/examples/filters/ovirtdiff.yml
@@ -0,0 +1,33 @@
+- hosts: localhost
+ connection: local
+ vars_files:
+ # Contains the encrypted `engine_password` variable (created with ansible-vault)
+ - passwords.yml
+ vars:
+ myvm: centos7
+ tasks:
+ - name: Login
+ ovirt_auth:
+ url: "https://ovirt-engine.example.com/ovirt-engine/api"
+ password: "{{ engine_password | default(omit) }}"
+ username: "admin@internal"
+
+ - name: Get VM myvm
+ ovirt_vm_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: 'name={{ myvm }}'
+ next_run: false
+ register: vm
+
+ - name: Get next_run of VM myvm
+ ovirt_vm_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: 'name={{ myvm }}'
+ next_run: true
+ register: vm_next_run
+
+ - name: Print what will be changed in next run of the VM
+ debug:
+ msg: "{{ vm.ovirt_vms[0] | ovirt.ovirt.ovirtdiff(vm_next_run.ovirt_vms[0]) }}"
+ collections:
+ - ovirt.ovirt
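The play above loads passwords.yml, which is expected to hold the vault-encrypted `engine_password`. A minimal sketch of that file before encryption, with a placeholder value (encrypt it with ansible-vault before use):

---
engine_password: "changeme"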
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/examples/filters/vmips.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/examples/filters/vmips.yml
new file mode 100644
index 00000000..24706c6b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/examples/filters/vmips.yml
@@ -0,0 +1,77 @@
+- hosts: localhost
+ connection: local
+ vars:
+ myvm: centos8*
+ tasks:
+ - name: Get VMs
+ ovirt_vm_info:
+ auth:
+ url: "https://ovirt-engine.example.com/ovirt-engine/api"
+ username: "admin@internal"
+ password: "123456"
+ insecure: true
+ pattern: 'name={{ myvm }}'
+ fetch_nested: true
+ nested_attributes: ips
+ register: vms
+ - name: Print VM IP
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmip }}"
+
+ - name: Print VM all IPs
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmips }}"
+
+ - name: Print VM IPv4
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipv4 }}"
+
+ - name: Print VM all IPv4
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipsv4 }}"
+
+ - name: Print VM all IPv4 from specific network
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipsv4(network_ip='192.168.2.0/24') }}"
+
+ - name: Print VM IPv6
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipv6 }}"
+
+ - name: Print VM all IPv6
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipsv6 }}"
+
+ # *********************************************************
+ # *********************************************************
+ - name: ----
+ debug:
+ msg: "-------------------------------------"
+ # *********************************************************
+ # Print VM IPs as dictionaries with name as key
+ # *********************************************************
+ - name: Print VM IP
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmip(attr='name') }}"
+
+ - name: Print VM all IPs
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmips(attr='name') }}"
+
+ - name: Print VM IPv4
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipv4(attr='name') }}"
+
+ - name: Print VM all IPv4
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipsv4(attr='name') }}"
+
+ - name: Print VM IPv6
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipv6(attr='name') }}"
+
+ - name: Print VM all IPv6
+ debug:
+ msg: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipsv6(attr='name') }}"
+ collections:
+ - ovirt.ovirt
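The same filters can feed later tasks instead of debug output. A minimal follow-on sketch, assuming the `vms` register from the play above (the fact name is illustrative):

- name: Save the first IPv4 of each matched VM, keyed by VM name
  set_fact:
    vm_ipv4_by_name: "{{ vms.ovirt_vms | ovirt.ovirt.ovirtvmipv4(attr='name') }}"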
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/examples/ovirt_ansible_collections.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/examples/ovirt_ansible_collections.yml
new file mode 100644
index 00000000..866aab15
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/examples/ovirt_ansible_collections.yml
@@ -0,0 +1,21 @@
+---
+- name: oVirt ansible collection
+ hosts: localhost
+ connection: local
+ vars_files:
+ # Contains the encrypted `engine_password` variable (created with ansible-vault)
+ - passwords.yml
+ tasks:
+ - name: Login
+ ovirt_auth:
+ url: "https://ovirt-engine.example.com/ovirt-engine/api"
+ password: "{{ engine_password | default(omit) }}"
+ username: "admin@internal"
+ - name: Create vm
+ ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ name: vm_name
+ state: present
+ cluster: Default
+ collections:
+ - ovirt.ovirt
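The play logs in with ovirt_auth but never revokes the SSO token. A minimal sketch of the usual closing task, following the documented ovirt_auth revoke pattern:

- name: Logout
  ovirt_auth:
    state: absent
    ovirt_auth: "{{ ovirt_auth }}"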
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/licenses/Apache-license.txt b/collections-debian-merged/ansible_collections/ovirt/ovirt/licenses/Apache-license.txt
new file mode 100644
index 00000000..c4ea8b6f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/licenses/Apache-license.txt
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Red Hat, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/licenses/GPL-license.txt b/collections-debian-merged/ansible_collections/ovirt/ovirt/licenses/GPL-license.txt
new file mode 100644
index 00000000..94a9ed02
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/licenses/GPL-license.txt
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/meta/runtime.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/meta/runtime.yml
new file mode 100644
index 00000000..1b0ce723
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/meta/runtime.yml
@@ -0,0 +1,82 @@
+---
+requires_ansible: '>=2.9.10'
+action_groups:
+ ovirt:
+ - ovirt_affinity_group
+ - ovirt_affinity_label_facts
+ - ovirt_affinity_label_info
+ - ovirt_affinity_label
+ - ovirt_api_facts
+ - ovirt_api_info
+ - ovirt_auth
+ - ovirt_cluster_facts
+ - ovirt_cluster_info
+ - ovirt_cluster
+ - ovirt_datacenter_facts
+ - ovirt_datacenter_info
+ - ovirt_datacenter
+ - ovirt_disk_facts
+ - ovirt_disk_info
+ - ovirt_disk
+ - ovirt_event_facts
+ - ovirt_event_info
+ - ovirt_event
+ - ovirt_external_provider_facts
+ - ovirt_external_provider_info
+ - ovirt_external_provider
+ - ovirt_group_facts
+ - ovirt_group_info
+ - ovirt_group
+ - ovirt_host_facts
+ - ovirt_host_info
+ - ovirt_host_network
+ - ovirt_host_pm
+ - ovirt_host
+ - ovirt_host_storage_facts
+ - ovirt_host_storage_info
+ - ovirt_instance_type
+ - ovirt_job
+ - ovirt_mac_pool
+ - ovirt_network_facts
+ - ovirt_network_info
+ - ovirt_network
+ - ovirt_nic_facts
+ - ovirt_nic_info
+ - ovirt_nic
+ - ovirt_permission_facts
+ - ovirt_permission_info
+ - ovirt_permission
+ - ovirt_quota_facts
+ - ovirt_quota_info
+ - ovirt_quota
+ - ovirt_role
+ - ovirt_scheduling_policy_facts
+ - ovirt_scheduling_policy_info
+ - ovirt_snapshot_facts
+ - ovirt_snapshot_info
+ - ovirt_snapshot
+ - ovirt_storage_connection
+ - ovirt_storage_domain_facts
+ - ovirt_storage_domain_info
+ - ovirt_storage_domain
+ - ovirt_storage_template_facts
+ - ovirt_storage_template_info
+ - ovirt_storage_vm_facts
+ - ovirt_storage_vm_info
+ - ovirt_tag_facts
+ - ovirt_tag_info
+ - ovirt_tag
+ - ovirt_template_facts
+ - ovirt_template_info
+ - ovirt_template
+ - ovirt_user_facts
+ - ovirt_user_info
+ - ovirt_user
+ - ovirt_vm_facts
+ - ovirt_vm_info
+ - ovirt_vmpool_facts
+ - ovirt_vmpool_info
+ - ovirt_vmpool
+ - ovirt_vm
+ - ovirt_vnic_profile_info
+ - ovirt_vnic_profile
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/ovirt-ansible-collection.spec b/collections-debian-merged/ansible_collections/ovirt/ovirt/ovirt-ansible-collection.spec
new file mode 100644
index 00000000..cf9f8d72
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/ovirt-ansible-collection.spec
@@ -0,0 +1,142 @@
+%global namespace ovirt
+%global collectionname ovirt
+%global ansible_collections_dir ansible/collections/ansible_collections
+
+Name: ovirt-ansible-collection
+Summary: Ansible collection to manage all ovirt modules and inventory
+Version: 1.3.0
+Release: 1%{?release_suffix}%{?dist}
+Source0: http://resources.ovirt.org/pub/src/ovirt-ansible-collection/ovirt-ansible-collection-1.3.0.tar.gz
+License: ASL 2.0 and GPLv3+
+BuildArch: noarch
+Url: http://www.ovirt.org
+
+Requires: ansible >= 2.9.11
+Requires: python3-ovirt-engine-sdk4 >= 4.4.0
+Requires: python3-netaddr
+Requires: python3-jmespath
+Requires: python3-passlib
+Requires: qemu-img
+
+Obsoletes: ovirt-ansible-cluster-upgrade
+Obsoletes: ovirt-ansible-disaster-recovery
+Obsoletes: ovirt-ansible-engine-setup
+Obsoletes: ovirt-ansible-hosted-engine-setup
+Obsoletes: ovirt-ansible-image-template
+Obsoletes: ovirt-ansible-infra
+Obsoletes: ovirt-ansible-manageiq
+Obsoletes: ovirt-ansible-repositories
+Obsoletes: ovirt-ansible-roles
+Obsoletes: ovirt-ansible-shutdown-env
+Obsoletes: ovirt-ansible-vm-infra
+
+Provides: ovirt-ansible-cluster-upgrade
+Provides: ovirt-ansible-disaster-recovery
+Provides: ovirt-ansible-engine-setup
+Provides: ovirt-ansible-hosted-engine-setup
+Provides: ovirt-ansible-image-template
+Provides: ovirt-ansible-infra
+Provides: ovirt-ansible-manageiq
+Provides: ovirt-ansible-repositories
+Provides: ovirt-ansible-roles
+Provides: ovirt-ansible-shutdown-env
+Provides: ovirt-ansible-vm-infra
+
+%description
+This Ansible collection provides all the oVirt modules and the oVirt inventory plugin
+
+%prep
+%setup -c -q
+
+%build
+
+%install
+export PKG_DATA_DIR_ORIG=%{_datadir}/%{ansible_collections_dir}
+export PKG_DATA_DIR=%{buildroot}$PKG_DATA_DIR_ORIG
+export PKG_DOC_DIR=%{buildroot}%{_pkgdocdir}
+sh build.sh install %{collectionname}
+
+%files
+%{_datadir}/%{ansible_collections_dir}/%{namespace}
+%if "%{collectionname}" == "rhv"
+%{_datadir}/%{ansible_collections_dir}/ovirt
+%endif
+
+%doc README.md
+%doc examples/
+
+%license licenses
+
+%changelog
+* Thu Jan 28 2021 Martin Necas <mnecas@redhat.com> - 1.3.0-1
+- ovirt_system_option_info - Add new module
+- ansible-builder - Update bindep
+- hosted_engine_setup - Collect all engine /var/log
+- hosted_engine_setup - Use ovirt_system_option_info instead of REST API
+- ovirt_disk - Add install warning
+- ovirt_info - Fragment add auth suboptions to documentation
+
+* Mon Dec 14 2020 Martin Necas <mnecas@redhat.com> - 1.2.4-1
+- infra - Allow remove of user without password
+- inventory plugin - Correct os_type name
+- ovirt_disk - automatically detect virtual size of qcow image
+
+* Mon Nov 30 2020 Martin Necas <mnecas@redhat.com> - 1.2.3-1
+- Add hosted_engine_setup after_add_host hook
+- Add engine_setup restore files
+
+* Thu Nov 12 2020 Martin Perina <mperina@redhat.com> - 1.2.2-1
+- inventory plugin - Fix Python 2 timestamp issue
+- hosted_engine_setup - Clean VNC encryption config
+- RPM packaging - Add Provides to previous oVirt Ansible roles RPMs to
+ minimize upgrade issues
+
+* Mon Nov 2 2020 Martin Necas <mnecas@redhat.com> - 1.2.1-1
+- Split README for build and GitHub
+- Add ovirt_repositories_disable_gpg_check to repositories
+
+* Tue Oct 27 2020 Martin Necas <mnecas@redhat.com> - 1.2.0-1
+- Fix ovirt_disk ignore moving of hosted engine disks
+- Obsolete old roles
+
+* Mon Oct 12 2020 Martin Necas <mnecas@redhat.com> - 1.2.0-0.2
+- Add role disaster_recovery
+- Fix engine_setup yum.conf
+- Fix hosted_engine_setup - Allow uppercase characters in mac address
+
+* Mon Oct 12 2020 Martin Necas <mnecas@redhat.com> - 1.2.0-0.2
+- Add ovirt_vm_info current_cd
+- Add ovirt_nic_info template
+- Add ovirt_nic template_version
+- Fix ovirt_disk move
+- Fix ovirt inventory connection close
+- Fix ovirt_vm rename q35_sea to q35_sea_bios
+- Fix ovirt_vm template search
+
+* Wed Sep 16 2020 Martin Necas <mnecas@redhat.com> - 1.2.0-0.1
+- Add role cluster_upgrade
+- Add role engine_setup
+- Add role vm_infra
+- Add role infra
+- Add role manageiq
+- Add role hosted_engine_setup
+- Add role image_template
+- Add role shutdown_env
+
+* Mon Aug 17 2020 Martin Necas <mnecas@redhat.com> - 1.1.2-1
+- Add ansible changelogs
+
+* Wed Aug 12 2020 Martin Necas <mnecas@redhat.com> - 1.1.1-1
+- Fix ovirt_permission FQCNs
+
+* Wed Aug 12 2020 Martin Necas <mnecas@redhat.com> - 1.1.0-1
+- Add ovirt_vm_os_info module
+- Add ovirt_disk backup
+- Add ovirt_disk autodetect size when uploading
+- Add ovirt_host add ssh_port
+- Add ovirt_network support of removing vlan_tag
+- Fix ovirt_disk upload
+
+
+* Thu Apr 9 2020 Martin Necas <mnecas@redhat.com> - 1.0.0-1
+- Initial release
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/ovirt-ansible-collection.spec.in b/collections-debian-merged/ansible_collections/ovirt/ovirt/ovirt-ansible-collection.spec.in
new file mode 100644
index 00000000..3a08d64d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/ovirt-ansible-collection.spec.in
@@ -0,0 +1,142 @@
+%global namespace ovirt
+%global collectionname ovirt
+%global ansible_collections_dir ansible/collections/ansible_collections
+
+Name: @PACKAGE_NAME@
+Summary: Ansible collection to manage all ovirt modules and inventory
+Version: @RPM_VERSION@
+Release: @RPM_RELEASE@%{?release_suffix}%{?dist}
+Source0: http://resources.ovirt.org/pub/src/@PACKAGE_NAME@/@PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz
+License: ASL 2.0 and GPLv3+
+BuildArch: noarch
+Url: http://www.ovirt.org
+
+Requires: ansible >= 2.9.11
+Requires: python3-ovirt-engine-sdk4 >= 4.4.0
+Requires: python3-netaddr
+Requires: python3-jmespath
+Requires: python3-passlib
+Requires: qemu-img
+
+Obsoletes: ovirt-ansible-cluster-upgrade
+Obsoletes: ovirt-ansible-disaster-recovery
+Obsoletes: ovirt-ansible-engine-setup
+Obsoletes: ovirt-ansible-hosted-engine-setup
+Obsoletes: ovirt-ansible-image-template
+Obsoletes: ovirt-ansible-infra
+Obsoletes: ovirt-ansible-manageiq
+Obsoletes: ovirt-ansible-repositories
+Obsoletes: ovirt-ansible-roles
+Obsoletes: ovirt-ansible-shutdown-env
+Obsoletes: ovirt-ansible-vm-infra
+
+Provides: ovirt-ansible-cluster-upgrade
+Provides: ovirt-ansible-disaster-recovery
+Provides: ovirt-ansible-engine-setup
+Provides: ovirt-ansible-hosted-engine-setup
+Provides: ovirt-ansible-image-template
+Provides: ovirt-ansible-infra
+Provides: ovirt-ansible-manageiq
+Provides: ovirt-ansible-repositories
+Provides: ovirt-ansible-roles
+Provides: ovirt-ansible-shutdown-env
+Provides: ovirt-ansible-vm-infra
+
+%description
+This Ansible collection provides all the oVirt modules and the oVirt inventory plugin
+
+%prep
+%setup -c -q
+
+%build
+
+%install
+export PKG_DATA_DIR_ORIG=%{_datadir}/%{ansible_collections_dir}
+export PKG_DATA_DIR=%{buildroot}$PKG_DATA_DIR_ORIG
+export PKG_DOC_DIR=%{buildroot}%{_pkgdocdir}
+sh build.sh install %{collectionname}
+
+%files
+%{_datadir}/%{ansible_collections_dir}/%{namespace}
+%if "%{collectionname}" == "rhv"
+%{_datadir}/%{ansible_collections_dir}/ovirt
+%endif
+
+%doc README.md
+%doc examples/
+
+%license licenses
+
+%changelog
+* Thu Jan 28 2021 Martin Necas <mnecas@redhat.com> - 1.3.0-1
+- ovirt_system_option_info - Add new module
+- ansible-builder - Update bindep
+- hosted_engine_setup - Collect all engine /var/log
+- hosted_engine_setup - Use ovirt_system_option_info instead of REST API
+- ovirt_disk - Add install warning
+- ovirt_info - Fragment add auth suboptions to documentation
+
+* Mon Dec 14 2020 Martin Necas <mnecas@redhat.com> - 1.2.4-1
+- infra - Allow remove of user without password
+- inventory plugin - Correct os_type name
+- ovirt_disk - automatically detect virtual size of qcow image
+
+* Mon Nov 30 2020 Martin Necas <mnecas@redhat.com> - 1.2.3-1
+- Add hosted_engine_setup after_add_host hook
+- Add engine_setup restore files
+
+* Thu Nov 12 2020 Martin Perina <mperina@redhat.com> - 1.2.2-1
+- inventory plugin - Fix Python 2 timestamp issue
+- hosted_engine_setup - Clean VNC encryption config
+- RPM packaging - Add Provides to previous oVirt Ansible roles RPMs to
+ minimize upgrade issues
+
+* Mon Nov 2 2020 Martin Necas <mnecas@redhat.com> - 1.2.1-1
+- Split README for build and GitHub
+- Add ovirt_repositories_disable_gpg_check to repositories
+
+* Tue Oct 27 2020 Martin Necas <mnecas@redhat.com> - 1.2.0-1
+- Fix ovirt_disk ignore moving of hosted engine disks
+- Obsolete old roles
+
+* Mon Oct 12 2020 Martin Necas <mnecas@redhat.com> - 1.2.0-0.2
+- Add role disaster_recovery
+- Fix engine_setup yum.conf
+- Fix hosted_engine_setup - Allow uppercase characters in mac address
+
+* Mon Oct 12 2020 Martin Necas <mnecas@redhat.com> - 1.2.0-0.2
+- Add ovirt_vm_info current_cd
+- Add ovirt_nic_info template
+- Add ovirt_nic template_version
+- Fix ovirt_disk move
+- Fix ovirt inventory connection close
+- Fix ovirt_vm rename q35_sea to q35_sea_bios
+- Fix ovirt_vm template search
+
+* Wed Sep 16 2020 Martin Necas <mnecas@redhat.com> - 1.2.0-0.1
+- Add role cluster_upgrade
+- Add role engine_setup
+- Add role vm_infra
+- Add role infra
+- Add role manageiq
+- Add role hosted_engine_setup
+- Add role image_template
+- Add role shutdown_env
+
+* Mon Aug 17 2020 Martin Necas <mnecas@redhat.com> - 1.1.2-1
+- Add ansible changelogs
+
+* Wed Aug 12 2020 Martin Necas <mnecas@redhat.com> - 1.1.1-1
+- Fix ovirt_permission FQCNs
+
+* Wed Aug 12 2020 Martin Necas <mnecas@redhat.com> - 1.1.0-1
+- Add ovirt_vm_os_info module
+- Add ovirt_disk backup
+- Add ovirt_disk autodetect size when uploading
+- Add ovirt_host add ssh_port
+- Add ovirt_network support of removing vlan_tag
+- Fix ovirt_disk upload
+
+
+* Thu Apr 9 2020 Martin Necas <mnecas@redhat.com> - 1.0.0-1
+- Initial release
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/callback/stdout.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/callback/stdout.py
new file mode 100644
index 00000000..64440592
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/callback/stdout.py
@@ -0,0 +1,56 @@
+#!/usr/bin/python3
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.callback import CallbackBase
+
+# Not only visible to ansible-doc, it also 'declares' the options the plugin
+# requires and how to configure them.
+DOCUMENTATION = '''
+ callback: stdout
+ callback_type: aggregate
+ short_description: Output the log of ansible
+ version_added: "2.0"
+ description:
+    - This callback outputs the log of Ansible play tasks.
+'''
+
+
+class CallbackModule(CallbackBase):
+ """
+    This callback module outputs the information with a specific style.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'stdout'
+
+ # only needed if you ship it and don't want to enable by default
+ CALLBACK_NEEDS_WHITELIST = False
+
+ def __init__(self):
+
+ # make sure the expected objects are present, calling the base's
+ # __init__
+ super(CallbackModule, self).__init__()
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ self._display.display('FAILED: %s %s' % (host, res))
+
+ def runner_on_ok(self, host, res):
+ self._display.display('OK: %s %s' % (host, res))
+
+ def runner_on_skipped(self, host, item=None):
+ self._display.display('SKIPPED: %s' % host)
+
+ def runner_on_unreachable(self, host, res):
+ self._display.display('UNREACHABLE: %s %s' % (host, res))
+
+ def runner_on_async_failed(self, host, res, jid):
+ self._display.display('ASYNC_FAILED: %s %s %s' % (host, res, jid))
+
+ def playbook_on_import_for_host(self, host, imported_file):
+ self._display.display('IMPORTED: %s %s' % (host, imported_file))
+
+ def playbook_on_not_import_for_host(self, host, missing_file):
+ self._display.display('NOTIMPORTED: %s %s' % (host, missing_file))
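
A minimal sketch of driving the hooks above directly (assuming ansible-core and
this collection are importable; the host names and result dicts are made up):

    from ansible_collections.ovirt.ovirt.plugins.callback.stdout import CallbackModule

    cb = CallbackModule()
    cb.runner_on_ok('host1', {'changed': False})
    cb.runner_on_failed('host2', {'msg': 'boom'}, ignore_errors=True)
    # -> OK: host1 {'changed': False}
    # -> FAILED: host2 {'msg': 'boom'}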
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/doc_fragments/ovirt.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/doc_fragments/ovirt.py
new file mode 100644
index 00000000..bb9a5771
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/doc_fragments/ovirt.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard oVirt documentation fragment
+ DOCUMENTATION = r'''
+options:
+ wait:
+ description:
+      - "C(yes) if the module should wait for the entity to get into the desired state."
+ type: bool
+ default: yes
+ fetch_nested:
+ description:
+ - "If I(True) the module will fetch additional data from the API."
+      - "It will fetch IDs of the VMs disks, snapshots, etc. The user can configure fetching other
+         attributes of the nested entities by specifying C(nested_attributes)."
+ type: bool
+ nested_attributes:
+ description:
+ - "Specifies list of the attributes which should be fetched from the API."
+      - "This parameter applies only when C(fetch_nested) is I(true)."
+ type: list
+ elements: str
+ auth:
+ description:
+ - "Dictionary with values needed to create HTTP/HTTPS connection to oVirt:"
+ suboptions:
+ username:
+ description:
+ - The name of the user, something like I(admin@internal).
+ - Default value is set by C(OVIRT_USERNAME) environment variable.
+ type: str
+ required: true
+ password:
+ description:
+ - The password of the user.
+ - Default value is set by C(OVIRT_PASSWORD) environment variable.
+ type: str
+ required: true
+ url:
+ description:
+ - A string containing the API URL of the server, usually something like `I(https://server.example.com/ovirt-engine/api)`.
+ - Default value is set by C(OVIRT_URL) environment variable.
+ - Either C(url) or C(hostname) is required.
+ type: str
+ hostname:
+ description:
+ - A string containing the hostname of the server, usually something like `I(server.example.com)`.
+ - Default value is set by C(OVIRT_HOSTNAME) environment variable.
+ - Either C(url) or C(hostname) is required.
+ type: str
+ token:
+ description:
+ - Token to be used instead of login with username/password.
+ - Default value is set by C(OVIRT_TOKEN) environment variable.
+ type: str
+ insecure:
+ description:
+ - A boolean flag that indicates if the server TLS certificate and host name should be checked.
+ type: bool
+ ca_file:
+ description:
+ - A PEM file containing the trusted CA certificates.
+ - The certificate presented by the server will be verified using these CA certificates.
+ - If C(ca_file) parameter is not set, system wide CA certificate store is used.
+ - Default value is set by C(OVIRT_CAFILE) environment variable.
+ type: str
+ kerberos:
+ description:
+ - A boolean flag indicating if Kerberos authentication should be used instead of the default basic authentication.
+ type: bool
+ headers:
+ description:
+ - Dictionary of HTTP headers to be added to each API call.
+ type: dict
+ type: dict
+ required: true
+ timeout:
+ description:
+ - "The amount of time in seconds the module should wait for the instance to
+         get into the desired state."
+ type: int
+ default: 180
+ poll_interval:
+ description:
+      - "Number of seconds the module waits between polling requests on the entity status."
+ type: int
+ default: 3
+requirements:
+ - python >= 2.7
+ - ovirt-engine-sdk-python >= 4.4.0
+notes:
+  - "In order to use this module you have to install the oVirt Python SDK.
+     To ensure it's installed with the correct version you can create the following task:
+ I(pip: name=ovirt-engine-sdk-python version=4.4.0)"
+'''
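
A sketch of assembling the documented auth dictionary in Python from the same
environment variables named above (this mirrors how the modules derive their
defaults; treating a missing CA file as an insecure connection follows the
module_utils helper later in this diff):

    import os

    auth = {
        'url': os.environ.get('OVIRT_URL'),
        'username': os.environ.get('OVIRT_USERNAME'),
        'password': os.environ.get('OVIRT_PASSWORD'),
        'token': os.environ.get('OVIRT_TOKEN'),
        'ca_file': os.environ.get('OVIRT_CAFILE'),
        'insecure': os.environ.get('OVIRT_CAFILE') is None,
    }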
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/doc_fragments/ovirt_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/doc_fragments/ovirt_info.py
new file mode 100644
index 00000000..f2801ab9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/doc_fragments/ovirt_info.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # info standard oVirt documentation fragment
+ DOCUMENTATION = r'''
+options:
+ fetch_nested:
+ description:
+ - If I(yes) the module will fetch additional data from the API.
+      - It will fetch only the IDs of nested entities; it doesn't fetch multiple levels of nested
+        attributes, only the attributes of the current entity. The user can configure fetching other
+        attributes of the nested entities by specifying C(nested_attributes).
+ type: bool
+ nested_attributes:
+ description:
+ - Specifies list of the attributes which should be fetched from the API.
+      - This parameter applies only when C(fetch_nested) is I(true).
+ type: list
+ elements: str
+ auth:
+ description:
+ - "Dictionary with values needed to create HTTP/HTTPS connection to oVirt:"
+ suboptions:
+ username:
+ description:
+ - The name of the user, something like I(admin@internal).
+ - Default value is set by C(OVIRT_USERNAME) environment variable.
+ type: str
+ required: true
+ password:
+ description:
+ - The password of the user.
+ - Default value is set by C(OVIRT_PASSWORD) environment variable.
+ type: str
+ required: true
+ url:
+ description:
+ - A string containing the API URL of the server, usually something like `I(https://server.example.com/ovirt-engine/api)`.
+ - Default value is set by C(OVIRT_URL) environment variable.
+ - Either C(url) or C(hostname) is required.
+ type: str
+ hostname:
+ description:
+ - A string containing the hostname of the server, usually something like `I(server.example.com)`.
+ - Default value is set by C(OVIRT_HOSTNAME) environment variable.
+ - Either C(url) or C(hostname) is required.
+ type: str
+ token:
+ description:
+ - Token to be used instead of login with username/password.
+ - Default value is set by C(OVIRT_TOKEN) environment variable.
+ type: str
+ insecure:
+ description:
+ - A boolean flag that indicates if the server TLS certificate and host name should be checked.
+ type: bool
+ ca_file:
+ description:
+ - A PEM file containing the trusted CA certificates.
+ - The certificate presented by the server will be verified using these CA certificates.
+ - If C(ca_file) parameter is not set, system wide CA certificate store is used.
+ - Default value is set by C(OVIRT_CAFILE) environment variable.
+ type: str
+ kerberos:
+ description:
+ - A boolean flag indicating if Kerberos authentication should be used instead of the default basic authentication.
+ type: bool
+ headers:
+ description:
+ - Dictionary of HTTP headers to be added to each API call.
+ type: dict
+ type: dict
+ required: true
+requirements:
+ - python >= 2.7
+ - ovirt-engine-sdk-python >= 4.4.0
+notes:
+  - "In order to use this module you have to install the oVirt Python SDK.
+     To ensure it's installed with the correct version you can create the following task:
+ pip: name=ovirt-engine-sdk-python version=4.4.0"
+'''
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmip.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmip.py
new file mode 100644
index 00000000..e6d46a33
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/filter/ovirtvmip.py
@@ -0,0 +1,147 @@
+'Module to create filter to find IP addresses in VMs'
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import socket
+import struct
+
+
+class FilterModule(object):
+ 'Filter for IP addresses on newly created VMs'
+
+ def filters(self):
+ 'Define filters'
+ return {
+ 'ovirtvmip': self.ovirtvmip,
+ 'ovirtvmips': self.ovirtvmips,
+ 'ovirtvmipv4': self.ovirtvmipv4,
+ 'ovirtvmipsv4': self.ovirtvmipsv4,
+ 'ovirtvmipv6': self.ovirtvmipv6,
+ 'ovirtvmipsv6': self.ovirtvmipsv6,
+ 'filtervalue': self.filtervalue,
+ 'removesensitivevmdata': self.removesensitivevmdata,
+ 'ovirtdiff': self.ovirtdiff,
+ }
+
+ def ovirtdiff(self, vm1, vm2):
+ """
+        This filter takes two dictionaries of two different resources and compares
+        them. It returns a dictionary with keys 'before' and 'after', where 'before'
+        contains the old values of the resource and 'after' contains the new values.
+        This is mainly useful to compare the current VM object and the next-run VM
+        object to see the difference for the next_run.
+ """
+ before = []
+ after = []
+ if vm1.get('next_run_configuration_exists'):
+ keys = [
+ key for key in set(list(vm1.keys()) + list(vm2.keys()))
+ if (key in vm1 and (key not in vm2 or vm2[key] != vm1[key])) or (key in vm2 and (key not in vm1 or vm1[key] != vm2[key]))
+ ]
+ for key in keys:
+ before.append((key, vm1.get(key)))
+ after.append((key, vm2.get(key, vm1.get(key))))
+
+ return {
+ 'before': dict(before),
+ 'after': dict(after),
+ }
+
+ def filtervalue(self, data, attr, value):
+        """ Filter to find all occurrences of some value in a dict """
+ items = []
+ for item in data:
+ if item[attr] == value:
+ items.append(item)
+ return items
+
+ def ovirtvmip(self, ovirt_vms, attr=None, network_ip=None):
+ 'Return first IP'
+ return self.__get_first_ip(self.ovirtvmips(ovirt_vms, attr))
+
+ def ovirtvmips(self, ovirt_vms, attr=None, network_ip=None):
+ 'Return list of IPs'
+ return self._parse_ips(ovirt_vms, attr=attr)
+
+ def ovirtvmipv4(self, ovirt_vms, attr=None, network_ip=None):
+ 'Return first IPv4 IP'
+ return self.__get_first_ip(self.ovirtvmipsv4(ovirt_vms, attr, network_ip))
+
+ def ovirtvmipsv4(self, ovirt_vms, attr=None, network_ip=None):
+ 'Return list of IPv4 IPs'
+ ips = self._parse_ips(ovirt_vms, lambda version: version == 'v4', attr)
+ resp = [ip for ip in ips if self.__address_in_network(ip, network_ip)]
+ return resp
+
+ def ovirtvmipv6(self, ovirt_vms, attr=None, network_ip=None):
+ 'Return first IPv6 IP'
+ return self.__get_first_ip(self.ovirtvmipsv6(ovirt_vms, attr))
+
+ def ovirtvmipsv6(self, ovirt_vms, attr=None, network_ip=None):
+ 'Return list of IPv6 IPs'
+ return self._parse_ips(ovirt_vms, lambda version: version == 'v6', attr)
+
+ def _parse_ips(self, ovirt_vms, version_condition=lambda version: True, attr=None):
+ if not isinstance(ovirt_vms, list):
+ ovirt_vms = [ovirt_vms]
+
+ if attr is None:
+ return self._parse_ips_aslist(ovirt_vms, version_condition)
+ else:
+ return self._parse_ips_asdict(ovirt_vms, version_condition, attr)
+
+ @staticmethod
+ def _parse_ips_asdict(ovirt_vms, version_condition=lambda version: True, attr=None):
+ vm_ips = {}
+ for ovirt_vm in ovirt_vms:
+ ips = []
+ for device in ovirt_vm.get('reported_devices', []):
+ for curr_ip in device.get('ips', []):
+ if version_condition(curr_ip.get('version')):
+ ips.append(curr_ip.get('address'))
+ vm_ips[ovirt_vm.get(attr)] = ips
+ return vm_ips
+
+ @staticmethod
+ def _parse_ips_aslist(ovirt_vms, version_condition=lambda version: True):
+ ips = []
+ for ovirt_vm in ovirt_vms:
+ for device in ovirt_vm.get('reported_devices', []):
+ for curr_ip in device.get('ips', []):
+ if version_condition(curr_ip.get('version')):
+ ips.append(curr_ip.get('address'))
+ return ips
+
+ @staticmethod
+ def __get_first_ip(res):
+ return res[0] if isinstance(res, list) and res else res
+
+ def __address_in_network(self, ip, net):
+ "Return boolean if IP is in network."
+ if net:
+ ipaddr = int(''.join(['%02x' % int(x) for x in ip.split('.')]), 16)
+ netstr, bits = net.split('/')
+ netaddr = int(''.join(['%02x' % int(x)
+ for x in netstr.split('.')]), 16)
+ mask = (0xffffffff << (32 - int(bits))) & 0xffffffff
+ return (ipaddr & mask) == (netaddr & mask)
+ return True
+
+ def removesensitivevmdata(self, data, key_to_remove='root_password'):
+ for value in data:
+ if key_to_remove in value:
+ value[key_to_remove] = "******"
+ if 'cloud_init' in value and key_to_remove in value['cloud_init']:
+ value['cloud_init'][key_to_remove] = "******"
+ if 'sysprep' in value and key_to_remove in value['sysprep']:
+ value['sysprep'][key_to_remove] = "******"
+ if 'profile' in value:
+ profile = value['profile']
+ if key_to_remove in profile:
+ profile[key_to_remove] = "******"
+ if 'cloud_init' in profile and key_to_remove in profile['cloud_init']:
+ profile['cloud_init'][key_to_remove] = "******"
+ if 'sysprep' in profile and key_to_remove in profile['sysprep']:
+ profile['sysprep'][key_to_remove] = "******"
+ return data
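
A quick sketch of calling the filters above from Python (assuming the collection
is importable; the VM dict is hand-made but mirrors the 'reported_devices'
shape the oVirt modules return):

    from ansible_collections.ovirt.ovirt.plugins.filter.ovirtvmip import FilterModule

    vm = {
        'name': 'myvm',
        'reported_devices': [
            {'ips': [{'version': 'v4', 'address': '10.0.0.5'},
                     {'version': 'v6', 'address': 'fd00::5'}]},
        ],
    }
    filters = FilterModule()
    print(filters.ovirtvmip(vm))                               # 10.0.0.5
    print(filters.ovirtvmipsv4(vm, network_ip='10.0.0.0/24'))  # ['10.0.0.5']
    print(filters.ovirtvmipsv6(vm))                            # ['fd00::5']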
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/inventory/ovirt.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/inventory/ovirt.py
new file mode 100644
index 00000000..5fb7ab4a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/inventory/ovirt.py
@@ -0,0 +1,270 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: ovirt
+ plugin_type: inventory
+ short_description: oVirt inventory source
+ version_added: "1.0.0"
+ author: Bram Verschueren (@bverschueren)
+ requirements:
+ - ovirt-engine-sdk-python >= 4.2.4
+ extends_documentation_fragment:
+ - inventory_cache
+ - constructed
+ description:
+ - Get inventory hosts from the ovirt service.
+ - Requires a YAML file ending in 'ovirt.yml', 'ovirt4.yml', 'ovirt.yaml', 'ovirt4.yaml'.
+ options:
+ plugin:
+      description: The name of this plugin; it should always be set to 'ovirt' for this plugin to recognise it as its own.
+ required: True
+ choices: ['ovirt']
+ ovirt_url:
+ description: URL to ovirt-engine API.
+ required: True
+ env:
+ - name: OVIRT_URL
+ ovirt_username:
+ description: ovirt authentication user.
+ required: True
+ env:
+ - name: OVIRT_USERNAME
+ ovirt_password:
+ description: ovirt authentication password.
+      required: True
+ env:
+ - name: OVIRT_PASSWORD
+ ovirt_cafile:
+ description: path to ovirt-engine CA file. If C(ovirt_cafile) parameter is not set and C(ovirt_insecure) is not True, system wide CA certificate store\
+ is used.
+ required: False
+ ovirt_insecure:
+ description: A boolean flag that indicates if the server TLS certificate and host name should be checked.
+ required: False
+ ovirt_query_filter:
+ required: False
+ description: dictionary of filter key-values to query VM's. See U(https://ovirt.github.io/ovirt-engine-sdk/master/services.m.html#ovirtsdk4\
+.services.VmsService.list) for filter parameters.
+ ovirt_hostname_preference:
+ required: False
+ description: list of options that describe the ordering for which hostnames should be assigned. See U(https://ovirt.github.io/ovirt-engin\
+e-api-model/master/#types/vm) for available attributes.
+ default: ['fqdn', 'name']
+ type: list
+'''
+
+EXAMPLES = '''
+# Ensure the CA is available:
+# $ wget "https://engine/ovirt-engine/services/pki-resource?resource=ca-certificate&format=X509-PEM-CA" -O /path/to/ca.pem
+# Sample content of ovirt.yml:
+plugin: ovirt.ovirt.ovirt
+ovirt_url: https://engine/ovirt-engine/api
+ovirt_cafile: /path/to/ca.pem
+ovirt_username: ansible-tester
+ovirt_password: secure
+ovirt_query_filter:
+ search: 'name=myvm AND cluster=mycluster'
+ case_sensitive: no
+ max: 15
+keyed_groups:
+ - key: cluster
+ prefix: 'cluster'
+groups:
+ dev: "'dev' in tags"
+compose:
+ ansible_host: devices["eth0"][0]
+'''
+
+import sys
+
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.errors import AnsibleError, AnsibleParserError
+
+HAS_OVIRT_LIB = False
+
+try:
+ import ovirtsdk4 as sdk
+ HAS_OVIRT_LIB = True
+except ImportError:
+ HAS_OVIRT_LIB = False
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+
+ NAME = 'ovirt.ovirt.ovirt'
+
+ def _get_dict_of_struct(self, vm):
+ ''' Transform SDK Vm Struct type to Python dictionary.
+ :param vm: host struct of which to create dict
+ :return dict of vm struct type
+ '''
+
+ vms_service = self.connection.system_service().vms_service()
+ clusters_service = self.connection.system_service().clusters_service()
+ vm_service = vms_service.vm_service(vm.id)
+ devices = vm_service.reported_devices_service().list()
+ tags = vm_service.tags_service().list()
+ stats = vm_service.statistics_service().list()
+ labels = vm_service.affinity_labels_service().list()
+ groups = clusters_service.cluster_service(
+ vm.cluster.id
+ ).affinity_groups_service().list()
+
+ return {
+ 'id': vm.id,
+ 'name': vm.name,
+ 'host': self.connection.follow_link(vm.host).name if vm.host else None,
+ 'cluster': self.connection.follow_link(vm.cluster).name,
+ 'status': str(vm.status),
+ 'description': vm.description,
+ 'fqdn': vm.fqdn,
+ 'os': vm.os.type,
+ 'template': self.connection.follow_link(vm.template).name,
+ 'creation_time': str(vm.creation_time),
+ 'creation_time_timestamp': float(vm.creation_time.strftime("%s.%f")),
+ 'tags': [tag.name for tag in tags],
+ 'affinity_labels': [label.name for label in labels],
+ 'affinity_groups': [
+ group.name for group in groups
+ if vm.name in [vm.name for vm in self.connection.follow_link(group.vms)]
+ ],
+ 'statistics': dict(
+ (stat.name, stat.values[0].datum if stat.values else None) for stat in stats
+ ),
+ 'devices': dict(
+ (device.name, [ip.address for ip in device.ips]) for device in devices if device.ips
+ ),
+ }
+
+ def _query(self, query_filter=None):
+ '''
+ :param query_filter: dictionary of filter parameter/values
+        :return list of oVirt vm dicts
+ '''
+ return [self._get_dict_of_struct(host) for host in self._get_hosts(query_filter=query_filter)]
+
+ def _get_hosts(self, query_filter=None):
+ '''
+        :param query_filter: dictionary of vm filter parameter/values
+ :return list of oVirt vm structs
+ '''
+
+ vms_service = self.connection.system_service().vms_service()
+ if query_filter is not None:
+ return vms_service.list(**query_filter)
+ return vms_service.list()
+
+ def _get_query_options(self, param_dict):
+ ''' Get filter parameters and cast these to comply with sdk VmsService.list param types
+ :param param_dict: dictionary of filter parameters and values
+ :return dictionary with casted parameter/value
+ '''
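+        # Example (a sketch): {'search': 'name=myvm', 'max': '10'} is cast to
+        # {'search': 'name=myvm', 'max': 10}; unknown keys raise AnsibleError.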
+ if param_dict is None:
+ return None
+
+ FILTER_MAPPING = {
+ 'all_content': bool,
+ 'case_sensitive': bool,
+ 'filter': bool,
+ 'follow': str,
+ 'max': int,
+ 'search': str
+ }
+
+ casted_dict = {}
+
+ for (param, value) in param_dict.items():
+ try:
+ casted_dict[param] = FILTER_MAPPING[param](value)
+ except KeyError:
+ raise AnsibleError("Unknown filter option '{0}'".format(param))
+
+ return casted_dict
+
+ def _get_hostname(self, host):
+ '''
+        Get the host's hostname based on the preferred attribute
+        :param host: dict representation of oVirt VmStruct
+        :return: preferred hostname for the host
+ '''
+ hostname_preference = self.get_option('ovirt_hostname_preference')
+ if not hostname_preference:
+ raise AnsibleParserError('Invalid value for option ovirt_hostname_preference: {0}'.format(hostname_preference))
+ hostname = None
+
+ for preference in hostname_preference:
+ hostname = host.get(preference)
+ if hostname is not None:
+ return hostname
+
+ raise AnsibleParserError("No valid name found for host id={0}".format(host.get('id')))
+
+ def _populate_from_source(self, source_data):
+
+ for host in source_data:
+
+ hostname = self._get_hostname(host)
+
+ self.inventory.add_host(hostname)
+
+ for fact, value in host.items():
+ self.inventory.set_variable(hostname, fact, value)
+
+ strict = self.get_option('strict')
+ self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)
+ self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)
+
+ def verify_file(self, path):
+
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('ovirt.yml', 'ovirt4.yml', 'ovirt.yaml', 'ovirt4.yaml')):
+ valid = True
+ return valid
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ if not HAS_OVIRT_LIB:
+            raise AnsibleError('oVirt inventory plugin requires ovirt-engine-sdk-python >= 4.2.4')
+
+ super(InventoryModule, self).parse(inventory, loader, path, cache)
+
+ config = self._read_config_data(path)
+
+ self.connection = sdk.Connection(
+ url=self.get_option('ovirt_url'),
+ username=self.get_option('ovirt_username'),
+ password=self.get_option('ovirt_password'),
+ ca_file=self.get_option('ovirt_cafile'),
+ insecure=self.get_option('ovirt_insecure') if self.get_option('ovirt_insecure') is not None else not self.get_option('ovirt_cafile'),
+ )
+
+ query_filter = self._get_query_options(self.get_option('ovirt_query_filter', None))
+
+ cache_key = self.get_cache_key(path)
+ source_data = None
+
+ user_cache_setting = self.get_option('cache')
+ attempt_to_read_cache = user_cache_setting and cache
+ cache_needs_update = user_cache_setting and not cache
+
+ if attempt_to_read_cache:
+ try:
+ source_data = self._cache[cache_key]
+ except KeyError:
+ cache_needs_update = True
+
+ if source_data is None:
+ source_data = self._query(query_filter=query_filter)
+
+ if cache_needs_update:
+ self._cache[cache_key] = source_data
+
+ self._populate_from_source(source_data)
+ self.connection.close()
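
A standalone sketch of the hostname-preference fallback implemented in
_get_hostname above: the first attribute in the preference list with a
non-None value wins (the sample host dict is made up):

    def pick_hostname(host, preference=('fqdn', 'name')):
        for attr in preference:
            if host.get(attr) is not None:
                return host[attr]
        raise ValueError('no usable hostname for host id=%s' % host.get('id'))

    print(pick_hostname({'fqdn': None, 'name': 'myvm', 'id': '42'}))  # myvm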
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/module_utils/__init__.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/module_utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/module_utils/__init__.py
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/module_utils/cloud.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/module_utils/cloud.py
new file mode 100644
index 00000000..52108ca7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/module_utils/cloud.py
@@ -0,0 +1,208 @@
+#
+# (c) 2016 Allen Sanabria, <asanabria@linuxdynasty.org>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+This module adds shared support for generic cloud modules
+In order to use this module, include it as part of a custom
+module as shown below.
+from ansible.module_utils.cloud import CloudRetry
+The 'cloud' module provides the following common classes:
+ * CloudRetry
+ - The base class to be used by other cloud providers, in order to
+ provide a backoff/retry decorator based on status codes.
+ - Example using the AWSRetry class which inherits from CloudRetry.
+ @AWSRetry.exponential_backoff(retries=10, delay=3)
+ get_ec2_security_group_ids_from_names()
+ @AWSRetry.jittered_backoff()
+ get_ec2_security_group_ids_from_names()
+"""
+import random
+from functools import wraps
+import syslog
+import time
+
+
+def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60):
+ """ Customizable exponential backoff strategy.
+ Args:
+ retries (int): Maximum number of times to retry a request.
+ delay (float): Initial (base) delay.
+ backoff (float): base of the exponent to use for exponential
+ backoff.
+ max_delay (int): Optional. If provided each delay generated is capped
+ at this amount. Defaults to 60 seconds.
+ Returns:
+ Callable that returns a generator. This generator yields durations in
+ seconds to be used as delays for an exponential backoff strategy.
+ Usage:
+ >>> backoff = _exponential_backoff()
+ >>> backoff
+ <function backoff_backoff at 0x7f0d939facf8>
+ >>> list(backoff())
+ [2, 4, 8, 16, 32, 60, 60, 60, 60, 60]
+ """
+ def backoff_gen():
+ for retry in range(0, retries):
+ sleep = delay * backoff ** retry
+ yield sleep if max_delay is None else min(sleep, max_delay)
+ return backoff_gen
+
+
+def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random):
+ """ Implements the "Full Jitter" backoff strategy described here
+ https://www.awsarchitectureblog.com/2015/03/backoff.html
+ Args:
+ retries (int): Maximum number of times to retry a request.
+ delay (float): Approximate number of seconds to sleep for the first
+ retry.
+ max_delay (int): The maximum number of seconds to sleep for any retry.
+ _random (random.Random or None): Makes this generator testable by
+            allowing developers to explicitly pass in a seeded Random.
+ Returns:
+ Callable that returns a generator. This generator yields durations in
+ seconds to be used as delays for a full jitter backoff strategy.
+ Usage:
+ >>> backoff = _full_jitter_backoff(retries=5)
+ >>> backoff
+ <function backoff_backoff at 0x7f0d939facf8>
+ >>> list(backoff())
+ [3, 6, 5, 23, 38]
+ >>> list(backoff())
+ [2, 1, 6, 6, 31]
+ """
+ def backoff_gen():
+ for retry in range(0, retries):
+ yield _random.randint(0, min(max_delay, delay * 2 ** retry))
+ return backoff_gen
+
+
+class CloudRetry(object):
+ """ CloudRetry can be used by any cloud provider, in order to implement a
+ backoff algorithm/retry effect based on Status Code from Exceptions.
+ """
+ # This is the base class of the exception.
+ # AWS Example botocore.exceptions.ClientError
+ base_class = None
+
+ @staticmethod
+ def status_code_from_exception(error):
+ """ Return the status code from the exception object
+ Args:
+ error (object): The exception itself.
+ """
+ pass
+
+ @staticmethod
+ def found(response_code, catch_extra_error_codes=None):
+ """ Return True if the Response Code to retry on was found.
+ Args:
+ response_code (str): This is the Response Code that is being matched against.
+ """
+ pass
+
+ @classmethod
+ def _backoff(cls, backoff_strategy, catch_extra_error_codes=None):
+ """ Retry calling the Cloud decorated function using the provided
+ backoff strategy.
+ Args:
+ backoff_strategy (callable): Callable that returns a generator. The
+ generator should yield sleep times for each retry of the decorated
+ function.
+ """
+ def deco(f):
+ @wraps(f)
+ def retry_func(*args, **kwargs):
+ for delay in backoff_strategy():
+ try:
+ return f(*args, **kwargs)
+ except Exception as e:
+ if isinstance(e, cls.base_class):
+ response_code = cls.status_code_from_exception(e)
+ if cls.found(response_code, catch_extra_error_codes):
+ msg = "{0}: Retrying in {1} seconds...".format(str(e), delay)
+ syslog.syslog(syslog.LOG_INFO, msg)
+ time.sleep(delay)
+ else:
+                                # Re-raise the original exception if its status code is not one we retry on
+ raise e
+ else:
+                            # Re-raise the original exception if it is not an instance of base_class
+ raise e
+ return f(*args, **kwargs)
+
+ return retry_func # true decorator
+
+ return deco
+
+ @classmethod
+ def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using an exponential backoff.
+ Kwargs:
+ retries (int): Number of times to retry a failed request before giving up
+ default=10
+ delay (int or float): Initial delay between retries in seconds
+ default=3
+ backoff (int or float): backoff multiplier e.g. value of 2 will
+ double the delay each retry
+            default=2
+ max_delay (int or None): maximum amount of time to wait between retries.
+ default=60
+ """
+ return cls._backoff(_exponential_backoff(
+ retries=retries, delay=delay, backoff=backoff, max_delay=max_delay), catch_extra_error_codes)
+
+ @classmethod
+ def jittered_backoff(cls, retries=10, delay=3, max_delay=60, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using a jittered backoff
+ strategy. More on this strategy here:
+ https://www.awsarchitectureblog.com/2015/03/backoff.html
+ Kwargs:
+ retries (int): Number of times to retry a failed request before giving up
+ default=10
+ delay (int): Initial delay between retries in seconds
+ default=3
+ max_delay (int): maximum amount of time to wait between retries.
+ default=60
+ """
+ return cls._backoff(_full_jitter_backoff(
+ retries=retries, delay=delay, max_delay=max_delay), catch_extra_error_codes)
+
+ @classmethod
+ def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using an exponential backoff.
+ Compatibility for the original implementation of CloudRetry.backoff that
+ did not provide configurable backoff strategies. Developers should use
+ CloudRetry.exponential_backoff instead.
+ Kwargs:
+ tries (int): Number of times to try (not retry) before giving up
+ default=10
+ delay (int or float): Initial delay between retries in seconds
+ default=3
+ backoff (int or float): backoff multiplier e.g. value of 2 will
+ double the delay each retry
+ default=1.1
+ """
+ return cls.exponential_backoff(
+ retries=tries - 1, delay=delay, backoff=backoff, max_delay=None, catch_extra_error_codes=catch_extra_error_codes)
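
A minimal sketch of wiring CloudRetry to a concrete provider, as the module
docstring suggests (MyAPIError and the retried status codes are made up; only
the three overridden members are required):

    from ansible_collections.ovirt.ovirt.plugins.module_utils.cloud import CloudRetry

    class MyAPIError(Exception):
        def __init__(self, status):
            super(MyAPIError, self).__init__('HTTP %s' % status)
            self.status = status

    class MyRetry(CloudRetry):
        base_class = MyAPIError

        @staticmethod
        def status_code_from_exception(error):
            return error.status

        @staticmethod
        def found(response_code, catch_extra_error_codes=None):
            return response_code in (429, 503)  # retry only on throttling

    calls = {'n': 0}

    @MyRetry.exponential_backoff(retries=3, delay=1)
    def flaky_call():
        calls['n'] += 1
        if calls['n'] < 3:
            raise MyAPIError(503)  # retried after a backoff sleep
        return 'ok'

    print(flaky_call())  # 'ok' after two retried 503 responses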
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/module_utils/ovirt.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/module_utils/ovirt.py
new file mode 100644
index 00000000..e38810ed
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/module_utils/ovirt.py
@@ -0,0 +1,874 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import inspect
+import os
+import time
+
+from abc import ABCMeta, abstractmethod
+from datetime import datetime
+from distutils.version import LooseVersion
+
+from ansible_collections.ovirt.ovirt.plugins.module_utils.cloud import CloudRetry
+from ansible.module_utils.common._collections_compat import Mapping
+
+try:
+ from enum import Enum # enum is a ovirtsdk4 requirement
+ import ovirtsdk4 as sdk
+ import ovirtsdk4.version as sdk_version
+ import ovirtsdk4.types as otypes
+ HAS_SDK = LooseVersion(sdk_version.VERSION) >= LooseVersion('4.4.0')
+except ImportError:
+ HAS_SDK = False
+
+
+BYTES_MAP = {
+ 'kib': 2**10,
+ 'mib': 2**20,
+ 'gib': 2**30,
+ 'tib': 2**40,
+ 'pib': 2**50,
+}
+
+
+def check_sdk(module):
+ if not HAS_SDK:
+ module.fail_json(
+ msg='ovirtsdk4 version 4.4.0 or higher is required for this module'
+ )
+
+
+def get_dict_of_struct(struct, connection=None, fetch_nested=False, attributes=None, filter_keys=None):
+ """
+ Convert SDK Struct type into dictionary.
+ """
+ res = {}
+
+ def resolve_href(value):
+ # Fetch nested values of struct:
+ try:
+ value = connection.follow_link(value)
+ except sdk.Error:
+ value = None
+ nested_obj = dict(
+ (attr, convert_value(getattr(value, attr)))
+ for attr in attributes if getattr(value, attr, None) is not None
+ )
+ nested_obj['id'] = getattr(value, 'id', None)
+ nested_obj['href'] = getattr(value, 'href', None)
+ return nested_obj
+
+    def remove_underscore(val):
+        # Strip all leading underscores from the attribute name:
+        while val.startswith('_'):
+            val = val[1:]
+        return val
+
+ def convert_value(value):
+ nested = False
+
+ if isinstance(value, sdk.Struct):
+ if not fetch_nested or not value.href:
+ return get_dict_of_struct(value)
+ return resolve_href(value)
+
+ elif isinstance(value, Enum) or isinstance(value, datetime):
+ return str(value)
+ elif isinstance(value, list) or isinstance(value, sdk.List):
+ if isinstance(value, sdk.List) and fetch_nested and value.href:
+ try:
+ value = connection.follow_link(value)
+ nested = True
+ except sdk.Error:
+ value = []
+
+ ret = []
+ for i in value:
+ if isinstance(i, sdk.Struct):
+ if not nested and fetch_nested and i.href:
+ ret.append(resolve_href(i))
+ elif not nested:
+ ret.append(get_dict_of_struct(i))
+ else:
+ nested_obj = dict(
+ (attr, convert_value(getattr(i, attr)))
+ for attr in attributes if getattr(i, attr, None)
+ )
+ nested_obj['id'] = getattr(i, 'id', None)
+ ret.append(nested_obj)
+ elif isinstance(i, Enum):
+ ret.append(str(i))
+ else:
+ ret.append(i)
+ return ret
+ else:
+ return value
+
+ if struct is not None:
+ for key, value in struct.__dict__.items():
+ if value is None:
+ continue
+
+ key = remove_underscore(key)
+ if filter_keys is None:
+ res[key] = convert_value(value)
+ elif key in filter_keys:
+ res[key] = convert_value(value)
+
+ return res
+
+
+def engine_version(connection):
+ """
+ Return string representation of oVirt engine version.
+ """
+ engine_api = connection.system_service().get()
+ engine_version = engine_api.product_info.version
+ return '%s.%s' % (engine_version.major, engine_version.minor)
+
+
+def create_connection(auth):
+ """
+    Create a connection to the Python SDK from the task's `auth` parameter.
+    If the user doesn't have an SSO token, the `auth` dictionary must contain the following parameters:
+     url, username, password
+
+    If the user has an SSO token, the `auth` dictionary must contain the following parameters:
+     url, token
+
+    The `ca_file` parameter is mandatory in case the user wants to use a secure connection;
+    in case the user wants to use an insecure connection, it's mandatory to send insecure=True.
+
+ :param auth: dictionary which contains needed values for connection creation
+ :return: Python SDK connection
+ """
+
+ url = auth.get('url')
+ if url is None and auth.get('hostname') is not None:
+ url = 'https://{0}/ovirt-engine/api'.format(auth.get('hostname'))
+
+ return sdk.Connection(
+ url=url,
+ username=auth.get('username'),
+ password=auth.get('password'),
+ ca_file=auth.get('ca_file', None),
+ insecure=auth.get('insecure', False),
+ token=auth.get('token', None),
+ kerberos=auth.get('kerberos', None),
+ headers=auth.get('headers', None),
+ )
+
+
+def convert_to_bytes(param):
+ """
+    This method converts units to bytes, following the IEC standard.
+
+ :param param: value to be converted
+ """
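+    # Example (a sketch): convert_to_bytes('1GiB') -> 1073741824, and a bare
+    # number such as convert_to_bytes('512') -> 524288 (treated as KiB).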
+ if param is None:
+ return None
+
+ # Get rid of whitespaces:
+ param = ''.join(param.split())
+
+ # Convert to bytes:
+ if len(param) > 3 and param[-3].lower() in ['k', 'm', 'g', 't', 'p']:
+ return int(param[:-3]) * BYTES_MAP.get(param[-3:].lower(), 1)
+ elif param.isdigit():
+ return int(param) * 2**10
+ else:
+ raise ValueError(
+ "Unsupported value(IEC supported): '{value}'".format(value=param)
+ )
+
+
+def follow_link(connection, link):
+ """
+ This method returns the entity of the element which link points to.
+
+ :param connection: connection to the Python SDK
+ :param link: link of the entity
+ :return: entity which link points to
+ """
+
+ if link:
+ return connection.follow_link(link)
+ else:
+ return None
+
+
+def get_link_name(connection, link):
+ """
+ This method returns the name of the element which link points to.
+
+ :param connection: connection to the Python SDK
+ :param link: link of the entity
+ :return: name of the entity, which link points to
+ """
+
+ if link:
+ return connection.follow_link(link).name
+ else:
+ return None
+
+
+def equal(param1, param2, ignore_case=False):
+ """
+    Compare two parameters and return whether they are equal.
+    The comparison isn't run if the first parameter is None.
+    With this approach we skip the comparison when the user doesn't
+    specify the parameter in their task.
+
+ :param param1: user inputted parameter
+ :param param2: value of entity parameter
+ :return: True if parameters are equal or first parameter is None, otherwise False
+ """
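+    # Example (a sketch): equal(None, 'x') -> True (parameter not specified),
+    # equal('A', 'a', ignore_case=True) -> True, equal('A', 'b') -> False.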
+ if param1 is not None:
+ if ignore_case:
+ return param1.lower() == param2.lower()
+ return param1 == param2
+ return True
+
+
+def search_by_attributes(service, list_params=None, **kwargs):
+ """
+ Search for the entity by attributes. Nested entities don't support search
+    via REST, so when searching for a nested entity we list all entities
+    and filter them by the specified attributes.
+ """
+ list_params = list_params or {}
+    # Check if the 'list' method supports search (look for a search parameter):
+ if 'search' in inspect.getargspec(service.list)[0]:
+ res = service.list(
+            # There must be double quotes around the value, because it's possible to create some oVirt resources with a space in the name.
+ search=' and '.join('{0}="{1}"'.format(k, v) for k, v in kwargs.items()),
+ **list_params
+ )
+ else:
+ res = [
+ e for e in service.list(**list_params) if len([
+ k for k, v in kwargs.items() if getattr(e, k, None) == v
+ ]) == len(kwargs)
+ ]
+
+ res = res or [None]
+ return res[0]
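+
+
+# Usage sketch (the hosts_service handle and attribute names are illustrative):
+#
+# host = search_by_attributes(hosts_service, name='myhost', cluster='mycluster')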
+
+
+def search_by_name(service, name, **kwargs):
+ """
+ Search for the entity by its name. Nested entities don't support search
+ via REST, so when searching for a nested entity we list all entities and
+ filter them by name.
+
+ :param service: service of the entity
+ :param name: name of the entity
+ :return: Entity object returned by Python SDK
+ """
+ # Check if the 'list' method supports search (look for a 'search' parameter):
+ if 'search' in inspect.getargspec(service.list)[0]:
+ res = service.list(
+ # There must be double quotes around the name, because some oVirt resources can be created with a space in the name.
+ search='name="{name}"'.format(name=name)
+ )
+ else:
+ res = [e for e in service.list() if e.name == name]
+
+ if kwargs:
+ # Filter the name matches by the additional attributes:
+ res = [
+ e for e in res if len([
+ k for k, v in kwargs.items() if getattr(e, k, None) == v
+ ]) == len(kwargs)
+ ]
+
+ res = res or [None]
+ return res[0]
+
+
+def get_entity(service, get_params=None):
+ """
+ Get an entity from the service, ignoring SDK errors.
+ """
+ entity = None
+ try:
+ if get_params is not None:
+ entity = service.get(**get_params)
+ else:
+ entity = service.get()
+ except sdk.Error:
+ # We can get a 404 here, which we should ignore, for example
+ # when the entity is being removed.
+ pass
+ return entity
+
+
+def get_id_by_name(service, name, raise_error=True, ignore_case=False):
+ """
+ Search for an entity's ID by its name.
+ """
+ entity = search_by_name(service, name)
+
+ if entity is not None:
+ return entity.id
+
+ if raise_error:
+ raise Exception("Entity '%s' was not found." % name)
+
+
+def wait(
+ service,
+ condition,
+ fail_condition=lambda e: False,
+ timeout=180,
+ wait=True,
+ poll_interval=3,
+):
+ """
+ Wait until the entity fulfills the expected condition.
+
+ :param service: service of the entity
+ :param condition: condition to be fulfilled
+ :param fail_condition: if this condition is true, raise Exception
+ :param timeout: max time to wait in seconds
+ :param wait: if True wait for condition, if False don't wait
+ :param poll_interval: Number of seconds we should wait until next condition check
+ """
+ # Wait until the desired state of the entity:
+ if wait:
+ start = time.time()
+ while time.time() < start + timeout:
+ # Exit if the condition of entity is valid:
+ entity = get_entity(service)
+ if condition(entity):
+ return
+ elif fail_condition(entity):
+ raise Exception("Error while waiting on result state of the entity.")
+
+ # Sleep for `poll_interval` seconds if none of the conditions apply:
+ time.sleep(float(poll_interval))
+
+ raise Exception("Timeout exceed while waiting on result state of the entity.")
+
+
+def __get_auth_dict():
+ OVIRT_URL = os.environ.get('OVIRT_URL')
+ OVIRT_HOSTNAME = os.environ.get('OVIRT_HOSTNAME')
+ OVIRT_USERNAME = os.environ.get('OVIRT_USERNAME')
+ OVIRT_PASSWORD = os.environ.get('OVIRT_PASSWORD')
+ OVIRT_TOKEN = os.environ.get('OVIRT_TOKEN')
+ OVIRT_CAFILE = os.environ.get('OVIRT_CAFILE')
+ OVIRT_INSECURE = OVIRT_CAFILE is None
+
+ env_vars = None
+ if OVIRT_URL is None and OVIRT_HOSTNAME is not None:
+ OVIRT_URL = 'https://{0}/ovirt-engine/api'.format(OVIRT_HOSTNAME)
+ if OVIRT_URL and ((OVIRT_USERNAME and OVIRT_PASSWORD) or OVIRT_TOKEN):
+ env_vars = {
+ 'url': OVIRT_URL,
+ 'username': OVIRT_USERNAME,
+ 'password': OVIRT_PASSWORD,
+ 'insecure': OVIRT_INSECURE,
+ 'token': OVIRT_TOKEN,
+ 'ca_file': OVIRT_CAFILE,
+ }
+ if env_vars is not None:
+ auth = dict(default=env_vars, type='dict')
+ else:
+ auth = dict(required=True, type='dict')
+
+ return auth
+
+
+def ovirt_info_full_argument_spec(**kwargs):
+ """
+ Extend parameters of info module with parameters which are common to all
+ oVirt info modules.
+
+ :param kwargs: kwargs to be extended
+ :return: extended dictionary with common parameters
+ """
+ spec = dict(
+ auth=__get_auth_dict(),
+ fetch_nested=dict(default=False, type='bool'),
+ nested_attributes=dict(type='list', default=list(), elements='str'),
+ )
+ spec.update(kwargs)
+ return spec
+
+
+# Left for third-party module compatibility
+def ovirt_facts_full_argument_spec(**kwargs):
+ """
+ This is deprecated. Please use ovirt_info_full_argument_spec instead!
+
+ :param kwargs: kwargs to be extended
+ :return: extended dictionary with common parameters
+ """
+ return ovirt_info_full_argument_spec(**kwargs)
+
+
+def ovirt_full_argument_spec(**kwargs):
+ """
+ Extend parameters of module with parameters which are common to all oVirt modules.
+
+ :param kwargs: kwargs to be extended
+ :return: extended dictionary with common parameters
+ """
+ spec = dict(
+ auth=__get_auth_dict(),
+ timeout=dict(default=180, type='int'),
+ wait=dict(default=True, type='bool'),
+ poll_interval=dict(default=3, type='int'),
+ fetch_nested=dict(default=False, type='bool'),
+ nested_attributes=dict(type='list', default=list(), elements='str'),
+ )
+ spec.update(kwargs)
+ return spec
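+
+
+# Sketch: a module typically extends the common spec with its own parameters
+# (the names below are illustrative):
+#
+# argument_spec = ovirt_full_argument_spec(
+# state=dict(type='str', default='present', choices=['absent', 'present']),
+# name=dict(type='str', required=True),
+# )
+# module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)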
+
+
+def check_params(module):
+ """
+ Most modules must have either `name` or `id` specified.
+ """
+ if module.params.get('name') is None and module.params.get('id') is None:
+ module.fail_json(msg='"name" or "id" is required')
+
+
+def engine_supported(connection, version):
+ return LooseVersion(engine_version(connection)) >= LooseVersion(version)
+
+
+def check_support(version, connection, module, params):
+ """
+ Check whether the parameters used by the user are supported by the oVirt
+ Python SDK and the oVirt engine.
+ """
+ api_version = LooseVersion(engine_version(connection))
+ version = LooseVersion(version)
+ for param in params:
+ if module.params.get(param) is not None:
+ return LooseVersion(sdk_version.VERSION) >= version and api_version >= version
+
+ return True
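+
+
+# Sketch of how modules use check_support to reject parameters that the
+# connected engine is too old for (parameter names are illustrative):
+#
+# if not check_support('4.1', connection, module, ('host_rule', 'hosts')):
+# module.fail_json(msg='Those parameters require engine 4.1 or newer.')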
+
+
+class BaseModule(object):
+ """
+ This is the base class for oVirt modules. oVirt modules should inherit from
+ this class and override its methods to customize the specific needs of the
+ module. The only abstract method of this class is `build_entity`, which must
+ be implemented in the child class.
+ """
+ __metaclass__ = ABCMeta
+
+ def __init__(self, connection, module, service, changed=False):
+ self._connection = connection
+ self._module = module
+ self._service = service
+ self._changed = changed
+ self._diff = {'after': dict(), 'before': dict()}
+
+ @property
+ def changed(self):
+ return self._changed
+
+ @changed.setter
+ def changed(self, changed):
+ if not self._changed:
+ self._changed = changed
+
+ @abstractmethod
+ def build_entity(self):
+ """
+ This method should return the oVirt Python SDK type that we want to
+ create or update, initialized with the values passed to the Ansible module.
+
+ For example, if we want to create a VM, we would return the following:
+ types.Vm(name=self._module.params['vm_name'])
+
+ :return: Specific instance of sdk.Struct.
+ """
+ pass
+
+ def param(self, name, default=None):
+ """
+ Return a module parameter specified by its name.
+ """
+ return self._module.params.get(name, default)
+
+ def update_check(self, entity):
+ """
+ This method checks whether the entity values are the same as the values
+ passed to the Ansible module. By default we don't compare any values.
+
+ :param entity: Entity we want to compare with the Ansible module values.
+ :return: True if the values are the same, so we don't need to update the entity.
+ """
+ return True
+
+ def pre_create(self, entity):
+ """
+ This method is called right before entity is created.
+
+ :param entity: Entity to be created or updated.
+ """
+ pass
+
+ def post_create(self, entity):
+ """
+ This method is called right after entity is created.
+
+ :param entity: Entity which was created.
+ """
+ pass
+
+ def post_update(self, entity):
+ """
+ This method is called right after entity is updated.
+
+ :param entity: Entity which was updated.
+ """
+ pass
+
+ def diff_update(self, after, update):
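+ # Recursively merge `update` into `after`: nested mappings are merged
+ # rather than replaced, e.g. diff_update({'a': {'b': 1}}, {'a': {'c': 2}})
+ # returns {'a': {'b': 1, 'c': 2}}.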
+ for k, v in update.items():
+ if isinstance(v, Mapping):
+ after[k] = self.diff_update(after.get(k, dict()), v)
+ else:
+ after[k] = update[k]
+ return after
+
+ def create(
+ self,
+ entity=None,
+ result_state=None,
+ fail_condition=lambda e: False,
+ search_params=None,
+ update_params=None,
+ _wait=None,
+ force_create=False,
+ **kwargs
+ ):
+ """
+ Method which is called when the state of the entity is 'present'. If the
+ user doesn't provide the `entity` parameter, the entity is searched for
+ using the `search_params` parameter. If the entity is found, it's updated;
+ whether the entity should be updated is checked by the `update_check`
+ method. The corresponding updated entity is built by the `build_entity` method.
+
+ A function executed after the entity is created can optionally be provided
+ by overriding the `post_create` method. A function executed after the entity
+ is updated can optionally be provided by overriding the `post_update` method.
+
+ :param entity: Entity we want to update, if exists.
+ :param result_state: State the entity should be in for the task to finish.
+ :param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised.
+ :param search_params: Dictionary of parameters to be used for search.
+ :param update_params: The params which should be passed to update method.
+ :param kwargs: Additional parameters passed when creating entity.
+ :return: Dictionary with values returned by Ansible module.
+ """
+ if entity is None and not force_create:
+ entity = self.search_entity(search_params)
+
+ self.pre_create(entity)
+
+ if entity:
+ # Entity exists, so update it:
+ entity_service = self._service.service(entity.id)
+ if not self.update_check(entity):
+ new_entity = self.build_entity()
+ if not self._module.check_mode:
+ update_params = update_params or {}
+ updated_entity = entity_service.update(
+ new_entity,
+ **update_params
+ )
+ self.post_update(entity)
+
+ # Update diffs only if the user specified the --diff parameter,
+ # so we don't needlessly overload the API:
+ if self._module._diff:
+ before = get_dict_of_struct(
+ entity,
+ self._connection,
+ fetch_nested=True,
+ attributes=['name'],
+ )
+ after = before.copy()
+ self.diff_update(after, get_dict_of_struct(new_entity))
+ self._diff['before'] = before
+ self._diff['after'] = after
+
+ self.changed = True
+ else:
+ # Entity doesn't exist, so create it:
+ if not self._module.check_mode:
+ entity = self._service.add(
+ self.build_entity(),
+ **kwargs
+ )
+ self.post_create(entity)
+ self.changed = True
+
+ if not self._module.check_mode:
+ # Wait for the entity to be created and to be in the defined state:
+ entity_service = self._service.service(entity.id)
+
+ def state_condition(entity):
+ return entity
+
+ if result_state:
+
+ def state_condition(entity):
+ return entity and entity.status == result_state
+
+ wait(
+ service=entity_service,
+ condition=state_condition,
+ fail_condition=fail_condition,
+ wait=_wait if _wait is not None else self._module.params['wait'],
+ timeout=self._module.params['timeout'],
+ poll_interval=self._module.params['poll_interval'],
+ )
+
+ return {
+ 'changed': self.changed,
+ 'id': getattr(entity, 'id', None),
+ type(entity).__name__.lower(): get_dict_of_struct(
+ struct=entity,
+ connection=self._connection,
+ fetch_nested=self._module.params.get('fetch_nested'),
+ attributes=self._module.params.get('nested_attributes'),
+ ),
+ 'diff': self._diff,
+ }
+
+ def pre_remove(self, entity):
+ """
+ This method is called right before entity is removed.
+
+ :param entity: Entity which we want to remove.
+ """
+ pass
+
+ def entity_name(self, entity):
+ return "{e_type} '{e_name}'".format(
+ e_type=type(entity).__name__.lower(),
+ e_name=getattr(entity, 'name', None),
+ )
+
+ def remove(self, entity=None, search_params=None, **kwargs):
+ """
+ Method which is called when the state of the entity is 'absent'. If the
+ user doesn't provide the `entity` parameter, the entity is searched for
+ using the `search_params` parameter. If the entity is found, it's removed.
+
+ A function executed before the entity is removed can optionally be
+ provided by overriding the `pre_remove` method.
+
+ :param entity: Entity we want to remove.
+ :param search_params: Dictionary of parameters to be used for search.
+ :param kwargs: Additional parameters passed when removing entity.
+ :return: Dictionary with values returned by Ansible module.
+ """
+ if entity is None:
+ entity = self.search_entity(search_params)
+
+ if entity is None:
+ return {
+ 'changed': self.changed,
+ 'msg': "Entity wasn't found."
+ }
+
+ self.pre_remove(entity)
+
+ entity_service = self._service.service(entity.id)
+ if not self._module.check_mode:
+ entity_service.remove(**kwargs)
+ wait(
+ service=entity_service,
+ condition=lambda entity: not entity,
+ wait=self._module.params['wait'],
+ timeout=self._module.params['timeout'],
+ poll_interval=self._module.params['poll_interval'],
+ )
+ self.changed = True
+
+ return {
+ 'changed': self.changed,
+ 'id': entity.id,
+ type(entity).__name__.lower(): get_dict_of_struct(
+ struct=entity,
+ connection=self._connection,
+ fetch_nested=self._module.params.get('fetch_nested'),
+ attributes=self._module.params.get('nested_attributes'),
+ ),
+ }
+
+ def action(
+ self,
+ action,
+ entity=None,
+ action_condition=lambda e: e,
+ wait_condition=lambda e: e,
+ fail_condition=lambda e: False,
+ pre_action=lambda e: e,
+ post_action=lambda e: None,
+ search_params=None,
+ **kwargs
+ ):
+ """
+ This method is executed when we want to change the state of some oVirt
+ entity. The action to be executed on the oVirt service is specified by the
+ `action` parameter. Whether the action should be executed can be
+ controlled by passing the `action_condition` parameter. The state which the
+ entity should be in after execution of the action can be specified
+ by the `wait_condition` parameter.
+
+ Function executed before an action on entity can optionally be specified
+ in `pre_action` parameter. Function executed after an action on entity can
+ optionally be specified in `post_action` parameter.
+
+ :param action: Action which should be executed by service on entity.
+ :param entity: Entity we want to run action on.
+ :param action_condition: Function which is executed when checking if action should be executed.
+ :param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised.
+ :param wait_condition: Function which is executed when waiting on result state.
+ :param pre_action: Function which is executed before running the action.
+ :param post_action: Function which is executed after running the action.
+ :param search_params: Dictionary of parameters to be used for search.
+ :param kwargs: Additional parameters passed to action.
+ :return: Dictionary with values returned by Ansible module.
+ """
+ if entity is None:
+ entity = self.search_entity(search_params)
+
+ entity = pre_action(entity)
+
+ if entity is None:
+ self._module.fail_json(
+ msg="Entity not found, can't run action '{0}'.".format(
+ action
+ )
+ )
+
+ entity_service = self._service.service(entity.id)
+ entity = entity_service.get()
+ if action_condition(entity):
+ if not self._module.check_mode:
+ getattr(entity_service, action)(**kwargs)
+ self.changed = True
+
+ post_action(entity)
+
+ wait(
+ service=self._service.service(entity.id),
+ condition=wait_condition,
+ fail_condition=fail_condition,
+ wait=self._module.params['wait'],
+ timeout=self._module.params['timeout'],
+ poll_interval=self._module.params['poll_interval'],
+ )
+ return {
+ 'changed': self.changed,
+ 'id': entity.id,
+ type(entity).__name__.lower(): get_dict_of_struct(
+ struct=entity,
+ connection=self._connection,
+ fetch_nested=self._module.params.get('fetch_nested'),
+ attributes=self._module.params.get('nested_attributes'),
+ ),
+ 'diff': self._diff,
+ }
+
+ def wait_for_import(self, condition=lambda e: True):
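+ # Poll search_entity() until the imported entity appears and satisfies
+ # `condition`, honoring the module's wait/timeout/poll_interval parameters;
+ # returns None when the timeout is reached or waiting is disabled.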
+ if self._module.params['wait']:
+ start = time.time()
+ timeout = self._module.params['timeout']
+ poll_interval = self._module.params['poll_interval']
+ while time.time() < start + timeout:
+ entity = self.search_entity()
+ if entity and condition(entity):
+ return entity
+ time.sleep(poll_interval)
+
+ def search_entity(self, search_params=None, list_params=None):
+ """
+ Always try to search by `id` first. If no ID is specified, check whether
+ the user constructed a special search in `search_params`; if not,
+ search by `name`.
+ """
+ entity = None
+
+ if 'id' in self._module.params and self._module.params['id'] is not None:
+ entity = get_entity(self._service.service(self._module.params['id']), get_params=list_params)
+ elif search_params is not None:
+ entity = search_by_attributes(self._service, list_params=list_params, **search_params)
+ elif self._module.params.get('name') is not None:
+ entity = search_by_attributes(self._service, list_params=list_params, name=self._module.params['name'])
+
+ return entity
+
+ def _get_major(self, full_version):
+ if full_version is None or full_version == "":
+ return None
+ if isinstance(full_version, otypes.Version):
+ return int(full_version.major)
+ return int(full_version.split('.')[0])
+
+ def _get_minor(self, full_version):
+ if full_version is None or full_version == "":
+ return None
+ if isinstance(full_version, otypes.Version):
+ return int(full_version.minor)
+ return int(full_version.split('.')[1])
+
+
+def _sdk4_error_maybe():
+ """
+ Allow for ovirtsdk4 not being installed.
+ """
+ if HAS_SDK:
+ return sdk.Error
+ return type(None)
+
+
+class OvirtRetry(CloudRetry):
+ base_class = _sdk4_error_maybe()
+
+ @staticmethod
+ def status_code_from_exception(error):
+ return error.code
+
+ @staticmethod
+ def found(response_code, catch_extra_error_codes=None):
+ # This is a list of error codes to retry.
+ retry_on = [
+ # HTTP status: Conflict
+ 409,
+ ]
+ if catch_extra_error_codes:
+ retry_on.extend(catch_extra_error_codes)
+
+ return response_code in retry_on
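+
+
+# A usage sketch for OvirtRetry, assuming the CloudRetry base class provides
+# the exponential_backoff decorator (as in ansible.module_utils.cloud); the
+# start_vm helper below is illustrative:
+#
+# @OvirtRetry.exponential_backoff(retries=3, delay=2)
+# def start_vm(vm_service):
+# vm_service.start()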
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/__init__.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/__init__.py
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_affinity_group.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_affinity_group.py
new file mode 100644
index 00000000..0eab3252
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_affinity_group.py
@@ -0,0 +1,340 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_affinity_group
+short_description: Module to manage affinity groups in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "This module manage affinity groups in oVirt/RHV. It can also manage assignments
+ of those groups to VMs."
+options:
+ name:
+ description:
+ - Name of the affinity group to manage.
+ required: true
+ type: str
+ state:
+ description:
+ - Should the affinity group be present or absent.
+ choices: [ absent, present ]
+ type: str
+ default: present
+ cluster:
+ description:
+ - Name of the cluster of the affinity group.
+ type: str
+ required: true
+ description:
+ description:
+ - Description of the affinity group.
+ type: str
+ host_enforcing:
+ description:
+ - If I(yes), a VM cannot start on a host that does not satisfy the C(host_rule).
+ - This parameter is supported since oVirt/RHV version 4.1.
+ type: bool
+ host_rule:
+ description:
+ - If I(positive), I(all) VMs in this group should run on this host.
+ - If I(negative), I(no) VMs in this group should run on this host.
+ - If I(disabled), this affinity group doesn't take effect.
+ - This parameter is supported since oVirt/RHV version 4.1.
+ choices: [ disabled, negative, positive ]
+ type: str
+ vm_enforcing:
+ description:
+ - If I(yes), a VM cannot start if it does not satisfy the C(vm_rule).
+ type: bool
+ vm_rule:
+ description:
+ - If I(positive), I(all) VMs in this group should run on the host defined by C(host_rule).
+ - If I(negative), I(no) VMs in this group should run on the host defined by C(host_rule).
+ - If I(disabled), this affinity group doesn't take effect.
+ choices: [ disabled, negative, positive ]
+ type: str
+ vms:
+ description:
+ - List of VM names which should have this affinity group assigned.
+ type: list
+ elements: str
+ hosts:
+ description:
+ - List of host names which should have this affinity group assigned.
+ - This parameter is supported since oVirt/RHV version 4.1.
+ type: list
+ elements: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Create(if not exists) and assign affinity group to VMs vm1 and vm2 and host host1
+ ovirt.ovirt.ovirt_affinity_group:
+ name: mygroup
+ cluster: mycluster
+ vm_enforcing: true
+ vm_rule: positive
+ host_enforcing: true
+ host_rule: positive
+ vms:
+ - vm1
+ - vm2
+ hosts:
+ - host1
+
+- name: Detach VMs from affinity group and disable VM rule
+ ovirt.ovirt.ovirt_affinity_group:
+ name: mygroup
+ cluster: mycluster
+ vm_enforcing: false
+ vm_rule: disabled
+ host_enforcing: true
+ host_rule: positive
+ vms: []
+ hosts:
+ - host1
+ - host2
+
+- name: Remove affinity group
+ ovirt.ovirt.ovirt_affinity_group:
+ state: absent
+ cluster: mycluster
+ name: mygroup
+'''
+
+RETURN = '''
+id:
+ description: ID of the affinity group which is managed
+ returned: On success if affinity group is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+affinity_group:
+ description: "Dictionary of all the affinity group attributes. Affinity group attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/affinity_group."
+ returned: On success if affinity group is found.
+ type: dict
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ check_support,
+ create_connection,
+ get_id_by_name,
+ equal,
+ engine_supported,
+ ovirt_full_argument_spec,
+ search_by_name,
+)
+
+
+class AffinityGroupsModule(BaseModule):
+
+ def __init__(self, vm_ids, host_ids, *args, **kwargs):
+ super(AffinityGroupsModule, self).__init__(*args, **kwargs)
+ self._vm_ids = vm_ids
+ self._host_ids = host_ids
+
+ def update_vms(self, affinity_group):
+ """
+ This method iterates over the affinity group's VM assignments, detaches
+ the VMs which should not be attached to the affinity group, and attaches
+ the VMs which should be attached.
+ """
+ assigned_vms = self.assigned_vms(affinity_group)
+ to_remove = [vm for vm in assigned_vms if vm not in self._vm_ids]
+ to_add = [vm for vm in self._vm_ids if vm not in assigned_vms]
+ ag_service = self._service.group_service(affinity_group.id)
+ for vm in to_remove:
+ ag_service.vms_service().vm_service(vm).remove()
+ for vm in to_add:
+ # API return <action> element instead of VM element, so we
+ # need to WA this issue, for oVirt/RHV versions having this bug:
+ try:
+ ag_service.vms_service().add(otypes.Vm(id=vm))
+ except ValueError as ex:
+ if 'complete' not in str(ex):
+ raise ex
+
+ def post_create(self, affinity_group):
+ self.update_vms(affinity_group)
+
+ def post_update(self, affinity_group):
+ self.update_vms(affinity_group)
+
+ def build_entity(self):
+ affinity_group = otypes.AffinityGroup(
+ name=self._module.params['name'],
+ description=self._module.params['description'],
+ positive=(
+ self._module.params['vm_rule'] == 'positive'
+ ) if self._module.params['vm_rule'] is not None else None,
+ enforcing=(
+ self._module.params['vm_enforcing']
+ ) if self._module.params['vm_enforcing'] is not None else None,
+ )
+
+ # These attributes are supported since 4.1:
+ if not engine_supported(self._connection, '4.1'):
+ return affinity_group
+
+ affinity_group.hosts_rule = otypes.AffinityRule(
+ positive=(
+ self.param('host_rule') == 'positive'
+ ) if self.param('host_rule') is not None else None,
+ enforcing=self.param('host_enforcing'),
+ ) if (
+ self.param('host_enforcing') is not None or
+ self.param('host_rule') is not None
+ ) else None
+
+ affinity_group.vms_rule = otypes.AffinityRule(
+ positive=(
+ self.param('vm_rule') == 'positive'
+ ) if self.param('vm_rule') is not None else None,
+ enforcing=self.param('vm_enforcing'),
+ enabled=(
+ self.param('vm_rule') in ['negative', 'positive']
+ ) if self.param('vm_rule') is not None else None,
+ ) if (
+ self.param('vm_enforcing') is not None or
+ self.param('vm_rule') is not None
+ ) else None
+
+ affinity_group.hosts = [
+ otypes.Host(id=host_id) for host_id in self._host_ids
+ ] if self._host_ids is not None else None
+
+ return affinity_group
+
+ def assigned_vms(self, affinity_group):
+ if getattr(affinity_group.vms, 'href', None):
+ return sorted([
+ vm.id for vm in self._connection.follow_link(affinity_group.vms)
+ ])
+ else:
+ return sorted([vm.id for vm in affinity_group.vms])
+
+ def update_check(self, entity):
+ assigned_vms = self.assigned_vms(entity)
+ do_update = (
+ equal(self.param('description'), entity.description) and equal(self.param('vm_enforcing'), entity.enforcing) and equal(
+ self.param('vm_rule') == 'positive' if self.param('vm_rule') else None,
+ entity.positive
+ ) and equal(self._vm_ids, assigned_vms)
+ )
+ # The following attributes are supported since 4.1,
+ # so return early if the engine doesn't support them:
+ if not engine_supported(self._connection, '4.1'):
+ return do_update
+
+ # The following is supported since 4.1:
+ return do_update and (
+ equal(
+ self.param('host_rule') == 'positive' if self.param('host_rule') else None,
+ entity.hosts_rule.positive) and equal(self.param('host_enforcing'), entity.hosts_rule.enforcing) and equal(
+ self.param('vm_rule') in ['negative', 'positive'] if self.param('vm_rule') else None,
+ entity.vms_rule.enabled) and equal(self._host_ids, sorted([host.id for host in entity.hosts]))
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ cluster=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ description=dict(type='str'),
+ vm_enforcing=dict(type='bool'),
+ vm_rule=dict(type='str', choices=['disabled', 'negative', 'positive']),
+ host_enforcing=dict(type='bool'),
+ host_rule=dict(type='str', choices=['disabled', 'negative', 'positive']),
+ vms=dict(type='list', elements='str'),
+ hosts=dict(type='list', elements='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ # Check if unsupported parameters were passed:
+ supported_41 = ('host_enforcing', 'host_rule', 'hosts')
+ if not check_support(
+ version='4.1',
+ connection=connection,
+ module=module,
+ params=supported_41,
+ ):
+ module.fail_json(
+ msg='The following parameters are only supported since 4.1: {params}'.format(
+ params=supported_41,
+ )
+ )
+ clusters_service = connection.system_service().clusters_service()
+ vms_service = connection.system_service().vms_service()
+ hosts_service = connection.system_service().hosts_service()
+ cluster_name = module.params['cluster']
+ cluster = search_by_name(clusters_service, cluster_name)
+ if cluster is None:
+ raise Exception("Cluster '%s' was not found." % cluster_name)
+ cluster_service = clusters_service.cluster_service(cluster.id)
+ affinity_groups_service = cluster_service.affinity_groups_service()
+
+ # Fetch VM ids which should be assigned to affinity group:
+ vm_ids = sorted([
+ get_id_by_name(vms_service, vm_name)
+ for vm_name in module.params['vms']
+ ]) if module.params['vms'] is not None else None
+ # Fetch host ids which should be assigned to affinity group:
+ host_ids = sorted([
+ get_id_by_name(hosts_service, host_name)
+ for host_name in module.params['hosts']
+ ]) if module.params['hosts'] is not None else None
+
+ affinity_groups_module = AffinityGroupsModule(
+ connection=connection,
+ module=module,
+ service=affinity_groups_service,
+ vm_ids=vm_ids,
+ host_ids=host_ids,
+ )
+
+ state = module.params['state']
+ if state == 'present':
+ ret = affinity_groups_module.create()
+ elif state == 'absent':
+ ret = affinity_groups_module.remove()
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_affinity_label.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_affinity_label.py
new file mode 100644
index 00000000..7c44495f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_affinity_label.py
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_affinity_label
+short_description: Module to manage affinity labels in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "This module manage affinity labels in oVirt/RHV. It can also manage assignments
+ of those labels to hosts and VMs."
+options:
+ name:
+ description:
+ - "Name of the affinity label to manage."
+ required: true
+ type: str
+ state:
+ description:
+ - "Should the affinity label be present or absent."
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ cluster:
+ description:
+ - "Name of the cluster where vms and hosts resides."
+ type: str
+ vms:
+ description:
+ - "List of the VMs names, which should have assigned this affinity label."
+ type: list
+ elements: str
+ hosts:
+ description:
+ - "List of the hosts names, which should have assigned this affinity label."
+ type: list
+ elements: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create(if not exists) and assign affinity label to vms vm1 and vm2 and host host1
+- ovirt.ovirt.ovirt_affinity_label:
+ name: mylabel
+ cluster: mycluster
+ vms:
+ - vm1
+ - vm2
+ hosts:
+ - host1
+
+# To detach all VMs from label
+- ovirt.ovirt.ovirt_affinity_label:
+ name: mylabel
+ cluster: mycluster
+ vms: []
+
+# Remove affinity label
+- ovirt.ovirt.ovirt_affinity_label:
+ state: absent
+ name: mylabel
+'''
+
+RETURN = '''
+id:
+ description: ID of the affinity label which is managed
+ returned: On success if affinity label is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+affinity_label:
+ description: "Dictionary of all the affinity label attributes. Affinity label attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/affinity_label."
+ type: dict
+ returned: On success if affinity label is found.
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from collections import defaultdict
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ ovirt_full_argument_spec,
+)
+
+
+class AffinityLabelsModule(BaseModule):
+
+ def build_entity(self):
+ return otypes.AffinityLabel(name=self._module.params['name'])
+
+ def post_create(self, entity):
+ self.update_check(entity)
+
+ def pre_remove(self, entity):
+ self._module.params['vms'] = []
+ self._module.params['hosts'] = []
+ self.update_check(entity)
+
+ def _update_label_assignments(self, entity, name, label_obj_type):
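+ # Reconcile label assignments for `name` ('vms' or 'hosts'): collect the
+ # names of currently labeled objects (optionally restricted to the given
+ # cluster), attach requested objects that are missing, and detach labeled
+ # objects that are no longer requested.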
+ objs_service = getattr(self._connection.system_service(), '%s_service' % name)()
+ if self._module.params[name] is not None:
+ objs = self._connection.follow_link(getattr(entity, name))
+ objs_names = defaultdict(list)
+ for obj in objs:
+ labeled_entity = objs_service.service(obj.id).get()
+ if self._module.params['cluster'] is None:
+ objs_names[labeled_entity.name].append(obj.id)
+ elif self._connection.follow_link(labeled_entity.cluster).name == self._module.params['cluster']:
+ objs_names[labeled_entity.name].append(obj.id)
+
+ for obj in self._module.params[name]:
+ if obj not in objs_names:
+ for obj_id in objs_service.list(
+ search='name=%s and cluster=%s' % (obj, self._module.params['cluster'])
+ ):
+ label_service = getattr(self._service.service(entity.id), '%s_service' % name)()
+ if not self._module.check_mode:
+ label_service.add(**{
+ name[:-1]: label_obj_type(id=obj_id.id)
+ })
+ self.changed = True
+
+ for obj in objs_names:
+ if obj not in self._module.params[name]:
+ label_service = getattr(self._service.service(entity.id), '%s_service' % name)()
+ if not self._module.check_mode:
+ for obj_id in objs_names[obj]:
+ label_service.service(obj_id).remove()
+ self.changed = True
+
+ def update_check(self, entity):
+ self._update_label_assignments(entity, 'vms', otypes.Vm)
+ self._update_label_assignments(entity, 'hosts', otypes.Host)
+ return True
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ cluster=dict(default=None),
+ name=dict(required=True),
+ vms=dict(default=None, type='list', elements='str'),
+ hosts=dict(default=None, type='list', elements='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[
+ ('state', 'present', ['cluster']),
+ ],
+ )
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ affinity_labels_service = connection.system_service().affinity_labels_service()
+ affinity_labels_module = AffinityLabelsModule(
+ connection=connection,
+ module=module,
+ service=affinity_labels_service,
+ )
+
+ state = module.params['state']
+ if state == 'present':
+ ret = affinity_labels_module.create()
+ elif state == 'absent':
+ ret = affinity_labels_module.remove()
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_affinity_label_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_affinity_label_info.py
new file mode 100644
index 00000000..3894371d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_affinity_label_info.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_affinity_label_info
+short_description: Retrieve information about one or more oVirt/RHV affinity labels
+version_added: "1.0.0"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Retrieve information about one or more oVirt/RHV affinity labels."
+ - This module was called C(ovirt_affinity_label_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_affinity_label_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_affinity_labels), which
+ contains a list of affinity labels. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ name:
+ description:
+ - "Name of the affinity labels which should be listed."
+ type: str
+ vm:
+ description:
+ - "Name of the VM, which affinity labels should be listed."
+ type: str
+ host:
+ description:
+ - "Name of the host, which affinity labels should be listed."
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all affinity labels, which names start with C(label):
+- ovirt.ovirt.ovirt_affinity_label_info:
+ name: label*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_affinity_labels }}"
+
+# Gather information about all affinity labels, which are assigned to VMs
+# which names start with C(postgres):
+- ovirt.ovirt.ovirt_affinity_label_info:
+ vm: postgres*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_affinity_labels }}"
+
+# Gather information about all affinity labels, which are assigned to hosts
+# which names start with C(west):
+- ovirt.ovirt.ovirt_affinity_label_info:
+ host: west*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_affinity_labels }}"
+
+# Gather information about all affinity labels, which are assigned to hosts
+# which names start with C(west) or VMs which names start with C(postgres):
+- ovirt.ovirt.ovirt_affinity_label_info:
+ host: west*
+ vm: postgres*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_affinity_labels }}"
+'''
+
+RETURN = '''
+ovirt_affinity_labels:
+ description: "List of dictionaries describing the affinity labels. Affinity labels attributes are mapped to dictionary keys,
+ all affinity labels attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/affinity_label."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ name=dict(default=None),
+ host=dict(default=None),
+ vm=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ affinity_labels_service = connection.system_service().affinity_labels_service()
+ labels = []
+ all_labels = affinity_labels_service.list()
+ if module.params['name']:
+ labels.extend([
+ label for label in all_labels
+ if fnmatch.fnmatch(label.name, module.params['name'])
+ ])
+ if module.params['host']:
+ hosts_service = connection.system_service().hosts_service()
+ if search_by_name(hosts_service, module.params['host']) is None:
+ raise Exception("Host '%s' was not found." % module.params['host'])
+ labels.extend([
+ label
+ for label in all_labels
+ for host in connection.follow_link(label.hosts)
+ if fnmatch.fnmatch(hosts_service.service(host.id).get().name, module.params['host'])
+ ])
+ if module.params['vm']:
+ vms_service = connection.system_service().vms_service()
+ if search_by_name(vms_service, module.params['vm']) is None:
+ raise Exception("Vm '%s' was not found." % module.params['vm'])
+ labels.extend([
+ label
+ for label in all_labels
+ for vm in connection.follow_link(label.vms)
+ if fnmatch.fnmatch(vms_service.service(vm.id).get().name, module.params['vm'])
+ ])
+
+ if not (module.params['vm'] or module.params['host'] or module.params['name']):
+ labels = all_labels
+
+ result = dict(
+ ovirt_affinity_labels=[
+ get_dict_of_struct(
+ struct=label,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for label in labels
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_api_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_api_info.py
new file mode 100644
index 00000000..6cec91cc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_api_info.py
@@ -0,0 +1,84 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_api_info
+short_description: Retrieve information about the oVirt/RHV API
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+description:
+ - "Retrieve information about the oVirt/RHV API."
+ - This module was called C(ovirt_api_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_api_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_api),
+ which contains information about the oVirt/RHV API. You need to register the result with
+ the I(register) keyword to use it."
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about the oVirt API:
+- ovirt.ovirt.ovirt_api_info:
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_api }}"
+'''
+
+RETURN = '''
+ovirt_api:
+ description: "Dictionary describing the oVirt API information.
+ Api attributes are mapped to dictionary keys,
+ all API attributes can be found at following
+ url: https://ovirt.example.com/ovirt-engine/api/model#types/api."
+ returned: On success.
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec()
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ api = connection.system_service().get()
+ result = dict(
+ ovirt_api=get_dict_of_struct(
+ struct=api,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ )
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_auth.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_auth.py
new file mode 100644
index 00000000..09d30383
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_auth.py
@@ -0,0 +1,310 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_auth
+short_description: "Module to manage authentication to oVirt/RHV"
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+description:
+ - "This module authenticates to oVirt/RHV engine and creates SSO token, which should be later used in
+ all other oVirt/RHV modules, so all modules don't need to perform login and logout.
+ This module returns an Ansible fact called I(ovirt_auth). Every module can use this
+ fact as C(auth) parameter, to perform authentication."
+options:
+ state:
+ default: present
+ choices: ['present', 'absent']
+ description:
+ - "Specifies if a token should be created or revoked."
+ type: str
+ username:
+ required: False
+ description:
+ - "The name of the user. For example: I(admin@internal)
+ Default value is set by I(OVIRT_USERNAME) environment variable."
+ type: str
+ password:
+ required: False
+ description:
+ - "The password of the user. Default value is set by I(OVIRT_PASSWORD) environment variable."
+ type: str
+ token:
+ required: False
+ description:
+ - "SSO token to be used instead of login with username/password.
+ Default value is set by I(OVIRT_TOKEN) environment variable."
+ type: str
+ url:
+ required: False
+ description:
+ - "A string containing the API URL of the server.
+ For example: I(https://server.example.com/ovirt-engine/api).
+ Default value is set by I(OVIRT_URL) environment variable."
+ - "Either C(url) or C(hostname) is required."
+ type: str
+ hostname:
+ required: False
+ description:
+ - "A string containing the hostname of the server.
+ For example: I(server.example.com).
+ Default value is set by I(OVIRT_HOSTNAME) environment variable."
+ - "Either C(url) or C(hostname) is required."
+ type: str
+ insecure:
+ required: False
+ description:
+ - "A boolean flag that indicates if the server TLS certificate and host name should be checked."
+ type: bool
+ ca_file:
+ required: False
+ description:
+ - "A PEM file containing the trusted CA certificates. The
+ certificate presented by the server will be verified using these CA
+ certificates. If C(ca_file) parameter is not set, system wide
+ CA certificate store is used.
+ Default value is set by I(OVIRT_CAFILE) environment variable."
+ type: path
+ timeout:
+ required: False
+ description:
+ - "The maximum total time to wait for the response, in
+ seconds. A value of zero (the default) means wait forever. If
+ the timeout expires before the response is received an exception
+ will be raised."
+ type: int
+ compress:
+ required: False
+ description:
+ - "A boolean flag indicating if the SDK should ask
+ the server to send compressed responses. The default is I(True).
+ Note that this is a hint for the server, and that it may return
+ uncompressed data even when this parameter is set to I(True)."
+ type: bool
+ default: true
+ kerberos:
+ required: False
+ description:
+ - "A boolean flag indicating if Kerberos authentication
+ should be used instead of the default basic authentication."
+ type: bool
+ headers:
+ required: False
+ description:
+ - "A dictionary of HTTP headers to be added to each API call."
+ type: dict
+ ovirt_auth:
+ description:
+ - "Previous run of the ovirt_auth used with C(state) absent"
+ - "Closes connection with the engine."
+ type: dict
+requirements:
+ - python >= 2.7
+ - ovirt-engine-sdk-python >= 4.4.0
+notes:
+ - "Everytime you use ovirt_auth module to obtain ticket, you need to also revoke the ticket,
+ when you no longer need it, otherwise the ticket would be revoked by engine when it expires.
+ For an example of how to achieve that, please take a look at I(examples) section."
+ - "In order to use this module you have to install oVirt/RHV Python SDK.
+ To ensure it's installed with correct version you can create the following task:
+ I(pip: name=ovirt-engine-sdk-python version=4.4.0)"
+ - "Note that in oVirt/RHV 4.1 if you want to use a user which is not administrator
+ you must enable the I(ENGINE_API_FILTER_BY_DEFAULT) variable in engine. In
+ oVirt/RHV 4.2 and later it's enabled by default."
+'''
+
+EXAMPLES = '''
+ - block:
+ # Create a vault with `ovirt_password` variable which store your
+ # oVirt/RHV user's password, and include that yaml file with variable:
+ - ansible.builtin.include_vars: ovirt_password.yml
+
+ - name: Obtain SSO token with using username/password credentials
+ ovirt.ovirt.ovirt_auth:
+ url: https://ovirt.example.com/ovirt-engine/api
+ username: admin@internal
+ ca_file: ca.pem
+ password: "{{ ovirt_password }}"
+
+ # Previous task generated I(ovirt_auth) fact, which you can later use
+ # in different modules as follows:
+ - ovirt.ovirt.ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ state: absent
+ name: myvm
+
+ always:
+ - name: Always revoke the SSO token
+ ovirt.ovirt.ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
+
+# When the user sets the following environment variables:
+# OVIRT_URL = https://fqdn/ovirt-engine/api
+# OVIRT_USERNAME = admin@internal
+# OVIRT_PASSWORD = the_password
+# they can log in to oVirt using the environment variables instead of variables
+# in a yaml file.
+# This is mainly useful when using Ansible Tower or AWX, as it will work
+# for Red Hat Virtualization credentials type.
+ - name: Obtain SSO token
+ ovirt_auth:
+ state: present
+'''
+
+RETURN = '''
+ovirt_auth:
+ description: Authentication facts, needed to perform authentication to oVirt/RHV.
+ returned: success
+ type: complex
+ contains:
+ token:
+ description: SSO token which is used for connection to oVirt/RHV engine.
+ returned: success
+ type: str
+ sample: "kdfVWp9ZgeewBXV-iq3Js1-xQJZPSEQ334FLb3eksoEPRaab07DhZ8ED8ghz9lJd-MQ2GqtRIeqhvhCkrUWQPw"
+ url:
+ description: URL of the oVirt/RHV engine API endpoint.
+ returned: success
+ type: str
+ sample: "https://ovirt.example.com/ovirt-engine/api"
+ ca_file:
+ description: CA file, which is used to verify SSL/TLS connection.
+ returned: success
+ type: str
+ sample: "ca.pem"
+ insecure:
+ description: Flag indicating if insecure connection is used.
+ returned: success
+ type: bool
+ sample: False
+ timeout:
+ description: Number of seconds to wait for response.
+ returned: success
+ type: int
+ sample: 0
+ compress:
+ description: Flag indicating if compression is used for connection.
+ returned: success
+ type: bool
+ sample: True
+ kerberos:
+ description: Flag indicating if kerberos is used for authentication.
+ returned: success
+ type: bool
+ sample: False
+ headers:
+ description: Dictionary of HTTP headers to be added to each API call.
+ returned: success
+ type: dict
+'''
+
+import os
+import traceback
+
+try:
+ import ovirtsdk4 as sdk
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import check_sdk
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ url=dict(default=None),
+ hostname=dict(default=None),
+ username=dict(default=None),
+ password=dict(default=None, no_log=True),
+ ca_file=dict(default=None, type='path'),
+ insecure=dict(required=False, type='bool', default=None),
+ timeout=dict(required=False, type='int', default=0),
+ compress=dict(required=False, type='bool', default=True),
+ kerberos=dict(required=False, type='bool', default=False),
+ headers=dict(required=False, type='dict'),
+ state=dict(default='present', choices=['present', 'absent']),
+ token=dict(default=None),
+ ovirt_auth=dict(required=False, type='dict'),
+ ),
+ required_if=[
+ ('state', 'absent', ['ovirt_auth']),
+ ],
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+
+ state = module.params.get('state')
+ if state == 'present':
+ params = module.params
+ elif state == 'absent':
+ params = module.params['ovirt_auth']
+
+ def get_required_parameter(param, env_var, required=False):
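+ # Resolve the value from module params first, then fall back to the
+ # environment variable; fail only when the parameter is required and
+ # state is 'present'.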
+ var = params.get(param) or os.environ.get(env_var)
+ if not var and required and state == 'present':
+ module.fail_json(msg="'%s' is a required parameter." % param)
+
+ return var
+
+ url = get_required_parameter('url', 'OVIRT_URL', required=False)
+ hostname = get_required_parameter('hostname', 'OVIRT_HOSTNAME', required=False)
+ if url is None and hostname is None:
+ module.fail_json(msg="You must specify either 'url' or 'hostname'.")
+
+ if url is None and hostname is not None:
+ url = 'https://{0}/ovirt-engine/api'.format(hostname)
+
+ username = get_required_parameter('username', 'OVIRT_USERNAME')
+ password = get_required_parameter('password', 'OVIRT_PASSWORD')
+ token = get_required_parameter('token', 'OVIRT_TOKEN')
+ ca_file = get_required_parameter('ca_file', 'OVIRT_CAFILE')
+ insecure = params.get('insecure') if params.get('insecure') is not None else not bool(ca_file)
+
+ connection = sdk.Connection(
+ url=url,
+ username=username,
+ password=password,
+ ca_file=ca_file,
+ insecure=insecure,
+ timeout=params.get('timeout'),
+ compress=params.get('compress'),
+ kerberos=params.get('kerberos'),
+ headers=params.get('headers'),
+ token=token,
+ )
+ try:
+ token = connection.authenticate()
+ module.exit_json(
+ changed=False,
+ ansible_facts=dict(
+ ovirt_auth=dict(
+ token=token,
+ url=url,
+ ca_file=ca_file,
+ insecure=insecure,
+ timeout=params.get('timeout'),
+ compress=params.get('compress'),
+ kerberos=params.get('kerberos'),
+ headers=params.get('headers'),
+ ) if state == 'present' else dict()
+ )
+ )
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ # Close the connection; revoke the token only when state is 'absent':
+ connection.close(logout=state == 'absent')
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_cluster.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_cluster.py
new file mode 100644
index 00000000..177bb26f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_cluster.py
@@ -0,0 +1,792 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_cluster
+short_description: Module to manage clusters in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Module to manage clusters in oVirt/RHV"
+options:
+ id:
+ description:
+ - "ID of the cluster to manage."
+ type: str
+ name:
+ description:
+ - "Name of the cluster to manage."
+ required: true
+ type: str
+ state:
+ description:
+ - "Should the cluster be present or absent."
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ data_center:
+ description:
+ - "Datacenter name where cluster reside."
+ type: str
+ description:
+ description:
+ - "Description of the cluster."
+ type: str
+ comment:
+ description:
+ - "Comment of the cluster."
+ type: str
+ network:
+ description:
+ - "Management network of cluster to access cluster hosts."
+ type: str
+ ballooning:
+ description:
+ - "If I(True) enable memory balloon optimization. Memory balloon is used to
+ re-distribute / reclaim the host memory based on VM needs
+ in a dynamic way."
+ type: bool
+ aliases: ['balloon']
+ virt:
+ description:
+ - "If I(True), hosts in this cluster will be used to run virtual machines."
+ type: bool
+ gluster:
+ description:
+ - "If I(True), hosts in this cluster will be used as Gluster Storage
+ server nodes, and not for running virtual machines."
+ - "By default the cluster is created for virtual machine hosts."
+ type: bool
+ threads_as_cores:
+ description:
+ - "If I(True) the exposed host threads would be treated as cores
+ which can be utilized by virtual machines."
+ type: bool
+ ksm:
+ description:
+ - "I I(True) MoM enables to run Kernel Same-page Merging I(KSM) when
+ necessary and when it can yield a memory saving benefit that
+ outweighs its CPU cost."
+ type: bool
+ ksm_numa:
+ description:
+ - "If I(True) enables KSM C(ksm) for best performance inside NUMA nodes."
+ type: bool
+ ha_reservation:
+ description:
+ - "If I(True) enables the oVirt/RHV to monitor cluster capacity for highly
+ available virtual machines."
+ type: bool
+ trusted_service:
+ description:
+ - "If I(True) enables integration with an OpenAttestation server."
+ type: bool
+ vm_reason:
+ description:
+ - "If I(True) enables an optional reason field when a virtual machine
+ is shut down from the Manager, allowing the administrator to
+ provide an explanation for the maintenance."
+ type: bool
+ host_reason:
+ description:
+ - "If I(True) enables an optional reason field when a host is placed
+ into maintenance mode from the Manager, allowing the administrator
+ to provide an explanation for the maintenance."
+ type: bool
+ memory_policy:
+ description:
+ - "I(disabled) - Disables memory page sharing."
+ - "I(server) - Sets the memory page sharing threshold to 150% of the system memory on each host."
+ - "I(desktop) - Sets the memory page sharing threshold to 200% of the system memory on each host."
+ choices: ['disabled', 'server', 'desktop']
+ type: str
+ aliases: ['performance_preset']
+ rng_sources:
+ description:
+ - "List that specify the random number generator devices that all hosts in the cluster will use."
+ - "Supported generators are: I(hwrng) and I(random)."
+ type: list
+ elements: str
+ spice_proxy:
+ description:
+ - "The proxy by which the SPICE client will connect to virtual machines."
+ - "The address must be in the following format: I(protocol://[host]:[port])"
+ type: str
+ fence_enabled:
+ description:
+ - "If I(True) enables fencing on the cluster."
+ - "Fencing is enabled by default."
+ type: bool
+ fence_skip_if_gluster_bricks_up:
+ description:
+ - "A flag indicating if fencing should be skipped if Gluster bricks are up and running in the host being fenced."
+ - "This flag is optional, and the default value is `false`."
+ type: bool
+ fence_skip_if_gluster_quorum_not_met:
+ description:
+ - "A flag indicating if fencing should be skipped if Gluster bricks are up and running and Gluster quorum will not
+ be met without those bricks."
+ - "This flag is optional, and the default value is `false`."
+ type: bool
+ fence_skip_if_sd_active:
+ description:
+ - "If I(True) any hosts in the cluster that are Non Responsive
+ and still connected to storage will not be fenced."
+ type: bool
+ fence_skip_if_connectivity_broken:
+ description:
+ - "If I(True) fencing will be temporarily disabled if the percentage
+ of hosts in the cluster that are experiencing connectivity issues
+ is greater than or equal to the defined threshold."
+ - "The threshold can be specified by C(fence_connectivity_threshold)."
+ type: bool
+ fence_connectivity_threshold:
+ description:
+ - "The threshold used by C(fence_skip_if_connectivity_broken)."
+ type: int
+ resilience_policy:
+ description:
+ - "The resilience policy defines how the virtual machines are prioritized in the migration."
+ - "Following values are supported:"
+ - "C(do_not_migrate) - Prevents virtual machines from being migrated. "
+ - "C(migrate) - Migrates all virtual machines in order of their defined priority."
+ - "C(migrate_highly_available) - Migrates only highly available virtual machines to prevent overloading other hosts."
+ choices: ['do_not_migrate', 'migrate', 'migrate_highly_available']
+ type: str
+ migration_bandwidth:
+ description:
+ - "The bandwidth settings define the maximum bandwidth of both outgoing and incoming migrations per host."
+ - "Following bandwidth options are supported:"
+ - "C(auto) - Bandwidth is copied from the I(rate limit) [Mbps] setting in the data center host network QoS."
+ - "C(hypervisor_default) - Bandwidth is controlled by local VDSM setting on sending host."
+ - "C(custom) - Defined by user (in Mbps)."
+ choices: ['auto', 'hypervisor_default', 'custom']
+ type: str
+ migration_bandwidth_limit:
+ description:
+ - "Set the I(custom) migration bandwidth limit."
+ - "This parameter is used only when C(migration_bandwidth) is I(custom)."
+ type: int
+ migration_auto_converge:
+ description:
+ - "If I(True) auto-convergence is used during live migration of virtual machines."
+ - "Used only when C(migration_policy) is set to I(legacy)."
+ - "Following options are supported:"
+ - "C(true) - Override the global setting to I(true)."
+ - "C(false) - Override the global setting to I(false)."
+ - "C(inherit) - Use value which is set globally."
+ choices: ['true', 'false', 'inherit']
+ type: str
+ migration_compressed:
+ description:
+ - "If I(True) compression is used during live migration of the virtual machine."
+ - "Used only when C(migration_policy) is set to I(legacy)."
+ - "Following options are supported:"
+ - "C(true) - Override the global setting to I(true)."
+ - "C(false) - Override the global setting to I(false)."
+ - "C(inherit) - Use value which is set globally."
+ choices: ['true', 'false', 'inherit']
+ type: str
+ migration_encrypted:
+ description:
+ - "If I(True) encryption is used during live migration of the virtual machine."
+ - "Following options are supported:"
+ - "C(true) - Override the global setting to I(true)."
+ - "C(false) - Override the global setting to I(false)."
+ - "C(inherit) - Use value which is set globally."
+ choices: ['true', 'false', 'inherit']
+ type: str
+ migration_policy:
+ description:
+ - "A migration policy defines the conditions for live migrating
+ virtual machines in the event of host failure."
+ - "Following policies are supported:"
+ - "C(legacy) - Legacy behavior of 3.6 version."
+ - "C(minimal_downtime) - Virtual machines should not experience any significant downtime."
+ - "C(suspend_workload) - Virtual machines may experience a more significant downtime."
+ - "C(post_copy) - Virtual machines should not experience any significant downtime.
+ If the VM migration is not converging for a long time, the migration will be switched to post-copy.
+ Added in version I(2.4)."
+ choices: ['legacy', 'minimal_downtime', 'suspend_workload', 'post_copy']
+ type: str
+ serial_policy:
+ description:
+ - "Specify a serial number policy for the virtual machines in the cluster."
+ - "Following options are supported:"
+ - "C(vm) - Sets the virtual machine's UUID as its serial number."
+ - "C(host) - Sets the host's UUID as the virtual machine's serial number."
+ - "C(custom) - Allows you to specify a custom serial number in C(serial_policy_value)."
+ choices: ['vm', 'host', 'custom']
+ type: str
+ serial_policy_value:
+ description:
+ - "Allows you to specify a custom serial number."
+ - "This parameter is used only when C(serial_policy) is I(custom)."
+ type: str
+ scheduling_policy:
+ description:
+ - "Name of the scheduling policy to be used for cluster."
+ type: str
+ scheduling_policy_properties:
+ description:
+ - "Custom scheduling policy properties of the cluster."
+ - "These optional properties override the properties of the
+ scheduling policy specified by the C(scheduling_policy) parameter."
+ suboptions:
+ name:
+ description:
+ - Name of the scheduling policy property.
+ value:
+ description:
+ - Value of scheduling policy property.
+ type: list
+ elements: dict
+ cpu_arch:
+ description:
+ - "CPU architecture of cluster."
+ choices: ['x86_64', 'ppc64', 'undefined']
+ type: str
+ cpu_type:
+ description:
+ - "CPU codename. For example I(Intel SandyBridge Family)."
+ type: str
+ switch_type:
+ description:
+ - "Type of switch to be used by all networks in given cluster.
+ Either I(legacy) which is using linux bridge or I(ovs) using
+ Open vSwitch."
+ choices: ['legacy', 'ovs']
+ type: str
+ compatibility_version:
+ description:
+ - "The compatibility version of the cluster. All hosts in this
+ cluster must support at least this compatibility version."
+ type: str
+ mac_pool:
+ description:
+ - "MAC pool to be used by this cluster."
+ - "C(Note:)"
+ - "This is supported since oVirt version 4.1."
+ type: str
+ external_network_providers:
+ description:
+ - "List of references to the external network providers available
+ in the cluster. If the automatic deployment of the external
+ network provider is supported, the networks of the referenced
+ network provider are available on every host in the cluster."
+ - "This is supported since oVirt version 4.2."
+ suboptions:
+ name:
+ description:
+ - Name of the external network provider. Either C(name) or C(id) is required.
+ id:
+ description:
+ - ID of the external network provider. Either C(name) or C(id) is required.
+ type: list
+ elements: dict
+ firewall_type:
+ description:
+ - "The type of firewall to be used on hosts in this cluster."
+ - "Up to version 4.1, it was always I(iptables). Since version 4.2, you can choose between I(iptables) and I(firewalld).
+ For clusters with a compatibility version of 4.2 and higher, the default firewall type is I(firewalld)."
+ type: str
+ choices: ['firewalld', 'iptables']
+ gluster_tuned_profile:
+ description:
+ - "The name of the U(https://fedorahosted.org/tuned) to set on all the hosts in the cluster. This is not mandatory
+ and relevant only for clusters with Gluster service."
+ - "Could be for example I(virtual-host), I(rhgs-sequential-io), I(rhgs-random-io)"
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication:
+
+# Create cluster
+- ovirt.ovirt.ovirt_cluster:
+ data_center: mydatacenter
+ name: mycluster
+ cpu_type: Intel SandyBridge Family
+ description: mycluster
+ compatibility_version: 4.0
+
+# Create virt service cluster:
+- ovirt.ovirt.ovirt_cluster:
+ data_center: mydatacenter
+ name: mycluster
+ cpu_type: Intel Nehalem Family
+ description: mycluster
+ switch_type: legacy
+ compatibility_version: 4.0
+ ballooning: true
+ gluster: false
+ threads_as_cores: true
+ ha_reservation: true
+ trusted_service: false
+ host_reason: false
+ vm_reason: true
+ ksm_numa: true
+ memory_policy: server
+ rng_sources:
+ - hwrng
+ - random
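+
+# Create cluster with a custom migration policy
+# (a sketch; the values are illustrative, see the migration_* options above)
+- ovirt.ovirt.ovirt_cluster:
+ name: mycluster
+ data_center: mydatacenter
+ migration_policy: minimal_downtime
+ migration_bandwidth: custom
+ migration_bandwidth_limit: 128
+ migration_encrypted: inherit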
+
+# Create cluster with default network provider
+- ovirt.ovirt.ovirt_cluster:
+ name: mycluster
+ data_center: Default
+ cpu_type: Intel SandyBridge Family
+ external_network_providers:
+ - name: ovirt-provider-ovn
+
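+# Create cluster with custom scheduling policy properties and fencing options
+# (a sketch; the policy and property names are illustrative for the default oVirt policies)
+- ovirt.ovirt.ovirt_cluster:
+ name: mycluster
+ data_center: mydatacenter
+ scheduling_policy: evenly_distributed
+ scheduling_policy_properties:
+ - name: HighUtilization
+ value: 80
+ fence_enabled: true
+ fence_skip_if_connectivity_broken: true
+ fence_connectivity_threshold: 50
+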
+# Remove cluster
+- ovirt.ovirt.ovirt_cluster:
+ state: absent
+ name: mycluster
+
+# Change cluster Name
+- ovirt.ovirt.ovirt_cluster:
+ id: 00000000-0000-0000-0000-000000000000
+ name: "new_cluster_name"
+'''
+
+RETURN = '''
+id:
+ description: ID of the cluster which is managed
+ returned: On success if cluster is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+cluster:
+ description: "Dictionary of all the cluster attributes. Cluster attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/cluster."
+ type: dict
+ returned: On success if cluster is found.
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ ovirt_full_argument_spec,
+ search_by_name,
+ get_id_by_name,
+)
+
+
+class ClustersModule(BaseModule):
+
+ def __get_major(self, full_version):
+ if full_version is None:
+ return None
+ if isinstance(full_version, otypes.Version):
+ return full_version.major
+ return int(full_version.split('.')[0])
+
+ def __get_minor(self, full_version):
+ if full_version is None:
+ return None
+ if isinstance(full_version, otypes.Version):
+ return full_version.minor
+ return int(full_version.split('.')[1])
+
+ def param(self, name, default=None):
+ return self._module.params.get(name, default)
+
+ def _get_memory_policy(self):
+ memory_policy = self.param('memory_policy')
+ if memory_policy == 'desktop':
+ return 200
+ elif memory_policy == 'server':
+ return 150
+ elif memory_policy == 'disabled':
+ return 100
+
+ def _get_policy_id(self):
+ # These are hardcoded IDs, once there is API, please fix this.
+ # legacy - 00000000-0000-0000-0000-000000000000
+ # minimal downtime - 80554327-0569-496b-bdeb-fcbbf52b827b
+ # suspend workload if needed - 80554327-0569-496b-bdeb-fcbbf52b827c
+ # post copy - a7aeedb2-8d66-4e51-bb22-32595027ce71
+ migration_policy = self.param('migration_policy')
+ if migration_policy == 'legacy':
+ return '00000000-0000-0000-0000-000000000000'
+ elif migration_policy == 'minimal_downtime':
+ return '80554327-0569-496b-bdeb-fcbbf52b827b'
+ elif migration_policy == 'suspend_workload':
+ return '80554327-0569-496b-bdeb-fcbbf52b827c'
+ elif migration_policy == 'post_copy':
+ return 'a7aeedb2-8d66-4e51-bb22-32595027ce71'
+
+ def _get_sched_policy(self):
+ sched_policy = None
+ if self.param('scheduling_policy'):
+ sched_policies_service = self._connection.system_service().scheduling_policies_service()
+ sched_policy = search_by_name(sched_policies_service, self.param('scheduling_policy'))
+ if not sched_policy:
+ raise Exception("Scheduling policy '%s' was not found" % self.param('scheduling_policy'))
+
+ return sched_policy
+
+ def _get_mac_pool(self):
+ mac_pool = None
+ if self._module.params.get('mac_pool'):
+ mac_pool = search_by_name(
+ self._connection.system_service().mac_pools_service(),
+ self._module.params.get('mac_pool'),
+ )
+
+ return mac_pool
+
+ def _get_external_network_providers(self):
+ return self.param('external_network_providers') or []
+
+ def _get_external_network_provider_id(self, external_provider):
+ return external_provider.get('id') or get_id_by_name(
+ self._connection.system_service().openstack_network_providers_service(),
+ external_provider.get('name')
+ )
+
+ def _get_external_network_providers_entity(self):
+ if self.param('external_network_providers') is not None:
+ return [otypes.ExternalProvider(id=self._get_external_network_provider_id(external_provider))
+ for external_provider in self.param('external_network_providers')]
+
+ def build_entity(self):
+ sched_policy = self._get_sched_policy()
+ return otypes.Cluster(
+ id=self.param('id'),
+ name=self.param('name'),
+ comment=self.param('comment'),
+ description=self.param('description'),
+ ballooning_enabled=self.param('ballooning'),
+ gluster_service=self.param('gluster'),
+ virt_service=self.param('virt'),
+ threads_as_cores=self.param('threads_as_cores'),
+ ha_reservation=self.param('ha_reservation'),
+ trusted_service=self.param('trusted_service'),
+ optional_reason=self.param('vm_reason'),
+ maintenance_reason_required=self.param('host_reason'),
+ scheduling_policy=otypes.SchedulingPolicy(
+ id=sched_policy.id,
+ ) if sched_policy else None,
+ serial_number=otypes.SerialNumber(
+ policy=otypes.SerialNumberPolicy(self.param('serial_policy')),
+ value=self.param('serial_policy_value'),
+ ) if (
+ self.param('serial_policy') is not None or
+ self.param('serial_policy_value') is not None
+ ) else None,
+ migration=otypes.MigrationOptions(
+ auto_converge=otypes.InheritableBoolean(
+ self.param('migration_auto_converge'),
+ ) if self.param('migration_auto_converge') else None,
+ bandwidth=otypes.MigrationBandwidth(
+ assignment_method=otypes.MigrationBandwidthAssignmentMethod(
+ self.param('migration_bandwidth'),
+ ) if self.param('migration_bandwidth') else None,
+ custom_value=self.param('migration_bandwidth_limit'),
+ ) if (
+ self.param('migration_bandwidth') or
+ self.param('migration_bandwidth_limit')
+ ) else None,
+ compressed=otypes.InheritableBoolean(
+ self.param('migration_compressed'),
+ ) if self.param('migration_compressed') else None,
+ encrypted=otypes.InheritableBoolean(
+ self.param('migration_encrypted'),
+ ) if self.param('migration_encrypted') else None,
+ policy=otypes.MigrationPolicy(
+ id=self._get_policy_id()
+ ) if self.param('migration_policy') else None,
+ ) if (
+ self.param('migration_bandwidth') is not None or
+ self.param('migration_bandwidth_limit') is not None or
+ self.param('migration_auto_converge') is not None or
+ self.param('migration_compressed') is not None or
+ self.param('migration_encrypted') is not None or
+ self.param('migration_policy') is not None
+ ) else None,
+ error_handling=otypes.ErrorHandling(
+ on_error=otypes.MigrateOnError(
+ self.param('resilience_policy')
+ ),
+ ) if self.param('resilience_policy') else None,
+ fencing_policy=otypes.FencingPolicy(
+ enabled=self.param('fence_enabled'),
+ skip_if_gluster_bricks_up=self.param('fence_skip_if_gluster_bricks_up'),
+ skip_if_gluster_quorum_not_met=self.param('fence_skip_if_gluster_quorum_not_met'),
+ skip_if_connectivity_broken=otypes.SkipIfConnectivityBroken(
+ enabled=self.param('fence_skip_if_connectivity_broken'),
+ threshold=self.param('fence_connectivity_threshold'),
+ ) if (
+ self.param('fence_skip_if_connectivity_broken') is not None or
+ self.param('fence_connectivity_threshold') is not None
+ ) else None,
+ skip_if_sd_active=otypes.SkipIfSdActive(
+ enabled=self.param('fence_skip_if_sd_active'),
+ ) if self.param('fence_skip_if_sd_active') is not None else None,
+ ) if (
+ self.param('fence_enabled') is not None or
+ self.param('fence_skip_if_sd_active') is not None or
+ self.param('fence_skip_if_connectivity_broken') is not None or
+ self.param('fence_skip_if_gluster_bricks_up') is not None or
+ self.param('fence_skip_if_gluster_quorum_not_met') is not None or
+ self.param('fence_connectivity_threshold') is not None
+ ) else None,
+ display=otypes.Display(
+ proxy=self.param('spice_proxy'),
+ ) if self.param('spice_proxy') else None,
+ required_rng_sources=[
+ otypes.RngSource(rng) for rng in self.param('rng_sources')
+ ] if self.param('rng_sources') else None,
+ memory_policy=otypes.MemoryPolicy(
+ over_commit=otypes.MemoryOverCommit(
+ percent=self._get_memory_policy(),
+ ),
+ ) if self.param('memory_policy') else None,
+ ksm=otypes.Ksm(
+ enabled=self.param('ksm'),
+ merge_across_nodes=not self.param('ksm_numa'),
+ ) if (
+ self.param('ksm_numa') is not None or
+ self.param('ksm') is not None
+ ) else None,
+ data_center=otypes.DataCenter(
+ name=self.param('data_center'),
+ ) if self.param('data_center') else None,
+ management_network=otypes.Network(
+ name=self.param('network'),
+ ) if self.param('network') else None,
+ cpu=otypes.Cpu(
+ architecture=otypes.Architecture(
+ self.param('cpu_arch')
+ ) if self.param('cpu_arch') else None,
+ type=self.param('cpu_type'),
+ ) if (
+ self.param('cpu_arch') or self.param('cpu_type')
+ ) else None,
+ version=otypes.Version(
+ major=self.__get_major(self.param('compatibility_version')),
+ minor=self.__get_minor(self.param('compatibility_version')),
+ ) if self.param('compatibility_version') else None,
+ switch_type=otypes.SwitchType(
+ self.param('switch_type')
+ ) if self.param('switch_type') else None,
+ mac_pool=otypes.MacPool(
+ id=get_id_by_name(self._connection.system_service().mac_pools_service(), self.param('mac_pool'))
+ ) if self.param('mac_pool') else None,
+ external_network_providers=self._get_external_network_providers_entity(),
+ custom_scheduling_policy_properties=[
+ otypes.Property(
+ name=sp.get('name'),
+ value=str(sp.get('value')),
+ ) for sp in self.param('scheduling_policy_properties') if sp
+ ] if self.param('scheduling_policy_properties') is not None else None,
+ firewall_type=otypes.FirewallType(
+ self.param('firewall_type')
+ ) if self.param('firewall_type') else None,
+ gluster_tuned_profile=self.param('gluster_tuned_profile'),
+ )
+
+ def _matches_entity(self, item, entity):
+ return equal(item.get('id'), entity.id) and equal(item.get('name'), entity.name)
+
+ def _update_check_external_network_providers(self, entity):
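+ # Providers match only if every requested provider exists on the entity and every entity provider was requested.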
+ if self.param('external_network_providers') is None:
+ return True
+ if entity.external_network_providers is None:
+ return not self.param('external_network_providers')
+ entity_providers = self._connection.follow_link(entity.external_network_providers)
+ entity_provider_ids = [provider.id for provider in entity_providers]
+ entity_provider_names = [provider.name for provider in entity_providers]
+ for provider in self._get_external_network_providers():
+ if provider.get('id'):
+ if provider.get('id') not in entity_provider_ids:
+ return False
+ elif provider.get('name') and provider.get('name') not in entity_provider_names:
+ return False
+ for entity_provider in entity_providers:
+ if not any([self._matches_entity(provider, entity_provider)
+ for provider in self._get_external_network_providers()]):
+ return False
+ return True
+
+ def update_check(self, entity):
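+ # Return True only when the existing cluster already matches all of the specified parameters (no update needed).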
+ sched_policy = self._get_sched_policy()
+ migration_policy = getattr(entity.migration, 'policy', None)
+ cluster_cpu = getattr(entity, 'cpu', dict())
+
+ def check_custom_scheduling_policy_properties():
+ if self.param('scheduling_policy_properties'):
+ current = []
+ if entity.custom_scheduling_policy_properties:
+ current = [(sp.name, str(sp.value)) for sp in entity.custom_scheduling_policy_properties]
+ passed = [(sp.get('name'), str(sp.get('value'))) for sp in self.param('scheduling_policy_properties') if sp]
+ for p in passed:
+ if p not in current:
+ return False
+ return True
+
+ return (
+ check_custom_scheduling_policy_properties() and
+ equal(self.param('name'), entity.name) and
+ equal(self.param('comment'), entity.comment) and
+ equal(self.param('description'), entity.description) and
+ equal(self.param('switch_type'), str(entity.switch_type)) and
+ equal(self.param('cpu_arch'), str(getattr(cluster_cpu, 'architecture', None))) and
+ equal(self.param('cpu_type'), getattr(cluster_cpu, 'type', None)) and
+ equal(self.param('ballooning'), entity.ballooning_enabled) and
+ equal(self.param('gluster'), entity.gluster_service) and
+ equal(self.param('virt'), entity.virt_service) and
+ equal(self.param('threads_as_cores'), entity.threads_as_cores) and
+ equal(self.param('ksm_numa'), not entity.ksm.merge_across_nodes) and
+ equal(self.param('ksm'), entity.ksm.enabled) and
+ equal(self.param('ha_reservation'), entity.ha_reservation) and
+ equal(self.param('trusted_service'), entity.trusted_service) and
+ equal(self.param('host_reason'), entity.maintenance_reason_required) and
+ equal(self.param('vm_reason'), entity.optional_reason) and
+ equal(self.param('spice_proxy'), getattr(entity.display, 'proxy', None)) and
+ equal(self.param('fence_enabled'), entity.fencing_policy.enabled) and
+ equal(self.param('fence_skip_if_gluster_bricks_up'), entity.fencing_policy.skip_if_gluster_bricks_up) and
+ equal(self.param('fence_skip_if_gluster_quorum_not_met'), entity.fencing_policy.skip_if_gluster_quorum_not_met) and
+ equal(self.param('fence_skip_if_sd_active'), entity.fencing_policy.skip_if_sd_active.enabled) and
+ equal(self.param('fence_skip_if_connectivity_broken'), entity.fencing_policy.skip_if_connectivity_broken.enabled) and
+ equal(self.param('fence_connectivity_threshold'), entity.fencing_policy.skip_if_connectivity_broken.threshold) and
+ equal(self.param('resilience_policy'), str(entity.error_handling.on_error)) and
+ equal(self.param('migration_bandwidth'), str(entity.migration.bandwidth.assignment_method)) and
+ equal(self.param('migration_auto_converge'), str(entity.migration.auto_converge)) and
+ equal(self.param('migration_compressed'), str(entity.migration.compressed)) and
+ equal(self.param('migration_encrypted'), str(entity.migration.encrypted)) and
+ equal(self.param('serial_policy'), str(getattr(entity.serial_number, 'policy', None))) and
+ equal(self.param('serial_policy_value'), getattr(entity.serial_number, 'value', None)) and
+ equal(self.param('scheduling_policy'), getattr(self._connection.follow_link(entity.scheduling_policy), 'name', None)) and
+ equal(self.param('firewall_type'), str(entity.firewall_type)) and
+ equal(self.param('gluster_tuned_profile'), getattr(entity, 'gluster_tuned_profile', None)) and
+ equal(self._get_policy_id(), getattr(migration_policy, 'id', None)) and
+ equal(self._get_memory_policy(), entity.memory_policy.over_commit.percent) and
+ equal(self.__get_minor(self.param('compatibility_version')), self.__get_minor(entity.version)) and
+ equal(self.__get_major(self.param('compatibility_version')), self.__get_major(entity.version)) and
+ equal(
+ self.param('migration_bandwidth_limit') if self.param('migration_bandwidth') == 'custom' else None,
+ entity.migration.bandwidth.custom_value
+ ) and
+ equal(
+ sorted(self.param('rng_sources')) if self.param('rng_sources') else None,
+ sorted([
+ str(source) for source in entity.required_rng_sources
+ ])
+ ) and
+ equal(
+ get_id_by_name(self._connection.system_service().mac_pools_service(), self.param('mac_pool'), raise_error=False),
+ entity.mac_pool.id
+ ) and
+ self._update_check_external_network_providers(entity)
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ name=dict(required=True),
+ id=dict(default=None),
+ ballooning=dict(default=None, type='bool', aliases=['balloon']),
+ gluster=dict(default=None, type='bool'),
+ virt=dict(default=None, type='bool'),
+ threads_as_cores=dict(default=None, type='bool'),
+ ksm_numa=dict(default=None, type='bool'),
+ ksm=dict(default=None, type='bool'),
+ ha_reservation=dict(default=None, type='bool'),
+ trusted_service=dict(default=None, type='bool'),
+ vm_reason=dict(default=None, type='bool'),
+ host_reason=dict(default=None, type='bool'),
+ memory_policy=dict(default=None, choices=['disabled', 'server', 'desktop'], aliases=['performance_preset']),
+ rng_sources=dict(default=None, type='list', elements='str'),
+ spice_proxy=dict(default=None),
+ fence_enabled=dict(default=None, type='bool'),
+ fence_skip_if_gluster_bricks_up=dict(default=None, type='bool'),
+ fence_skip_if_gluster_quorum_not_met=dict(default=None, type='bool'),
+ fence_skip_if_sd_active=dict(default=None, type='bool'),
+ fence_skip_if_connectivity_broken=dict(default=None, type='bool'),
+ fence_connectivity_threshold=dict(default=None, type='int'),
+ resilience_policy=dict(default=None, choices=['migrate_highly_available', 'migrate', 'do_not_migrate']),
+ migration_bandwidth=dict(default=None, choices=['auto', 'hypervisor_default', 'custom']),
+ migration_bandwidth_limit=dict(default=None, type='int'),
+ migration_auto_converge=dict(default=None, choices=['true', 'false', 'inherit']),
+ migration_compressed=dict(default=None, choices=['true', 'false', 'inherit']),
+ migration_encrypted=dict(default=None, choices=['true', 'false', 'inherit']),
+ migration_policy=dict(
+ default=None,
+ choices=['legacy', 'minimal_downtime', 'suspend_workload', 'post_copy']
+ ),
+ serial_policy=dict(default=None, choices=['vm', 'host', 'custom']),
+ serial_policy_value=dict(default=None),
+ scheduling_policy=dict(default=None),
+ data_center=dict(default=None),
+ description=dict(default=None),
+ comment=dict(default=None),
+ network=dict(default=None),
+ cpu_arch=dict(default=None, choices=['ppc64', 'undefined', 'x86_64']),
+ cpu_type=dict(default=None),
+ switch_type=dict(default=None, choices=['legacy', 'ovs']),
+ compatibility_version=dict(default=None),
+ mac_pool=dict(default=None),
+ external_network_providers=dict(default=None, type='list', elements='dict'),
+ scheduling_policy_properties=dict(type='list', elements='dict'),
+ firewall_type=dict(choices=['iptables', 'firewalld'], default=None),
+ gluster_tuned_profile=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ clusters_service = connection.system_service().clusters_service()
+ clusters_module = ClustersModule(
+ connection=connection,
+ module=module,
+ service=clusters_service,
+ )
+
+ state = module.params['state']
+ if state == 'present':
+ ret = clusters_module.create()
+ elif state == 'absent':
+ ret = clusters_module.remove()
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_cluster_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_cluster_info.py
new file mode 100644
index 00000000..c593f9a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_cluster_info.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_cluster_info
+short_description: Retrieve information about one or more oVirt/RHV clusters
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV clusters."
+ - This module was called C(ovirt_cluster_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_cluster_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_clusters), which
+ contains a list of clusters. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search cluster X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all clusters whose names start with C(production):
+- ovirt.ovirt.ovirt_cluster_info:
+ pattern: name=production*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_clusters }}"
+'''
+
+RETURN = '''
+ovirt_clusters:
+ description: "List of dictionaries describing the clusters. Cluster attributes are mapped to dictionary keys,
+ all clusters attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/cluster."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ clusters_service = connection.system_service().clusters_service()
+ clusters = clusters_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_clusters=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in clusters
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_datacenter.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_datacenter.py
new file mode 100644
index 00000000..69acb71d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_datacenter.py
@@ -0,0 +1,328 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_datacenter
+short_description: Module to manage data centers in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Module to manage data centers in oVirt/RHV"
+options:
+ id:
+ description:
+ - "ID of the datacenter to manage."
+ type: str
+ name:
+ description:
+ - "Name of the data center to manage."
+ required: true
+ type: str
+ state:
+ description:
+ - "Should the data center be present or absent."
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ description:
+ description:
+ - "Description of the data center."
+ type: str
+ comment:
+ description:
+ - "Comment of the data center."
+ type: str
+ local:
+ description:
+ - "I(True) if the data center should be local, I(False) if should be shared."
+ - "Default value is set by engine."
+ type: bool
+ compatibility_version:
+ description:
+ - "Compatibility version of the data center."
+ type: str
+ quota_mode:
+ description:
+ - "Quota mode of the data center. One of I(disabled), I(audit) or I(enabled)"
+ choices: ['disabled', 'audit', 'enabled']
+ type: str
+ mac_pool:
+ description:
+ - "MAC pool to be used by this datacenter."
+ - "IMPORTANT: This option is deprecated in oVirt/RHV 4.1. You should
+ use C(mac_pool) in the C(ovirt_cluster) module, as MAC pools are
+ set per cluster since 4.1."
+ type: str
+ force:
+ description:
+ - "This parameter can be used only when removing a data center.
+ If I(True) data center will be forcibly removed, even though it
+ contains some clusters. Default value is I(False), which means
+ that only empty data center can be removed."
+ default: False
+ type: bool
+ iscsi_bonds:
+ description:
+ - "List of iscsi bonds, which should be created in datacenter."
+ suboptions:
+ name:
+ description:
+ - "Name of the iscsi bond."
+ type: str
+ networks:
+ description:
+ - "List of network names in bond."
+ type: list
+ storage_domains:
+ description:
+ - "List of storage domain names and it will automatically get all storage_connections in the domain."
+ type: list
+ default: []
+ storage_connections:
+ description:
+ - "List of storage_connection IDs. Used when you want to use specific storage connection instead of all in storage domain."
+ type: list
+ default: []
+ type: list
+ elements: dict
+
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication:
+
+# Create datacenter
+- ovirt.ovirt.ovirt_datacenter:
+ name: mydatacenter
+ local: True
+ compatibility_version: 4.0
+ quota_mode: enabled
+
+# Remove datacenter
+- ovirt.ovirt.ovirt_datacenter:
+ state: absent
+ name: mydatacenter
+
+# Change Datacenter Name
+- ovirt.ovirt.ovirt_datacenter:
+ id: 00000000-0000-0000-0000-000000000000
+ name: "new_datacenter_name"
+
+# Create datacenter with iscsi bond
+- ovirt.ovirt.ovirt_datacenter:
+ name: mydatacenter
+ iscsi_bonds:
+ - name: bond1
+ networks:
+ - network1
+ - network2
+ storage_domains:
+ - storage1
+ - name: bond2
+ networks:
+ - network3
+ storage_connections:
+ - cf780201-6a4f-43c1-a019-e65c4220ab73
+
+# Remove all iscsi bonds
+- ovirt.ovirt.ovirt_datacenter:
+ name: mydatacenter
+ iscsi_bonds: []
+'''
+
+RETURN = '''
+id:
+ description: "ID of the managed datacenter"
+ returned: "On success if datacenter is found."
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+data_center:
+ description: "Dictionary of all the datacenter attributes. Datacenter attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/datacenter."
+ returned: "On success if datacenter is found."
+ type: dict
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ check_params,
+ create_connection,
+ equal,
+ ovirt_full_argument_spec,
+ search_by_name,
+ follow_link,
+ get_id_by_name
+)
+
+
+class DatacentersModule(BaseModule):
+
+ def __get_major(self, full_version):
+ if full_version is None:
+ return None
+ if isinstance(full_version, otypes.Version):
+ return full_version.major
+ return int(full_version.split('.')[0])
+
+ def __get_minor(self, full_version):
+ if full_version is None:
+ return None
+ if isinstance(full_version, otypes.Version):
+ return full_version.minor
+ return int(full_version.split('.')[1])
+
+ def _get_mac_pool(self):
+ mac_pool = None
+ if self._module.params.get('mac_pool'):
+ mac_pool = search_by_name(
+ self._connection.system_service().mac_pools_service(),
+ self._module.params.get('mac_pool'),
+ )
+ return mac_pool
+
+ def build_entity(self):
+ return otypes.DataCenter(
+ name=self._module.params['name'],
+ id=self._module.params['id'],
+ comment=self._module.params['comment'],
+ description=self._module.params['description'],
+ mac_pool=otypes.MacPool(
+ id=getattr(self._get_mac_pool(), 'id', None),
+ ) if self._module.params.get('mac_pool') else None,
+ quota_mode=otypes.QuotaModeType(
+ self._module.params['quota_mode']
+ ) if self._module.params['quota_mode'] else None,
+ local=self._module.params['local'],
+ version=otypes.Version(
+ major=self.__get_major(self._module.params['compatibility_version']),
+ minor=self.__get_minor(self._module.params['compatibility_version']),
+ ) if self._module.params['compatibility_version'] else None,
+ )
+
+ def update_check(self, entity):
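+ # Return True only when the existing data center already matches all of the specified parameters.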
+ minor = self.__get_minor(self._module.params.get('compatibility_version'))
+ major = self.__get_major(self._module.params.get('compatibility_version'))
+ return (
+ equal(getattr(self._get_mac_pool(), 'id', None), getattr(entity.mac_pool, 'id', None)) and
+ equal(self._module.params.get('comment'), entity.comment) and
+ equal(self._module.params.get('description'), entity.description) and
+ equal(self._module.params.get('name'), entity.name) and
+ equal(self._module.params.get('quota_mode'), str(entity.quota_mode)) and
+ equal(self._module.params.get('local'), entity.local) and
+ equal(minor, self.__get_minor(entity.version)) and
+ equal(major, self.__get_major(entity.version))
+ )
+
+
+def get_storage_connections(iscsi_bond, connection):
+ resp = []
+ for storage_domain_name in iscsi_bond.get('storage_domains', []):
+ storage_domains_service = connection.system_service().storage_domains_service()
+ storage_domain = storage_domains_service.storage_domain_service(
+ get_id_by_name(storage_domains_service, storage_domain_name)).get()
+ resp.extend(connection.follow_link(storage_domain.storage_connections))
+
+ for storage_connection_id in iscsi_bond.get('storage_connections', []):
+ resp.append(connection.system_service().storage_connections_service(
+ ).storage_connection_service(storage_connection_id).get())
+ return resp
+
+
+def serialize_iscsi_bond(iscsi_bonds):
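+ # Flatten the bonds into plain dicts so the before/after bond lists can be compared for change detection.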
+ return [{"name": bond.name,
+ "networks": [net.name for net in bond.networks],
+ "storage_connections": [connection.address for connection in bond.storage_connections]} for bond in iscsi_bonds]
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ name=dict(required=True),
+ description=dict(default=None),
+ local=dict(type='bool'),
+ id=dict(default=None),
+ compatibility_version=dict(default=None),
+ quota_mode=dict(choices=['disabled', 'audit', 'enabled']),
+ comment=dict(default=None),
+ mac_pool=dict(default=None),
+ force=dict(default=None, type='bool'),
+ iscsi_bonds=dict(type='list', default=None, elements='dict'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+ check_params(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ data_centers_service = connection.system_service().data_centers_service()
+ data_centers_module = DatacentersModule(
+ connection=connection,
+ module=module,
+ service=data_centers_service,
+ )
+
+ state = module.params['state']
+ if state == 'present':
+ ret = data_centers_module.create()
+ if module.params.get('iscsi_bonds') is not None:
+ iscsi_bonds_service = data_centers_service.data_center_service(
+ ret.get('id')).iscsi_bonds_service()
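+ # Snapshot the current bonds so 'changed' can be computed after they are recreated below.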
+ before_iscsi_bonds = iscsi_bonds_service.list()
+ networks_service = connection.system_service().networks_service()
+ # Remove existing bonds
+ for bond in iscsi_bonds_service.list():
+ iscsi_bonds_service.iscsi_bond_service(bond.id).remove()
+ # Create the new bonds
+ for new_bond in module.params.get('iscsi_bonds'):
+ iscsi_bond = otypes.IscsiBond(
+ name=new_bond.get('name'),
+ data_center=data_centers_service.data_center_service(
+ ret.get('id')).get(),
+ storage_connections=get_storage_connections(
+ new_bond, connection),
+ networks=[search_by_name(networks_service, network_name)
+ for network_name in new_bond.get('networks')],
+ )
+ iscsi_bonds_service.add(iscsi_bond)
+ ret['changed'] = ret['changed'] or serialize_iscsi_bond(
+ before_iscsi_bonds) != serialize_iscsi_bond(iscsi_bonds_service.list())
+ elif state == 'absent':
+ ret = data_centers_module.remove(force=module.params['force'])
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_datacenter_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_datacenter_info.py
new file mode 100644
index 00000000..69781eaf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_datacenter_info.py
@@ -0,0 +1,97 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_datacenter_info
+short_description: Retrieve information about one or more oVirt/RHV datacenters
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV datacenters."
+ - This module was called C(ovirt_datacenter_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_datacenter_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_datacenters), which
+ contains a list of datacenters. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search datacenter I(X) use following pattern: I(name=X)"
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all data centers whose names start with C(production):
+- ovirt.ovirt.ovirt_datacenter_info:
+ pattern: name=production*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_datacenters }}"
+'''
+
+RETURN = '''
+ovirt_datacenters:
+ description: "List of dictionaries describing the datacenters. Datacenter attributes are mapped to dictionary keys,
+ all datacenters attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/data_center."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ datacenters_service = connection.system_service().data_centers_service()
+ datacenters = datacenters_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_datacenters=[
+ get_dict_of_struct(
+ struct=d,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for d in datacenters
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_disk.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_disk.py
new file mode 100644
index 00000000..414afb56
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_disk.py
@@ -0,0 +1,921 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_disk
+short_description: "Module to manage Virtual Machine and floating disks in oVirt/RHV"
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Module to manage Virtual Machine and floating disks in oVirt/RHV."
+ - "WARNING: If you are installing the collection from ansible galaxy you need to install 'qemu-img' package."
+options:
+ id:
+ description:
+ - "ID of the disk to manage. Either C(id) or C(name) is required."
+ type: str
+ name:
+ description:
+ - "Name of the disk to manage. Either C(id) or C(name)/C(alias) is required."
+ aliases: ['alias']
+ type: str
+ description:
+ description:
+ - "Description of the disk image to manage."
+ type: str
+ vm_name:
+ description:
+ - "Name of the Virtual Machine to manage. Either C(vm_id) or C(vm_name) is required if C(state) is I(attached) or I(detached)."
+ type: str
+ vm_id:
+ description:
+ - "ID of the Virtual Machine to manage. Either C(vm_id) or C(vm_name) is required if C(state) is I(attached) or I(detached)."
+ type: str
+ state:
+ description:
+ - "Should the Virtual Machine disk be present/absent/attached/detached/exported/imported."
+ choices: ['present', 'absent', 'attached', 'detached', 'exported', 'imported']
+ default: 'present'
+ type: str
+ download_image_path:
+ description:
+ - "Path on a file system where disk should be downloaded."
+ - "Note that you must have an valid oVirt/RHV engine CA in your system trust store
+ or you must provide it in C(ca_file) parameter."
+ - "Note that the disk is not downloaded when the file already exists,
+ but you can forcibly download the disk when using C(force) I (true)."
+ type: str
+ upload_image_path:
+ description:
+ - "Path to disk image, which should be uploaded."
+ - "Note if C(size) is not specified the size of the disk will be determined by the size of the specified image."
+ - "Note that currently we support only compatibility version 0.10 of the qcow disk."
+ - "Note that you must have an valid oVirt/RHV engine CA in your system trust store
+ or you must provide it in C(ca_file) parameter."
+ - "Note that there is no reliable way to achieve idempotency, so
+ if you want to upload the disk even if the disk with C(id) or C(name) exists,
+ then please use C(force) I(true). If you will use C(force) I(false), which
+ is default, then the disk image won't be uploaded."
+ - "Note that to upload iso the C(format) should be 'raw'"
+ type: str
+ aliases: ['image_path']
+ size:
+ description:
+ - "Size of the disk. Size should be specified using IEC standard units.
+ For example 10GiB, 1024MiB, etc."
+ - "Size can be only increased, not decreased."
+ type: str
+ interface:
+ description:
+ - "Driver of the storage interface."
+ - "It's required parameter when creating the new disk."
+ choices: ['virtio', 'ide', 'virtio_scsi']
+ type: str
+ format:
+ description:
+ - Specify format of the disk.
+ - Note that this option isn't idempotent as it's not currently possible to change the format of the disk via the API.
+ choices: ['raw', 'cow']
+ default: 'cow'
+ type: str
+ content_type:
+ description:
+ - Specify if the disk is a data disk, an ISO image, or one of the Hosted Engine disk types.
+ - The Hosted Engine disk content types are available with Engine 4.3+ and Ansible 2.8.
+ choices: ['data', 'iso', 'hosted_engine', 'hosted_engine_sanlock', 'hosted_engine_metadata', 'hosted_engine_configuration']
+ default: 'data'
+ type: str
+ sparse:
+ required: False
+ type: bool
+ description:
+ - "I(True) if the disk should be sparse (also known as I(thin provision)).
+ If the parameter is omitted, cow disks will be created as sparse and raw disks as I(preallocated)"
+ - Note that this option isn't idempotent as it's not currently possible to change sparseness of the disk via API.
+ storage_domain:
+ description:
+ - "Storage domain name where disk should be created."
+ type: str
+ storage_domains:
+ description:
+ - "Storage domain names where disk should be copied."
+ - "C(**IMPORTANT**)"
+ - "There is no reliable way to achieve idempotency, so every time
+ you specify this parameter the disks are copied, so please handle
+ your playbook accordingly to not copy the disks all the time. This
+ is valid only for VM and floating disks, template disks works
+ as expected."
+ type: list
+ elements: str
+ force:
+ description:
+ - "Please take a look at C(image_path) documentation to see the correct
+ usage of this parameter."
+ type: bool
+ profile:
+ description:
+ - "Disk profile name to be attached to disk. By default profile is chosen by oVirt/RHV engine."
+ type: str
+ quota_id:
+ description:
+ - "Disk quota ID to be used for disk. By default quota is chosen by oVirt/RHV engine."
+ type: str
+ bootable:
+ description:
+ - "I(True) if the disk should be bootable. By default when disk is created it isn't bootable."
+ type: bool
+ default: 'no'
+ shareable:
+ description:
+ - "I(True) if the disk should be shareable. By default when disk is created it isn't shareable."
+ type: bool
+ logical_unit:
+ description:
+ - "Dictionary which describes LUN to be directly attached to VM:"
+ suboptions:
+ address:
+ description:
+ - Address of the storage server. Used by iSCSI.
+ port:
+ description:
+ - Port of the storage server. Used by iSCSI.
+ target:
+ description:
+ - iSCSI target.
+ lun_id:
+ description:
+ - LUN id.
+ username:
+ description:
+ - CHAP Username to be used to access storage server. Used by iSCSI.
+ password:
+ description:
+ - CHAP Password of the user to be used to access storage server. Used by iSCSI.
+ storage_type:
+ description:
+ - Storage type either I(fcp) or I(iscsi).
+ type: dict
+ sparsify:
+ description:
+ - "I(True) if the disk should be sparsified."
+ - "Sparsification frees space in the disk image that is not used by
+ its filesystem. As a result, the image will occupy less space on
+ the storage."
+ - "Note that this parameter isn't idempotent, as it's not possible
+ to check if the disk should be or should not be sparsified."
+ type: bool
+ openstack_volume_type:
+ description:
+ - "Name of the openstack volume type. This is valid when working
+ with cinder."
+ type: str
+ image_provider:
+ description:
+ - "When C(state) is I(exported) disk is exported to given Glance image provider."
+ - "When C(state) is I(imported) disk is imported from given Glance image provider."
+ - "C(**IMPORTANT**)"
+ - "There is no reliable way to achieve idempotency, so every time
+ you specify this parameter the disk is exported, so please handle
+ your playbook accordingly to not export the disk all the time.
+ This option is valid only for template disks."
+ type: str
+ host:
+ description:
+ - "When the hypervisor name is specified the newly created disk or
+ an existing disk will refresh its information about the
+ underlying storage( Disk size, Serial, Product ID, Vendor ID ...)
+ The specified host will be used for gathering the storage
+ related information. This option is only valid for passthrough
+ disks. This option requires at least the logical_unit.id to be
+ specified"
+ type: str
+ wipe_after_delete:
+ description:
+ - "If the disk's Wipe After Delete is enabled, then the disk is first wiped."
+ type: bool
+ activate:
+ description:
+ - I(True) if the disk should be activated.
+ - When creating a disk for a virtual machine, it is set to I(True).
+ type: bool
+ backup:
+ description:
+ - The backup behavior supported by the disk.
+ choices: ['incremental']
+ version_added: 1.1.0
+ type: str
+ scsi_passthrough:
+ description:
+ - Indicates whether SCSI passthrough is enabled and its policy.
+ - Setting a value of I(filtered)/I(unfiltered) will enable SCSI passthrough for a LUN disk with unprivileged/privileged SCSI I/O.
+ - To disable SCSI passthrough the value should be set to I(disabled).
+ choices: ['disabled', 'filtered', 'unfiltered']
+ type: str
+ version_added: 1.2.0
+ propagate_errors:
+ description:
+ - Indicates if disk errors should cause the virtual machine to be paused or if disk errors should be
+ propagated to the guest operating system instead.
+ type: bool
+ version_added: 1.2.0
+ pass_discard:
+ description:
+ - Defines whether the virtual machine passes discard commands to the storage.
+ type: bool
+ version_added: 1.2.0
+ uses_scsi_reservation:
+ description:
+ - Defines whether SCSI reservation is enabled for this disk.
+ type: bool
+ version_added: 1.2.0
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication:
+
+# Create and attach new disk to VM
+- ovirt.ovirt.ovirt_disk:
+ name: myvm_disk
+ vm_name: rhel7
+ size: 10GiB
+ format: cow
+ interface: virtio
+ storage_domain: data
+
+# Attach logical unit to VM rhel7
+- ovirt.ovirt.ovirt_disk:
+ vm_name: rhel7
+ logical_unit:
+ target: iqn.2016-08-09.brq.str-01:omachace
+ id: 1IET_000d0001
+ address: 10.34.63.204
+ interface: virtio
+
+# Detach disk from VM
+- ovirt.ovirt.ovirt_disk:
+ state: detached
+ name: myvm_disk
+ vm_name: rhel7
+ size: 10GiB
+ format: cow
+ interface: virtio
+
+# Change Disk Name
+- ovirt.ovirt.ovirt_disk:
+ id: 00000000-0000-0000-0000-000000000000
+ storage_domain: data
+ name: "new_disk_name"
+ vm_name: rhel7
+
+# Upload local image to disk and attach it to vm:
+# Since Ansible 2.3
+- ovirt.ovirt.ovirt_disk:
+ name: mydisk
+ vm_name: myvm
+ interface: virtio
+ size: 10GiB
+ format: cow
+ image_path: /path/to/mydisk.qcow2
+ storage_domain: data
+
+# Download disk to local file system:
+# Since Ansible 2.3
+- ovirt.ovirt.ovirt_disk:
+ id: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+ download_image_path: /home/user/mydisk.qcow2
+
+# Export disk as image to Glance domain
+# Since Ansible 2.4
+- ovirt.ovirt.ovirt_disk:
+ id: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+ image_provider: myglance
+ state: exported
+
+# Defining a specific quota while creating a disk image:
+# Since Ansible 2.5
+- ovirt.ovirt.ovirt_quotas_info:
+ data_center: Default
+ name: myquota
+ register: quota
+- ovirt.ovirt.ovirt_disk:
+ name: mydisk
+ size: 10GiB
+ storage_domain: data
+ description: somedescriptionhere
+ quota_id: "{{ quota.ovirt_quotas[0]['id'] }}"
+
+# Upload an ISO image
+# Since Ansible 2.8
+- ovirt.ovirt.ovirt_disk:
+ name: myiso
+ upload_image_path: /path/to/iso/image
+ storage_domain: data
+ size: 4 GiB
+ wait: true
+ bootable: true
+ format: raw
+ content_type: iso
+
+# Add fibre channel disk
+- name: Create disk
+ ovirt.ovirt.ovirt_disk:
+ name: fcp_disk
+ host: my_host
+ logical_unit:
+ id: 3600a09803830447a4f244c4657597777
+ storage_type: fcp
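+
+# Illustrative sketch: enable SCSI passthrough on a LUN disk using the
+# scsi_passthrough and uses_scsi_reservation options documented above
+# (the host name and LUN id are placeholders)
+- name: Create LUN disk with SCSI passthrough
+ ovirt.ovirt.ovirt_disk:
+ name: passthrough_disk
+ host: my_host
+ scsi_passthrough: filtered
+ uses_scsi_reservation: false
+ logical_unit:
+ id: 3600a09803830447a4f244c4657597777
+ storage_type: fcp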
+'''
+
+
+RETURN = '''
+id:
+ description: "ID of the managed disk"
+ returned: "On success if disk is found."
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+disk:
+ description: "Dictionary of all the disk attributes. Disk attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/disk."
+ returned: "On success if disk is found and C(vm_id) or C(vm_name) wasn't passed."
+ type: dict
+
+disk_attachment:
+ description: "Dictionary of all the disk attachment attributes. Disk attachment attributes can be found
+ on your oVirt/RHV instance at following url:
+ http://ovirt.github.io/ovirt-engine-api-model/master/#types/disk_attachment."
+ returned: "On success if disk is found and C(vm_id) or C(vm_name) was passed and VM was found."
+ type: dict
+'''
+
+import json
+import os
+import ssl
+import subprocess
+import time
+import traceback
+
+from ansible.module_utils.six.moves.http_client import HTTPSConnection, IncompleteRead
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ check_params,
+ create_connection,
+ convert_to_bytes,
+ equal,
+ follow_link,
+ get_id_by_name,
+ ovirt_full_argument_spec,
+ get_dict_of_struct,
+ search_by_name,
+ wait,
+)
+
+
+def _search_by_lun(disks_service, lun_id):
+ """
+ Find disk by LUN ID.
+ """
+ res = [
+ disk for disk in disks_service.list(search='disk_type=lun') if (
+ disk.lun_storage.id == lun_id
+ )
+ ]
+ return res[0] if res else None
+
+
+def create_transfer_connection(module, transfer, context, connect_timeout=10, read_timeout=60):
+ url = urlparse(transfer.transfer_url)
+ connection = HTTPSConnection(
+ url.netloc, context=context, timeout=connect_timeout)
+ try:
+ connection.connect()
+ except Exception as e:
+ # Typically ConnectionRefusedError or socket.gaierror.
+ module.warn("Cannot connect to %s, trying %s: %s" % (transfer.transfer_url, transfer.proxy_url, e))
+
+ url = urlparse(transfer.proxy_url)
+ connection = HTTPSConnection(
+ url.netloc, context=context, timeout=connect_timeout)
+ connection.connect()
+
+ connection.sock.settimeout(read_timeout)
+ return connection, url
+
+
+def transfer(connection, module, direction, transfer_func):
+ transfers_service = connection.system_service().image_transfers_service()
+ transfer = transfers_service.add(
+ otypes.ImageTransfer(
+ image=otypes.Image(
+ id=module.params['id'],
+ ),
+ direction=direction,
+ )
+ )
+ transfer_service = transfers_service.image_transfer_service(transfer.id)
+
+ try:
+ # After adding a new transfer for the disk, the transfer's status will be INITIALIZING.
+ # Wait until the init phase is over. The actual transfer can start when its status is "Transferring".
+ while transfer.phase == otypes.ImageTransferPhase.INITIALIZING:
+ time.sleep(module.params['poll_interval'])
+ transfer = transfer_service.get()
+
+ context = ssl.create_default_context()
+ auth = module.params['auth']
+ if auth.get('insecure'):
+ context.check_hostname = False
+ context.verify_mode = ssl.CERT_NONE
+ elif auth.get('ca_file'):
+ context.load_verify_locations(cafile=auth.get('ca_file'))
+
+ transfer_connection, transfer_url = create_transfer_connection(module, transfer, context)
+ transfer_func(
+ transfer_service,
+ transfer_connection,
+ transfer_url,
+ )
+ return True
+ finally:
+ transfer_service.finalize()
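+ # Wait for the transfer to leave its active phases; the failure
+ # phases checked below are reported to the caller as an error.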
+ while transfer.phase in [
+ otypes.ImageTransferPhase.TRANSFERRING,
+ otypes.ImageTransferPhase.FINALIZING_SUCCESS,
+ ]:
+ time.sleep(module.params['poll_interval'])
+ transfer = transfer_service.get()
+ if transfer.phase in [
+ otypes.ImageTransferPhase.UNKNOWN,
+ otypes.ImageTransferPhase.FINISHED_FAILURE,
+ otypes.ImageTransferPhase.FINALIZING_FAILURE,
+ otypes.ImageTransferPhase.CANCELLED,
+ ]:
+ raise Exception(
+ "Error occurred during the image transfer. The transfer is in %s" % transfer.phase
+ )
+ if not module.params.get('logical_unit'):
+ disks_service = connection.system_service().disks_service()
+ wait(
+ service=disks_service.service(module.params['id']),
+ condition=lambda d: d.status == otypes.DiskStatus.OK,
+ wait=module.params['wait'],
+ timeout=module.params['timeout'],
+ )
+
+
+def download_disk_image(connection, module):
+ def _transfer(transfer_service, transfer_connection, transfer_url):
+ BUF_SIZE = 128 * 1024
+ transfer_connection.request('GET', transfer_url.path)
+ r = transfer_connection.getresponse()
+ path = module.params["download_image_path"]
+ image_size = int(r.getheader('Content-Length'))
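+ # Stream the response body to the local file in BUF_SIZE chunks
+ # until all Content-Length bytes have been written.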
+ with open(path, "wb") as mydisk:
+ pos = 0
+ while pos < image_size:
+ to_read = min(image_size - pos, BUF_SIZE)
+ chunk = r.read(to_read)
+ if not chunk:
+ raise RuntimeError("Socket disconnected")
+ mydisk.write(chunk)
+ pos += len(chunk)
+
+ return transfer(
+ connection,
+ module,
+ otypes.ImageTransferDirection.DOWNLOAD,
+ transfer_func=_transfer,
+ )
+
+
+def upload_disk_image(connection, module):
+ def _transfer(transfer_service, transfer_connection, transfer_url):
+ BUF_SIZE = 128 * 1024
+ path = module.params['upload_image_path']
+
+ image_size = os.path.getsize(path)
+ transfer_connection.putrequest("PUT", transfer_url.path)
+ transfer_connection.putheader('Content-Length', "%d" % (image_size,))
+ transfer_connection.endheaders()
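+ # Stream the local file in BUF_SIZE chunks; an unexpected end of
+ # file pauses the transfer and raises an error.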
+ with open(path, "rb") as disk:
+ pos = 0
+ while pos < image_size:
+ to_read = min(image_size - pos, BUF_SIZE)
+ chunk = disk.read(to_read)
+ if not chunk:
+ transfer_service.pause()
+ raise RuntimeError("Unexpected end of file at pos=%d" % pos)
+ transfer_connection.send(chunk)
+ pos += len(chunk)
+
+ return transfer(
+ connection,
+ module,
+ otypes.ImageTransferDirection.UPLOAD,
+ transfer_func=_transfer,
+ )
+
+
+class DisksModule(BaseModule):
+
+ def build_entity(self):
+ hosts_service = self._connection.system_service().hosts_service()
+ logical_unit = self._module.params.get('logical_unit')
+ size = convert_to_bytes(self._module.params.get('size'))
+ if not size and self._module.params.get('upload_image_path'):
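+ # No explicit size was given; fall back to the virtual size
+ # reported by qemu-img for the image that is being uploaded.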
+ out = subprocess.check_output(
+ ["qemu-img", "info", "--output", "json", self._module.params.get('upload_image_path')])
+ image_info = json.loads(out)
+ size = image_info["virtual-size"]
+ disk = otypes.Disk(
+ id=self._module.params.get('id'),
+ name=self._module.params.get('name'),
+ description=self._module.params.get('description'),
+ format=otypes.DiskFormat(
+ self._module.params.get('format')
+ ) if self._module.params.get('format') else None,
+ content_type=otypes.DiskContentType(
+ self._module.params.get('content_type')
+ ) if self._module.params.get('content_type') else None,
+ sparse=self._module.params.get(
+ 'sparse'
+ ) if self._module.params.get(
+ 'sparse'
+ ) is not None else self._module.params.get('format') != 'raw',
+ openstack_volume_type=otypes.OpenStackVolumeType(
+ name=self.param('openstack_volume_type')
+ ) if self.param('openstack_volume_type') else None,
+ provisioned_size=size,
+ storage_domains=[
+ otypes.StorageDomain(
+ name=self._module.params.get('storage_domain'),
+ ),
+ ],
+ quota=otypes.Quota(id=self._module.params.get('quota_id')) if self.param('quota_id') else None,
+ shareable=self._module.params.get('shareable'),
+ sgio=otypes.ScsiGenericIO(self.param('scsi_passthrough')) if self.param('scsi_passthrough') else None,
+ propagate_errors=self.param('propagate_errors'),
+ backup=otypes.DiskBackup(self.param('backup')) if self.param('backup') else None,
+ wipe_after_delete=self.param('wipe_after_delete'),
+ lun_storage=otypes.HostStorage(
+ host=otypes.Host(
+ id=get_id_by_name(hosts_service, self._module.params.get('host'))
+ ) if self.param('host') else None,
+ type=otypes.StorageType(
+ logical_unit.get('storage_type', 'iscsi')
+ ),
+ logical_units=[
+ otypes.LogicalUnit(
+ address=logical_unit.get('address'),
+ port=logical_unit.get('port', 3260),
+ target=logical_unit.get('target'),
+ id=logical_unit.get('id'),
+ username=logical_unit.get('username'),
+ password=logical_unit.get('password'),
+ )
+ ],
+ ) if logical_unit else None,
+ )
+ if hasattr(disk, 'initial_size') and self._module.params['upload_image_path']:
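+ # qemu-img measure reports the allocation required for the
+ # converted image; use it as the initial size of the new disk.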
+ out = subprocess.check_output([
+ 'qemu-img',
+ 'measure',
+ '-O', 'qcow2' if self._module.params.get('format') == 'cow' else 'raw',
+ '--output', 'json',
+ self._module.params['upload_image_path']
+ ])
+ measure = json.loads(out)
+ disk.initial_size = measure["required"]
+
+ return disk
+
+ def update_storage_domains(self, disk_id):
+ changed = False
+ disk_service = self._service.service(disk_id)
+ disk = disk_service.get()
+ sds_service = self._connection.system_service().storage_domains_service()
+
+ # We don't support move&copy for non file based storages:
+ if disk.storage_type != otypes.DiskStorageType.IMAGE:
+ return changed
+ if disk.content_type in [
+ otypes.DiskContentType(x) for x in ['hosted_engine', 'hosted_engine_sanlock', 'hosted_engine_metadata', 'hosted_engine_configuration']]:
+ return changed
+ # Initiate move:
+ if self._module.params['storage_domain']:
+ new_disk_storage_id = get_id_by_name(sds_service, self._module.params['storage_domain'])
+ if new_disk_storage_id in [sd.id for sd in disk.storage_domains]:
+ return changed
+ changed = self.action(
+ action='move',
+ entity=disk,
+ action_condition=lambda d: new_disk_storage_id != d.storage_domains[0].id,
+ wait_condition=lambda d: d.status == otypes.DiskStatus.OK,
+ storage_domain=otypes.StorageDomain(
+ id=new_disk_storage_id,
+ ),
+ post_action=lambda _: time.sleep(self._module.params['poll_interval']),
+ )['changed']
+
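+ # Copy the disk to any additional storage domains listed in 'storage_domains':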
+ if self._module.params['storage_domains']:
+ for sd in self._module.params['storage_domains']:
+ new_disk_storage = search_by_name(sds_service, sd)
+ changed = changed or self.action(
+ action='copy',
+ entity=disk,
+ action_condition=(
+ lambda disk: new_disk_storage.id not in [sd.id for sd in disk.storage_domains]
+ ),
+ wait_condition=lambda disk: disk.status == otypes.DiskStatus.OK,
+ storage_domain=otypes.StorageDomain(
+ id=new_disk_storage.id,
+ ),
+ )['changed']
+
+ return changed
+
+ def _update_check(self, entity):
+ return (
+ equal(self._module.params.get('name'), entity.name) and
+ equal(self._module.params.get('description'), entity.description) and
+ equal(self.param('quota_id'), getattr(entity.quota, 'id', None)) and
+ equal(convert_to_bytes(self._module.params.get('size')), entity.provisioned_size) and
+ equal(self._module.params.get('shareable'), entity.shareable) and
+ equal(self.param('propagate_errors'), entity.propagate_errors) and
+ equal(otypes.ScsiGenericIO(self.param('scsi_passthrough')) if self.param('scsi_passthrough') else None, entity.sgio) and
+ equal(self.param('wipe_after_delete'), entity.wipe_after_delete)
+ )
+
+
+class DiskAttachmentsModule(DisksModule):
+
+ def build_entity(self):
+ return otypes.DiskAttachment(
+ disk=super(DiskAttachmentsModule, self).build_entity(),
+ interface=otypes.DiskInterface(
+ self._module.params.get('interface')
+ ) if self._module.params.get('interface') else None,
+ bootable=self._module.params.get('bootable'),
+ active=self.param('activate'),
+ uses_scsi_reservation=self.param('uses_scsi_reservation'),
+ pass_discard=self.param('pass_discard'),
+ )
+
+ def update_check(self, entity):
+ return (
+ super(DiskAttachmentsModule, self)._update_check(follow_link(self._connection, entity.disk)) and
+ equal(self._module.params.get('interface'), str(entity.interface)) and
+ equal(self._module.params.get('bootable'), entity.bootable) and
+ equal(self._module.params.get('pass_discard'), entity.pass_discard) and
+ equal(self._module.params.get('uses_scsi_reservation'), entity.uses_scsi_reservation) and
+ equal(self.param('activate'), entity.active)
+ )
+
+
+def searchable_attributes(module):
+ """
+ Return all searchable disk attributes passed to module.
+ """
+ attributes = {
+ 'name': module.params.get('name'),
+ 'Storage.name': module.params.get('storage_domain'),
+ 'vm_names': module.params.get('vm_name'),
+ }
+ return dict((k, v) for k, v in attributes.items() if v is not None)
+
+
+def get_vm_service(connection, module):
+ if (module.params.get('vm_id') is not None or module.params.get('vm_name') is not None) and module.params['state'] != 'absent':
+ vms_service = connection.system_service().vms_service()
+
+ # If `vm_id` isn't specified, find VM by name:
+ vm_id = module.params['vm_id']
+ if vm_id is None:
+ vm_id = get_id_by_name(vms_service, module.params['vm_name'])
+
+ if vm_id is None:
+ module.fail_json(
+ msg="VM don't exists, please create it first."
+ )
+
+ return vms_service.vm_service(vm_id)
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent', 'attached', 'detached', 'exported', 'imported'],
+ default='present'
+ ),
+ id=dict(default=None),
+ name=dict(default=None, aliases=['alias']),
+ description=dict(default=None),
+ vm_name=dict(default=None),
+ vm_id=dict(default=None),
+ size=dict(default=None),
+ interface=dict(default=None, choices=['virtio', 'ide', 'virtio_scsi']),
+ storage_domain=dict(default=None),
+ storage_domains=dict(default=None, type='list', elements='str'),
+ profile=dict(default=None),
+ quota_id=dict(default=None),
+ format=dict(default='cow', choices=['raw', 'cow']),
+ content_type=dict(
+ default='data',
+ choices=['data', 'iso', 'hosted_engine', 'hosted_engine_sanlock', 'hosted_engine_metadata', 'hosted_engine_configuration']
+ ),
+ backup=dict(default=None, type='str', choices=['incremental']),
+ sparse=dict(default=None, type='bool'),
+ bootable=dict(default=None, type='bool'),
+ shareable=dict(default=None, type='bool'),
+ scsi_passthrough=dict(default=None, type='str', choices=['disabled', 'filtered', 'unfiltered']),
+ uses_scsi_reservation=dict(default=None, type='bool'),
+ pass_discard=dict(default=None, type='bool'),
+ propagate_errors=dict(default=None, type='bool'),
+ logical_unit=dict(default=None, type='dict'),
+ download_image_path=dict(default=None),
+ upload_image_path=dict(default=None, aliases=['image_path']),
+ force=dict(default=False, type='bool'),
+ sparsify=dict(default=None, type='bool'),
+ openstack_volume_type=dict(default=None),
+ image_provider=dict(default=None),
+ host=dict(default=None),
+ wipe_after_delete=dict(type='bool', default=None),
+ activate=dict(default=None, type='bool'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ lun = module.params.get('logical_unit')
+ host = module.params['host']
+ # Fail when host is specified without the LUN id. The LUN id is needed to
+ # identify an existing disk if it is already available in the environment.
+ if (host and lun is None) or (host and lun.get("id") is None):
+ module.fail_json(
+ msg="Can not use parameter host ({0!s}) without "
+ "specifying the logical_unit id".format(host)
+ )
+
+ check_sdk(module)
+ check_params(module)
+
+ try:
+ disk = None
+ state = module.params['state']
+ auth = module.params.get('auth')
+ connection = create_connection(auth)
+ disks_service = connection.system_service().disks_service()
+ disks_module = DisksModule(
+ connection=connection,
+ module=module,
+ service=disks_service,
+ )
+
+ force_create = False
+ vm_service = get_vm_service(connection, module)
+ if lun:
+ disk = _search_by_lun(disks_service, lun.get('id'))
+ else:
+ disk = disks_module.search_entity(search_params=searchable_attributes(module))
+ if vm_service and disk:
+ # If the disk is not attached to the VM but was still found, it belongs to a
+ # template with the same name as the VM, so we should force create the VM disk.
+ force_create = disk.id not in [a.disk.id for a in vm_service.disk_attachments_service().list() if a.disk]
+
+ ret = None
+ # First take care of creating the disk, if needed:
+ if state in ('present', 'detached', 'attached'):
+ # Always activate the disk when it's being created
+ if vm_service is not None and disk is None:
+ module.params['activate'] = module.params['activate'] is None or module.params['activate']
+ ret = disks_module.create(
+ entity=disk if not force_create else None,
+ result_state=otypes.DiskStatus.OK if lun is None else None,
+ search_params=searchable_attributes(module),
+ fail_condition=lambda d: d.status == otypes.DiskStatus.ILLEGAL if lun is None else False,
+ force_create=force_create,
+ _wait=True if module.params['upload_image_path'] else module.params['wait'],
+ )
+ is_new_disk = ret['changed']
+ ret['changed'] = ret['changed'] or disks_module.update_storage_domains(ret['id'])
+ # We need to pass ID to the module, so in case we want detach/attach disk
+ # we have this ID specified to attach/detach method:
+ module.params['id'] = ret['id']
+
+ # Upload the disk image if it's a new disk or the force parameter is passed:
+ if module.params['upload_image_path'] and (is_new_disk or module.params['force']):
+ if module.params['format'] == 'cow' and module.params['content_type'] == 'iso':
+ module.warn("To upload an ISO image 'format' parameter needs to be set to 'raw'.")
+ uploaded = upload_disk_image(connection, module)
+ ret['changed'] = ret['changed'] or uploaded
+ # Download the disk image if the file doesn't exist or the force parameter is passed:
+ if (
+ module.params['download_image_path'] and (not os.path.isfile(module.params['download_image_path']) or module.params['force'])
+ ):
+ downloaded = download_disk_image(connection, module)
+ ret['changed'] = ret['changed'] or downloaded
+
+ # Disk sparsify, only if disk is of image type:
+ if not module.check_mode:
+ disk = disks_service.disk_service(module.params['id']).get()
+ if disk.storage_type == otypes.DiskStorageType.IMAGE:
+ ret = disks_module.action(
+ action='sparsify',
+ action_condition=lambda d: module.params['sparsify'],
+ wait_condition=lambda d: d.status == otypes.DiskStatus.OK,
+ )
+
+ # Export disk as image to glance domain
+ elif state == 'exported':
+ disk = disks_module.search_entity()
+ if disk is None:
+ module.fail_json(
+ msg="Can not export given disk '%s', it doesn't exist" %
+ module.params.get('name') or module.params.get('id')
+ )
+ if disk.storage_type == otypes.DiskStorageType.IMAGE:
+ ret = disks_module.action(
+ action='export',
+ action_condition=lambda d: module.params['image_provider'],
+ wait_condition=lambda d: d.status == otypes.DiskStatus.OK,
+ storage_domain=otypes.StorageDomain(name=module.params['image_provider']),
+ )
+ elif state == 'imported':
+ glance_service = connection.system_service().openstack_image_providers_service()
+ image_provider = search_by_name(glance_service, module.params['image_provider'])
+ images_service = glance_service.service(image_provider.id).images_service()
+ entity_id = get_id_by_name(images_service, module.params['name'])
+ images_service.service(entity_id).import_(
+ storage_domain=otypes.StorageDomain(
+ name=module.params['storage_domain']
+ ) if module.params['storage_domain'] else None,
+ disk=otypes.Disk(
+ name=module.params['name']
+ ),
+ import_as_template=False,
+ )
+ # Wait for disk to appear in system:
+ disk = disks_module.wait_for_import(
+ condition=lambda t: t.status == otypes.DiskStatus.OK
+ )
+ ret = disks_module.create(result_state=otypes.DiskStatus.OK)
+ elif state == 'absent':
+ ret = disks_module.remove()
+
+ # If VM was passed attach/detach disks to/from the VM:
+ if vm_service:
+ disk_attachments_service = vm_service.disk_attachments_service()
+ disk_attachments_module = DiskAttachmentsModule(
+ connection=connection,
+ module=module,
+ service=disk_attachments_service,
+ changed=ret['changed'] if ret else False,
+ )
+
+ if state == 'present' or state == 'attached':
+ ret = disk_attachments_module.create()
+ if lun is None:
+ wait(
+ service=disk_attachments_service.service(ret['id']),
+ condition=lambda d: follow_link(connection, d.disk).status == otypes.DiskStatus.OK,
+ wait=module.params['wait'],
+ timeout=module.params['timeout'],
+ )
+ elif state == 'detached':
+ ret = disk_attachments_module.remove()
+
+ # When the host parameter is specified and the disk is not being
+ # removed, refresh the information about the LUN.
+ if state != 'absent' and host:
+ hosts_service = connection.system_service().hosts_service()
+ host_id = get_id_by_name(hosts_service, host)
+ disks_service.disk_service(disk.id).refresh_lun(otypes.Host(id=host_id))
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_disk_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_disk_info.py
new file mode 100644
index 00000000..5601da2e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_disk_info.py
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_disk_info
+short_description: Retrieve information about one or more oVirt/RHV disks
+version_added: "1.0.0"
+author: "Katerina Koukiou (@KKoukiou)"
+description:
+ - "Retrieve information about one or more oVirt/RHV disks."
+ - This module was called C(ovirt_disk_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_disk_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_disks), which
+ contains a list of disks. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search Disk X from storage Y use following pattern:
+ name=X and storage.name=Y"
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all disks whose names start with C(centos)
+- ovirt.ovirt.ovirt_disk_info:
+ pattern: name=centos*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_disks }}"
+'''
+
+RETURN = '''
+ovirt_disks:
+ description: "List of dictionaries describing the Disks. Disk attributes are mapped to dictionary keys,
+ all Disks attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/disk."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ disks_service = connection.system_service().disks_service()
+ disks = disks_service.list(
+ search=module.params['pattern'],
+ )
+ result = dict(
+ ovirt_disks=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in disks
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_event.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_event.py
new file mode 100644
index 00000000..bec809b7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_event.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_event
+short_description: Create or delete an event in oVirt/RHV
+version_added: "1.0.0"
+author: "Chris Keller (@nasx)"
+description:
+ - "This module can be used to create or delete an event in oVirt/RHV."
+options:
+ state:
+ description:
+ - "Should the event be present/absent."
+ - "The C(wait) option must be set to false when state is absent."
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ description:
+ description:
+ - "Message for the event."
+ - "Required when state is present."
+ type: str
+ severity:
+ description:
+ - "Severity of the event."
+ - "Required when state is present."
+ choices: ['error', 'normal', 'warning']
+ default: normal
+ type: str
+ origin:
+ description:
+ - "Originator of the event."
+ - "Required when state is present."
+ type: str
+ custom_id:
+ description:
+ - "Custom ID for the event. This ID must be unique for each event."
+ - "Required when state is present."
+ type: int
+ id:
+ description:
+ - "The event ID in the oVirt/RHV audit_log table. This ID is not the same as custom_id and is only used when state is absent."
+ - "Required when state is absent."
+ type: str
+ cluster:
+ description:
+ - "The id of the cluster associated with this event."
+ type: str
+ data_center:
+ description:
+ - "The id of the data center associated with this event."
+ type: str
+ host:
+ description:
+ - "The id of the host associated with this event."
+ type: str
+ storage_domain:
+ description:
+ - "The id of the storage domain associated with this event."
+ type: str
+ template:
+ description:
+ - "The id of the template associated with this event."
+ type: str
+ user:
+ description:
+ - "The id of the user associated with this event."
+ type: str
+ vm:
+ description:
+ - "The id of the VM associated with this event."
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication.
+
+- name: Create an event
+ ovirt.ovirt.ovirt_event:
+ state: present
+ description: "The file system /home on host xyz is almost full!"
+ origin: "mymonitor"
+ custom_id: 123456789
+ severity: warning
+
+- name: Create an event and link it to a specific object
+ ovirt.ovirt.ovirt_event:
+ state: present
+ description: "The file system /home is almost full!"
+ origin: "mymonitor"
+ custom_id: 123456789
+ severity: warning
+ vm: "c79db183-46ef-44d1-95f9-1a368c516c19"
+
+- name: Remove an event
+ ovirt.ovirt.ovirt_event:
+ state: absent
+ id: 123456789
+ wait: false
+'''
+
+RETURN = '''
+id:
+ description: "ID of the event that was created."
+ returned: "On success."
+ type: str
+event:
+ description: "Dictionary of all the Event attributes. All event attributes can be found at the following url:
+ http://ovirt.github.io/ovirt-engine-api-model/master/#types/event"
+ returned: "On success."
+ type: dict
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ check_params,
+ create_connection,
+ equal,
+ get_dict_of_struct,
+ ovirt_full_argument_spec,
+)
+
+
+class EventsModule(BaseModule):
+
+ def build_entity(self):
+ return otypes.Event(
+ description=self._module.params['description'],
+ severity=otypes.LogSeverity(self._module.params['severity']),
+ origin=self._module.params['origin'],
+ custom_id=self._module.params['custom_id'],
+ id=self._module.params['id'],
+ cluster=otypes.Cluster(
+ id=self._module.params['cluster']
+ ) if self._module.params['cluster'] is not None else None,
+ data_center=otypes.DataCenter(
+ id=self._module.params['data_center']
+ ) if self._module.params['data_center'] is not None else None,
+ host=otypes.Host(
+ id=self._module.params['host']
+ ) if self._module.params['host'] is not None else None,
+ storage_domain=otypes.StorageDomain(
+ id=self._module.params['storage_domain']
+ ) if self._module.params['storage_domain'] is not None else None,
+ template=otypes.Template(
+ id=self._module.params['template']
+ ) if self._module.params['template'] is not None else None,
+ user=otypes.User(
+ id=self._module.params['user']
+ ) if self._module.params['user'] is not None else None,
+ vm=otypes.Vm(
+ id=self._module.params['vm']
+ ) if self._module.params['vm'] is not None else None,
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ description=dict(default=None),
+ severity=dict(
+ choices=['error', 'normal', 'warning'],
+ default='normal',
+ ),
+ origin=dict(default=None),
+ custom_id=dict(default=None, type='int'),
+ id=dict(default=None),
+ cluster=dict(default=None),
+ data_center=dict(default=None),
+ host=dict(default=None),
+ storage_domain=dict(default=None),
+ template=dict(default=None),
+ user=dict(default=None),
+ vm=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ check_sdk(module)
+
+ # Wait must be set to false if state == absent
+
+ if module.params['state'] == 'absent' and module.params['wait'] is not False:
+ module.fail_json(msg='When "state" is absent, "wait" must be set to false.')
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ events_service = connection.system_service().events_service()
+ events_module = EventsModule(
+ connection=connection,
+ module=module,
+ service=events_service,
+ )
+
+ state = module.params['state']
+ if state == 'present':
+ ret = events_module.create()
+ elif state == 'absent':
+ ret = events_module.remove()
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_event_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_event_info.py
new file mode 100644
index 00000000..9730e0fd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_event_info.py
@@ -0,0 +1,152 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_event_info
+short_description: This module can be used to retrieve information about one or more oVirt/RHV events
+version_added: "1.0.0"
+author: "Chris Keller (@nasx)"
+description:
+ - "Retrieve information about one or more oVirt/RHV events."
+ - This module was called C(ovirt_event_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_event_info) module no longer returns C(ansible_facts)!
+options:
+ case_sensitive:
+ description:
+ - "Indicates if the search performed using the search parameter should be performed taking case
+ into account. The default value is true, which means that case is taken into account. If you
+ want to search ignoring case, set it to false."
+ required: false
+ default: true
+ type: bool
+ from_:
+ description:
+ - "Indicates the event index after which events should be returned. The indexes of events are
+ strictly increasing, so when this parameter is used only the events with greater indexes
+ will be returned."
+ required: false
+ type: int
+ max:
+ description:
+ - "Sets the maximum number of events to return. If not specified all the events are returned."
+ required: false
+ type: int
+ search:
+ description:
+ - "Search term which is accepted by the oVirt/RHV API."
+ - "For example to search for events of severity alert use the following pattern: severity=alert"
+ required: false
+ type: str
+ headers:
+ description:
+ - "Additional HTTP headers."
+ required: false
+ type: str
+ query:
+ description:
+ - "Additional URL query parameters."
+ required: false
+ type: str
+ wait:
+ description:
+ - "If True wait for the response."
+ required: false
+ default: true
+ type: bool
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication.
+
+- name: Return all events
+ ovirt.ovirt.ovirt_event_info:
+ register: result
+
+- name: Return the last 10 events
+ ovirt.ovirt.ovirt_event_info:
+ max: 10
+ register: result
+
+- name: Return all events of type alert
+ ovirt.ovirt.ovirt_event_info:
+ search: "severity=alert"
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_events }}"
+'''
+
+RETURN = '''
+ovirt_events:
+ description: "List of dictionaries describing the events. Event attributes are mapped to dictionary keys.
+ All event attributes can be found at the following url:
+ http://ovirt.github.io/ovirt-engine-api-model/master/#types/event"
+ returned: On success."
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ case_sensitive=dict(default=True, type='bool', required=False),
+ from_=dict(default=None, type='int', required=False),
+ max=dict(default=None, type='int', required=False),
+ search=dict(default='', required=False),
+ headers=dict(default='', required=False),
+ query=dict(default='', required=False),
+ wait=dict(default=True, type='bool', required=False)
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ events_service = connection.system_service().events_service()
+ events = events_service.list(
+ case_sensitive=module.params['case_sensitive'],
+ from_=module.params['from_'],
+ max=module.params['max'],
+ search=module.params['search'],
+ headers=module.params['headers'],
+ query=module.params['query'],
+ wait=module.params['wait']
+ )
+
+ result = dict(
+ ovirt_events=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in events
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_external_provider.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_external_provider.py
new file mode 100644
index 00000000..842b45a3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_external_provider.py
@@ -0,0 +1,424 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_external_provider
+short_description: Module to manage external providers in oVirt/RHV
+version_added: "1.0.0"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage external providers in oVirt/RHV"
+options:
+ name:
+ description:
+ - "Name of the external provider to manage."
+ type: str
+ state:
+ description:
+ - "Should the external be present or absent"
+ - "When you are using absent for I(os_volume), you need to make
+ sure that SD is not attached to the data center!"
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ description:
+ description:
+ - "Description of the external provider."
+ type: str
+ type:
+ description:
+ - "Type of the external provider."
+ choices: ['os_image', 'network', 'os_volume', 'foreman']
+ required: true
+ type: str
+ aliases: ['provider']
+ url:
+ description:
+ - "URL where external provider is hosted."
+ - "Applicable for those types: I(os_image), I(os_volume), I(network) and I(foreman)."
+ type: str
+ username:
+ description:
+ - "Username to be used for login to external provider."
+ - "Applicable for all types."
+ type: str
+ password:
+ description:
+ - "Password of the user specified in C(username) parameter."
+ - "Applicable for all types."
+ type: str
+ tenant_name:
+ description:
+ - "Name of the tenant."
+ - "Applicable for those types: I(os_image), I(os_volume) and I(network)."
+ aliases: ['tenant']
+ type: str
+ authentication_url:
+ description:
+ - "Keystone authentication URL of the openstack provider."
+ - "Applicable for those types: I(os_image), I(os_volume) and I(network)."
+ aliases: ['auth_url']
+ type: str
+ data_center:
+ description:
+ - "Name of the data center where provider should be attached."
+ - "Applicable for those type: I(os_volume)."
+ type: str
+ read_only:
+ description:
+ - "Specify if the network should be read only."
+ - "Applicable if C(type) is I(network)."
+ type: bool
+ network_type:
+ description:
+ - "Type of the external network provider either external (for example OVN) or neutron."
+ - "Applicable if C(type) is I(network)."
+ choices: ['external', 'neutron']
+ default: 'external'
+ type: str
+ authentication_keys:
+ description:
+ - "List of authentication keys."
+ - "When you will not pass these keys and there are already some
+ of them defined in the system they will be removed."
+ - "Applicable for I(os_volume)."
+ suboptions:
+ uuid:
+ description:
+ - The uuid which will be used.
+ value:
+ description:
+ - The value which will be used.
+ default: []
+ type: list
+ elements: dict
+ aliases: ['auth_keys']
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add image external provider:
+- ovirt.ovirt.ovirt_external_provider:
+ name: image_provider
+ type: os_image
+ url: http://1.2.3.4:9292
+ username: admin
+ password: 123456
+ tenant: admin
+ auth_url: http://1.2.3.4:35357/v2.0
+
+# Add volume external provider:
+- ovirt.ovirt.ovirt_external_provider:
+ name: image_provider
+ type: os_volume
+ url: http://1.2.3.4:9292
+ username: admin
+ password: 123456
+ tenant: admin
+ auth_url: http://1.2.3.4:5000/v2.0
+ authentication_keys:
+ -
+ uuid: "1234567-a1234-12a3-a234-123abc45678"
+ value: "ABCD00000000111111222333445w=="
+
+# Add foreman provider:
+- ovirt.ovirt.ovirt_external_provider:
+ name: foreman_provider
+ type: foreman
+ url: https://foreman.example.com
+ username: admin
+ password: 123456
+
+# Add external network provider for OVN:
+- ovirt.ovirt.ovirt_external_provider:
+ name: ovn_provider
+ type: network
+ network_type: external
+ url: http://1.2.3.4:9696
+
+# Remove image external provider:
+- ovirt.ovirt.ovirt_external_provider:
+ state: absent
+ name: image_provider
+ type: os_image
+'''
+
+RETURN = '''
+id:
+ description: ID of the external provider which is managed
+ returned: On success if external provider is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+external_host_provider:
+ description: "Dictionary of all the external_host_provider attributes. External provider attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/external_host_provider."
+ returned: "On success and if parameter 'type: foreman' is used."
+ type: dict
+openstack_image_provider:
+ description: "Dictionary of all the openstack_image_provider attributes. External provider attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_image_provider."
+ returned: "On success and if parameter 'type: os_image' is used."
+ type: dict
+openstack_volume_provider:
+ description: "Dictionary of all the openstack_volume_provider attributes. External provider attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_volume_provider."
+ returned: "On success and if parameter 'type: os_volume' is used."
+ type: dict
+openstack_network_provider:
+ description: "Dictionary of all the openstack_network_provider attributes. External provider attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_network_provider."
+ returned: "On success and if parameter 'type: network' is used."
+ type: dict
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_params,
+ check_sdk,
+ create_connection,
+ equal,
+ ovirt_full_argument_spec,
+)
+
+
+OS_VOLUME = 'os_volume'
+OS_IMAGE = 'os_image'
+NETWORK = 'network'
+FOREMAN = 'foreman'
+
+
+class ExternalProviderModule(BaseModule):
+
+ non_provider_params = ['type', 'authentication_keys', 'data_center']
+
+ def provider_type(self, provider_type):
+ self._provider_type = provider_type
+
+ def provider_module_params(self):
+ provider_params = [
+ (key, value) for key, value in self._module.params.items() if key
+ not in self.non_provider_params
+ ]
+ provider_params.append(('data_center', self.get_data_center()))
+ return provider_params
+
+ def get_data_center(self):
+ dc_name = self._module.params.get("data_center", None)
+ if dc_name:
+ system_service = self._connection.system_service()
+ data_centers_service = system_service.data_centers_service()
+ return data_centers_service.list(
+ search='name=%s' % dc_name,
+ )[0]
+ return dc_name
+
+ def build_entity(self):
+ provider_type = self._provider_type(
+ requires_authentication=self._module.params.get('username') is not None,
+ )
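+ # For network providers, also record whether the provider is an
+ # external one (for example OVN) or a neutron provider.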
+ if self._module.params.pop('type') == NETWORK:
+ setattr(
+ provider_type,
+ 'type',
+ otypes.OpenStackNetworkProviderType(self._module.params.pop('network_type'))
+ )
+
+ for key, value in self.provider_module_params():
+ if hasattr(provider_type, key):
+ setattr(provider_type, key, value)
+
+ return provider_type
+
+ def update_check(self, entity):
+ return (
+ equal(self._module.params.get('description'), entity.description) and
+ equal(self._module.params.get('url'), entity.url) and
+ equal(self._module.params.get('authentication_url'), entity.authentication_url) and
+ equal(self._module.params.get('tenant_name'), getattr(entity, 'tenant_name', None)) and
+ equal(self._module.params.get('username'), entity.username)
+ )
+
+ def update_volume_provider_auth_keys(
+ self, provider, providers_service, keys
+ ):
+ """
+ Update auth keys for a volume provider: add keys that do not exist
+ yet and remove keys that are no longer specified but are still
+ defined in the external volume provider.
+
+ Args:
+ provider (dict): Volume provider details.
+ providers_service (openstack_volume_providers_service): Provider
+ service.
+ keys (list): Keys to be updated/added to volume provider, each key
+ is represented as dict with keys: uuid, value.
+ """
+
+ provider_service = providers_service.provider_service(provider['id'])
+ auth_keys_service = provider_service.authentication_keys_service()
+ provider_keys = auth_keys_service.list()
+ # removing keys which are not defined
+ for key in [
+ k.id for k in provider_keys if k.uuid not in [
+ defined_key['uuid'] for defined_key in keys
+ ]
+ ]:
+ self.changed = True
+ if not self._module.check_mode:
+ auth_keys_service.key_service(key).remove()
+ if not (provider_keys or keys):
+ # Nothing to do when both are empty.
+ return
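+ # Add new keys and update existing ones, matching on uuid: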
+ for key in keys:
+ key_id_for_update = None
+ for existing_key in provider_keys:
+ if key['uuid'] == existing_key.uuid:
+ key_id_for_update = existing_key.id
+
+ auth_key_usage_type = (
+ otypes.OpenstackVolumeAuthenticationKeyUsageType("ceph")
+ )
+ auth_key = otypes.OpenstackVolumeAuthenticationKey(
+ usage_type=auth_key_usage_type,
+ uuid=key['uuid'],
+ value=key['value'],
+ )
+
+ if not key_id_for_update:
+ self.changed = True
+ if not self._module.check_mode:
+ auth_keys_service.add(auth_key)
+ else:
+ # We cannot tell whether the key was actually updated, because the
+ # API does not return key values for comparison, so always update.
+ self.changed = True
+ if not self._module.check_mode:
+ auth_key_service = (
+ auth_keys_service.key_service(key_id_for_update)
+ )
+ auth_key_service.update(auth_key)
+
+
+def _external_provider_service(provider_type, system_service):
+ if provider_type == OS_IMAGE:
+ return otypes.OpenStackImageProvider, system_service.openstack_image_providers_service()
+ elif provider_type == NETWORK:
+ return otypes.OpenStackNetworkProvider, system_service.openstack_network_providers_service()
+ elif provider_type == OS_VOLUME:
+ return otypes.OpenStackVolumeProvider, system_service.openstack_volume_providers_service()
+ elif provider_type == FOREMAN:
+ return otypes.ExternalHostProvider, system_service.external_host_providers_service()
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ name=dict(default=None),
+ description=dict(default=None),
+ type=dict(
+ required=True,
+ choices=[
+ OS_IMAGE, NETWORK, OS_VOLUME, FOREMAN,
+ ],
+ aliases=['provider'],
+ ),
+ url=dict(default=None),
+ username=dict(default=None),
+ password=dict(default=None, no_log=True),
+ tenant_name=dict(default=None, aliases=['tenant']),
+ authentication_url=dict(default=None, aliases=['auth_url']),
+ data_center=dict(default=None),
+ read_only=dict(default=None, type='bool'),
+ network_type=dict(
+ default='external',
+ choices=['external', 'neutron'],
+ ),
+ authentication_keys=dict(
+ default=[], aliases=['auth_keys'], type='list', no_log=True, elements='dict'
+ ),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+ check_params(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ provider_type_param = module.params.get('type')
+ provider_type, external_providers_service = _external_provider_service(
+ provider_type=provider_type_param,
+ system_service=connection.system_service(),
+ )
+ external_providers_module = ExternalProviderModule(
+ connection=connection,
+ module=module,
+ service=external_providers_service,
+ )
+ external_providers_module.provider_type(provider_type)
+
+ state = module.params.pop('state')
+ if state == 'absent':
+ ret = external_providers_module.remove()
+ elif state == 'present':
+ ret = external_providers_module.create()
+ openstack_volume_provider_id = ret.get('id')
+ if (
+ provider_type_param == OS_VOLUME and
+ openstack_volume_provider_id
+ ):
+ external_providers_module.update_volume_provider_auth_keys(
+ ret, external_providers_service,
+ module.params.get('authentication_keys'),
+ )
+
+ module.exit_json(**ret)
+
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_external_provider_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_external_provider_info.py
new file mode 100644
index 00000000..ad3f40bb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_external_provider_info.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_external_provider_info
+short_description: Retrieve information about one or more oVirt/RHV external providers
+version_added: "1.0.0"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Retrieve information about one or more oVirt/RHV external providers."
+ - This module was called C(ovirt_external_provider_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_external_provider_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_external_providers), which
+ contains a list of external_providers. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ type:
+ description:
+ - "Type of the external provider."
+ choices: ['os_image', 'os_network', 'os_volume', 'foreman']
+ required: true
+ type: str
+ aliases: ['provider']
+ name:
+ description:
+ - "Name of the external provider, can be used as glob expression."
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all image external providers named C(glance):
+- ovirt.ovirt.ovirt_external_provider_info:
+ type: os_image
+ name: glance
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_external_providers }}"
+'''
+
+RETURN = '''
+ovirt_external_providers:
+ description:
+ - "List of dictionaries. Content depends on I(type)."
+ - "For type C(foreman), attributes appearing in the dictionary can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/external_host_provider."
+ - "For type C(os_image), attributes appearing in the dictionary can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_image_provider."
+ - "For type C(os_volume), attributes appearing in the dictionary can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_volume_provider."
+ - "For type C(os_network), attributes appearing in the dictionary can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_network_provider."
+ returned: On success
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def _external_provider_service(provider_type, system_service):
+ if provider_type == 'os_image':
+ return system_service.openstack_image_providers_service()
+ elif provider_type == 'os_network':
+ return system_service.openstack_network_providers_service()
+ elif provider_type == 'os_volume':
+ return system_service.openstack_volume_providers_service()
+ elif provider_type == 'foreman':
+ return system_service.external_host_providers_service()
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ name=dict(default=None, required=False),
+ type=dict(
+ required=True,
+ choices=[
+ 'os_image', 'os_network', 'os_volume', 'foreman',
+ ],
+ aliases=['provider'],
+ ),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ external_providers_service = _external_provider_service(
+ provider_type=module.params.pop('type'),
+ system_service=connection.system_service(),
+ )
+ if module.params['name']:
+ external_providers = [
+ e for e in external_providers_service.list()
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ else:
+ external_providers = external_providers_service.list()
+
+ result = dict(
+ ovirt_external_providers=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in external_providers
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_group.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_group.py
new file mode 100644
index 00000000..5e154ff6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_group.py
@@ -0,0 +1,187 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_group
+short_description: Module to manage groups in oVirt/RHV
+version_added: "1.0.0"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage groups in oVirt/RHV"
+options:
+ name:
+ description:
+ - "Name of the group to manage."
+ required: true
+ type: str
+ state:
+ description:
+ - "Should the group be present/absent."
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ authz_name:
+ description:
+ - "Authorization provider of the group. In previous versions of oVirt/RHV known as domain."
+ required: true
+ aliases: ['domain']
+ type: str
+ namespace:
+ description:
+ - "Namespace of the authorization provider, where group resides."
+ required: false
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication:
+
+# Add group group1 from authorization provider example.com-authz
+- ovirt.ovirt.ovirt_group:
+ name: group1
+ domain: example.com-authz
+
+# Add group group1 from authorization provider example.com-authz
+# In case of a multi-domain Active Directory setup, you should also pass
+# the namespace, so the correct group is added:
+- ovirt.ovirt.ovirt_group:
+ name: group1
+ namespace: dc=ad2,dc=example,dc=com
+ domain: example.com-authz
+
+# Remove group group1 with authorization provider example.com-authz
+- ovirt.ovirt.ovirt_group:
+ state: absent
+ name: group1
+ domain: example.com-authz
+'''
+
+RETURN = '''
+id:
+ description: ID of the group which is managed
+ returned: On success if group is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+group:
+ description: "Dictionary of all the group attributes. Group attributes can be found on your oVirt/RHV instance
+ at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/group."
+ returned: On success if group is found.
+ type: dict
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ check_params,
+ create_connection,
+ equal,
+ ovirt_full_argument_spec,
+)
+
+
+def _group(connection, module):
+ groups = connection.system_service().groups_service().list(
+ search="name={name}".format(
+ name=module.params['name'],
+ )
+ )
+
+ # If more than one group is found, filter them by namespace and authz name
+ # (filtering here, as the oVirt/RHV backend doesn't support it):
+ if len(groups) > 1:
+ groups = [
+ g for g in groups if (
+ equal(module.params['namespace'], g.namespace) and
+ equal(module.params['authz_name'], g.domain.name)
+ )
+ ]
+ return groups[0] if groups else None
+
+
+class GroupsModule(BaseModule):
+
+ def build_entity(self):
+ return otypes.Group(
+ domain=otypes.Domain(
+ name=self._module.params['authz_name']
+ ),
+ name=self._module.params['name'],
+ namespace=self._module.params['namespace'],
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ name=dict(required=True),
+ authz_name=dict(required=True, aliases=['domain']),
+ namespace=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+ check_params(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ groups_service = connection.system_service().groups_service()
+ groups_module = GroupsModule(
+ connection=connection,
+ module=module,
+ service=groups_service,
+ )
+ group = _group(connection, module)
+ state = module.params['state']
+ if state == 'present':
+ ret = groups_module.create(entity=group)
+ elif state == 'absent':
+ ret = groups_module.remove(entity=group)
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_group_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_group_info.py
new file mode 100644
index 00000000..a6364f97
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_group_info.py
@@ -0,0 +1,109 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_group_info
+short_description: Retrieve information about one or more oVirt/RHV groups
+version_added: "1.0.0"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Retrieve information about one or more oVirt/RHV groups."
+ - This module was called C(ovirt_group_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_group_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_groups), which
+ contains a list of groups. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search group X use following pattern: name=X"
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all groups whose names start with C(admin):
+- ovirt.ovirt.ovirt_group_info:
+ pattern: name=admin*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_groups }}"
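+
+# (Illustrative) fetch_nested and nested_attributes come from the ovirt_info
+# documentation fragment and are passed through to get_dict_of_struct below:
+- ovirt.ovirt.ovirt_group_info:
+ pattern: name=admin*
+ fetch_nested: true
+ nested_attributes:
+ - name
+ register: result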
+'''
+
+RETURN = '''
+ovirt_groups:
+ description: "List of dictionaries describing the groups. Group attributes are mapped to dictionary keys,
+ all group attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/group."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ groups_service = connection.system_service().groups_service()
+ groups = groups_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_groups=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in groups
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host.py
new file mode 100644
index 00000000..cc428d23
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host.py
@@ -0,0 +1,711 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_host
+short_description: Module to manage hosts in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Module to manage hosts in oVirt/RHV"
+options:
+ id:
+ description:
+ - "ID of the host to manage."
+ type: str
+ name:
+ description:
+ - "Name of the host to manage."
+ required: true
+ type: str
+ state:
+ description:
+ - "State which should a host to be in after successful completion."
+ - "I(iscsilogin) and I(iscsidiscover) are supported since version 2.4."
+ choices: [
+ 'present', 'absent', 'maintenance', 'upgraded', 'started',
+ 'restarted', 'stopped', 'reinstalled', 'iscsidiscover', 'iscsilogin'
+ ]
+ default: present
+ type: str
+ comment:
+ description:
+ - "Description of the host."
+ type: str
+ timeout:
+ description:
+ - "The amount of time in seconds the module should wait for the host to
+ get into desired state."
+ default: 600
+ cluster:
+ description:
+ - "Name of the cluster, where host should be created."
+ type: str
+ address:
+ description:
+ - "Host address. It can be either FQDN (preferred) or IP address."
+ type: str
+ password:
+ description:
+ - "Password of the root. It's required in case C(public_key) is set to I(False)."
+ type: str
+ ssh_port:
+ description:
+ - "The host SSH port."
+ type: int
+ public_key:
+ description:
+ - "I(True) if the public key should be used to authenticate to host."
+ - "It's required in case C(password) is not set."
+ default: False
+ type: bool
+ aliases: ['ssh_public_key']
+ kdump_integration:
+ description:
+ - "Specify if host will have enabled Kdump integration."
+ choices: ['enabled', 'disabled']
+ type: str
+ spm_priority:
+ description:
+ - "SPM priority of the host. Integer value from 1 to 10, where higher number means higher priority."
+ type: int
+ override_iptables:
+ description:
+ - "If True host iptables will be overridden by host deploy script."
+ - "Note that C(override_iptables) is I(false) by default in oVirt/RHV."
+ type: bool
+ force:
+ description:
+ - "Indicates that the host should be removed even if it is non-responsive,
+ or if it is part of a Gluster Storage cluster and has volume bricks on it."
+ - "WARNING: It doesn't forcibly remove the host if another host related operation is being executed on the host at the same time."
+ default: False
+ type: bool
+ override_display:
+ description:
+ - "Override the display address of all VMs on this host with specified address."
+ type: str
+ kernel_params:
+ description:
+ - "List of kernel boot parameters."
+ - "Following are most common kernel parameters used for host:"
+ - "Hostdev Passthrough & SR-IOV: intel_iommu=on"
+ - "Nested Virtualization: kvm-intel.nested=1"
+ - "Unsafe Interrupts: vfio_iommu_type1.allow_unsafe_interrupts=1"
+ - "PCI Reallocation: pci=realloc"
+ - "C(Note:)"
+ - "Modifying kernel boot parameters settings can lead to a host boot failure.
+ Please consult the product documentation before doing any changes."
+ - "Kernel boot parameters changes require host deploy and restart. The host needs
+ to be I(reinstalled) successfully and then to be I(rebooted) for kernel boot parameters
+ to be applied."
+ type: list
+ elements: dict
+ hosted_engine:
+ description:
+ - "If I(deploy) it means this host should deploy also hosted engine
+ components."
+ - "If I(undeploy) it means this host should un-deploy hosted engine
+ components and this host will not function as part of the High
+ Availability cluster."
+ choices:
+ - 'deploy'
+ - 'undeploy'
+ type: str
+ power_management_enabled:
+ description:
+ - "Enable or disable power management of the host."
+ - "For more comprehensive setup of PM use C(ovirt_host_pm) module."
+ type: bool
+ activate:
+ description:
+ - "If C(state) is I(present) activate the host."
+ - "This parameter is good to disable, when you don't want to change
+ the state of host when using I(present) C(state)."
+ default: True
+ type: bool
+ iscsi:
+ description:
+ - "If C(state) is I(iscsidiscover) it means that the iscsi attribute is being
+ used to discover targets"
+ - "If C(state) is I(iscsilogin) it means that the iscsi attribute is being
+ used to login to the specified targets passed as part of the iscsi attribute"
+ suboptions:
+ username:
+ description:
+ - "A CHAP user name for logging into a target."
+ password:
+ description:
+ - "A CHAP password for logging into a target."
+ address:
+ description:
+ - "Address of the iSCSI storage server."
+ target:
+ description:
+ - "The target IQN for the storage device."
+ port:
+ description:
+ - "The port being used to connect with iscsi."
+ portal:
+ description:
+ - "The portal being used to connect with iscsi."
+ type: dict
+ check_upgrade:
+ description:
+ - "If I(true) and C(state) is I(upgraded) run check for upgrade
+ action before executing upgrade action."
+ default: True
+ type: bool
+ reboot_after_upgrade:
+ description:
+ - "If I(true) and C(state) is I(upgraded) reboot host after successful upgrade."
+ default: True
+ type: bool
+ vgpu_placement:
+ description:
+ - If I(consolidated), each vGPU is placed on the first physical card with
+ available space. This is the default placement, utilizing all available
+ space on the physical cards.
+ - If I(separated), each vGPU is placed on a separate physical card, if
+ possible. This can be useful for improving vGPU performance.
+ choices: ['consolidated', 'separated']
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication:
+
+# Add host with username/password supporting SR-IOV.
+# Note that override_iptables is false by default in oVirt/RHV:
+- ovirt.ovirt.ovirt_host:
+ cluster: Default
+ name: myhost
+ address: 10.34.61.145
+ password: secret
+ override_iptables: true
+ kernel_params:
+ - intel_iommu=on
+
+# Add host using public key
+- ovirt.ovirt.ovirt_host:
+ public_key: true
+ cluster: Default
+ name: myhost2
+ address: 10.34.61.145
+ override_iptables: true
+
+# Deploy hosted engine host
+- ovirt.ovirt.ovirt_host:
+ cluster: Default
+ name: myhost2
+ password: secret
+ address: 10.34.61.145
+ override_iptables: true
+ hosted_engine: deploy
+
+# Maintenance
+- ovirt.ovirt.ovirt_host:
+ state: maintenance
+ name: myhost
+
+# Restart host using power management:
+- ovirt.ovirt.ovirt_host:
+ state: restarted
+ name: myhost
+
+# Upgrade host
+- ovirt.ovirt.ovirt_host:
+ state: upgraded
+ name: myhost
+
+# Discover iSCSI targets
+- ovirt.ovirt.ovirt_host:
+ state: iscsidiscover
+ name: myhost
+ iscsi:
+ username: iscsi_user
+ password: secret
+ address: 10.34.61.145
+ port: 3260
+
+
+# Log in to iSCSI targets
+- ovirt.ovirt.ovirt_host:
+ state: iscsilogin
+ name: myhost
+ iscsi:
+ username: iscsi_user
+ password: secret
+ address: 10.34.61.145
+ target: "iqn.2015-07.com.mlipchuk2.redhat:444"
+ port: 3260
+
+
+# Reinstall host using public key
+- ovirt.ovirt.ovirt_host:
+ state: reinstalled
+ name: myhost
+ public_key: true
+
+# Remove host
+- ovirt.ovirt.ovirt_host:
+ state: absent
+ name: myhost
+ force: True
+
+# Retry removing host when failed (https://bugzilla.redhat.com/show_bug.cgi?id=1719271)
+- ovirt.ovirt.ovirt_host:
+ state: absent
+ name: myhost
+ register: result
+ until: not result.failed
+ retries: 6
+ delay: 20
+
+# Change host name
+- ovirt.ovirt.ovirt_host:
+ id: 00000000-0000-0000-0000-000000000000
+ name: "new host name"
+'''
+
+RETURN = '''
+id:
+ description: ID of the host which is managed
+ returned: On success if host is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+host:
+ description: "Dictionary of all the host attributes. Host attributes can be found on your oVirt/RHV instance
+ at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host."
+ returned: On success if host is found.
+ type: dict
+iscsi_targets:
+ description: "List of host iscsi targets"
+ returned: On success if host is found and state is iscsidiscover.
+ type: list
+'''
+
+import time
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+
+ from ovirtsdk4.types import HostStatus as hoststate
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ get_id_by_name,
+ ovirt_full_argument_spec,
+ wait,
+)
+
+
+class HostsModule(BaseModule):
+ def __init__(self, start_event=None, *args, **kwargs):
+ super(HostsModule, self).__init__(*args, **kwargs)
+ self.start_event = start_event
+
+ def build_entity(self):
+ return otypes.Host(
+ id=self._module.params.get('id'),
+ name=self.param('name'),
+ cluster=otypes.Cluster(
+ name=self.param('cluster')
+ ) if self.param('cluster') else None,
+ comment=self.param('comment'),
+ address=self.param('address'),
+ root_password=self.param('password'),
+ ssh=otypes.Ssh(
+ authentication_method=otypes.SshAuthenticationMethod.PUBLICKEY if self.param('public_key') else None,
+ port=self.param('ssh_port'),
+ ),
+ spm=otypes.Spm(
+ priority=self.param('spm_priority'),
+ ) if self.param('spm_priority') else None,
+ override_iptables=self.param('override_iptables'),
+ display=otypes.Display(
+ address=self.param('override_display'),
+ ) if self.param('override_display') else None,
+ os=otypes.OperatingSystem(
+ custom_kernel_cmdline=' '.join(self.param('kernel_params')),
+ ) if self.param('kernel_params') else None,
+ power_management=otypes.PowerManagement(
+ enabled=self.param('power_management_enabled'),
+ kdump_detection=self.param('kdump_integration') == 'enabled',
+ ) if self.param('power_management_enabled') is not None or self.param('kdump_integration') else None,
+ vgpu_placement=otypes.VgpuPlacement(
+ self.param('vgpu_placement')
+ ) if self.param('vgpu_placement') is not None else None,
+ )
+
+ def update_check(self, entity):
+ kernel_params = self.param('kernel_params')
+ return (
+ equal(self.param('comment'), entity.comment) and
+ equal(self.param('kdump_integration'), 'enabled' if entity.power_management.kdump_detection else 'disabled') and
+ equal(self.param('spm_priority'), entity.spm.priority) and
+ equal(self.param('name'), entity.name) and
+ equal(self.param('power_management_enabled'), entity.power_management.enabled) and
+ equal(self.param('override_display'), getattr(entity.display, 'address', None)) and
+ equal(self.param('vgpu_placement'), str(entity.vgpu_placement)) and
+ equal(
+ sorted(kernel_params) if kernel_params else None,
+ sorted(entity.os.custom_kernel_cmdline.split(' '))
+ )
+ )
+
+ def pre_remove(self, entity):
+ self.action(
+ entity=entity,
+ action='deactivate',
+ action_condition=lambda h: h.status != hoststate.MAINTENANCE,
+ wait_condition=lambda h: h.status == hoststate.MAINTENANCE,
+ )
+
+ def post_reinstall(self, host):
+ wait(
+ service=self._service.service(host.id),
+ condition=lambda h: h.status != hoststate.MAINTENANCE,
+ fail_condition=failed_state,
+ wait=self.param('wait'),
+ timeout=self.param('timeout'),
+ )
+
+ def raise_host_exception(self):
+ events = self._connection.system_service().events_service().list(from_=int(self.start_event.index))
+ error_events = [
+ event.description for event in events
+ if event.host is not None and (event.host.id == self.param('id') or event.host.name == self.param('name')) and
+ event.severity in [otypes.LogSeverity.WARNING, otypes.LogSeverity.ERROR]
+ ]
+ if error_events:
+ raise Exception("Error message: %s" % error_events)
+ return True
+
+ def failed_state_after_reinstall(self, host, count=0):
+ if host.status in [
+ hoststate.ERROR,
+ hoststate.INSTALL_FAILED,
+ hoststate.NON_OPERATIONAL,
+ ]:
+ return self.raise_host_exception()
+
+ # If the host is in a non-responsive state after upgrade/install,
+ # let's wait a few seconds and re-check the state:
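+ # (The retry below sleeps 20 seconds per attempt and gives up once
+ # count exceeds 3, i.e. roughly 80 seconds in total.)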
+ if host.status == hoststate.NON_RESPONSIVE:
+ if count <= 3:
+ time.sleep(20)
+ return self.failed_state_after_reinstall(
+ self._service.service(host.id).get(),
+ count + 1,
+ )
+ else:
+ return self.raise_host_exception()
+
+ return False
+
+
+def failed_state(host):
+ return host.status in [
+ hoststate.ERROR,
+ hoststate.INSTALL_FAILED,
+ hoststate.NON_RESPONSIVE,
+ hoststate.NON_OPERATIONAL,
+ ]
+
+
+def control_state(host_module):
+ host = host_module.search_entity()
+ if host is None:
+ return
+
+ state = host_module._module.params['state']
+ host_service = host_module._service.service(host.id)
+ if failed_state(host):
+ # In case host is in INSTALL_FAILED status, we can reinstall it:
+ if hoststate.INSTALL_FAILED == host.status and state != 'reinstalled':
+ raise Exception(
+ "Not possible to manage host '%s' in state '%s'." % (
+ host.name,
+ host.status
+ )
+ )
+ elif host.status in [
+ hoststate.REBOOT,
+ hoststate.CONNECTING,
+ hoststate.INITIALIZING,
+ hoststate.INSTALLING,
+ hoststate.INSTALLING_OS,
+ ]:
+ wait(
+ service=host_service,
+ condition=lambda host: host.status == hoststate.UP,
+ fail_condition=failed_state,
+ )
+ elif host.status == hoststate.PREPARING_FOR_MAINTENANCE:
+ wait(
+ service=host_service,
+ condition=lambda host: host.status == hoststate.MAINTENANCE,
+ fail_condition=failed_state,
+ )
+
+ return host
+
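+# Note on control_state(): transitional host states (REBOOT, CONNECTING,
+# INITIALIZING, INSTALLING, ...) are waited out before any action, and a host
+# stuck in INSTALL_FAILED can only be managed with state=reinstalled.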
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=[
+ 'present', 'absent', 'maintenance', 'upgraded', 'started',
+ 'restarted', 'stopped', 'reinstalled', 'iscsidiscover', 'iscsilogin'
+ ],
+ default='present',
+ ),
+ name=dict(required=True),
+ id=dict(default=None),
+ comment=dict(default=None),
+ cluster=dict(default=None),
+ address=dict(default=None),
+ ssh_port=dict(default=None, type='int'),
+ password=dict(default=None, no_log=True),
+ public_key=dict(default=False, type='bool', aliases=['ssh_public_key']),
+ kdump_integration=dict(default=None, choices=['enabled', 'disabled']),
+ spm_priority=dict(default=None, type='int'),
+ override_iptables=dict(default=None, type='bool'),
+ force=dict(default=False, type='bool'),
+ timeout=dict(default=600, type='int'),
+ override_display=dict(default=None),
+ kernel_params=dict(default=None, type='list', elements='dict'),
+ hosted_engine=dict(default=None, choices=['deploy', 'undeploy']),
+ power_management_enabled=dict(default=None, type='bool'),
+ activate=dict(default=True, type='bool'),
+ iscsi=dict(default=None, type='dict'),
+ check_upgrade=dict(default=True, type='bool'),
+ reboot_after_upgrade=dict(default=True, type='bool'),
+ vgpu_placement=dict(default=None, choices=['consolidated', 'separated']),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[
+ ['state', 'iscsidiscover', ['iscsi']],
+ ['state', 'iscsilogin', ['iscsi']]
+ ]
+ )
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ hosts_service = connection.system_service().hosts_service()
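+ # Remember the newest engine event, so that raise_host_exception() only
+ # scans events emitted after this point when reporting failures: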
+ start_event = connection.system_service().events_service().list(max=1)[0]
+ hosts_module = HostsModule(
+ connection=connection,
+ module=module,
+ service=hosts_service,
+ start_event=start_event,
+ )
+
+ state = module.params['state']
+ host = control_state(hosts_module)
+ if state == 'present':
+ ret = hosts_module.create(
+ deploy_hosted_engine=(
+ module.params.get('hosted_engine') == 'deploy'
+ ) if module.params.get('hosted_engine') is not None else None,
+ activate=module.params['activate'],
+ result_state=(hoststate.MAINTENANCE if module.params['activate'] is False else hoststate.UP) if host is None else None,
+ fail_condition=hosts_module.failed_state_after_reinstall if host is None else lambda h: False,
+ )
+ if module.params['activate'] and host is not None:
+ ret = hosts_module.action(
+ action='activate',
+ action_condition=lambda h: h.status != hoststate.UP,
+ wait_condition=lambda h: h.status == hoststate.UP,
+ fail_condition=failed_state,
+ )
+ elif state == 'absent':
+ ret = hosts_module.remove()
+ elif state == 'maintenance':
+ hosts_module.action(
+ action='deactivate',
+ action_condition=lambda h: h.status != hoststate.MAINTENANCE,
+ wait_condition=lambda h: h.status == hoststate.MAINTENANCE,
+ fail_condition=failed_state,
+ )
+ ret = hosts_module.create()
+ elif state == 'upgraded':
+ result_state = hoststate.MAINTENANCE if host.status == hoststate.MAINTENANCE else hoststate.UP
+ events_service = connection.system_service().events_service()
+ last_event = events_service.list(max=1)[0]
+
+ if module.params['check_upgrade']:
+ hosts_module.action(
+ action='upgrade_check',
+ action_condition=lambda host: not host.update_available,
+ wait_condition=lambda host: host.update_available or (
+ len([
+ event
+ for event in events_service.list(
+ from_=int(last_event.id),
+ search='type=885',
+ # Uncomment when 4.1 is EOL, and remove the cond:
+ # if host.name in event.description
+ # search='type=885 and host.name=%s' % host.name,
+ ) if host.name in event.description
+ ]) > 0
+ ),
+ fail_condition=lambda host: len(events_service.list(
+ from_=int(last_event.id),
+ search='type=839 or type=887 and host.name=%s' % host.name,
+ )
+ ) > 0,
+ )
+ # Set to False, because upgrade_check isn't a 'changing' action:
+ hosts_module._changed = False
+ ret = hosts_module.action(
+ action='upgrade',
+ action_condition=lambda h: h.update_available,
+ wait_condition=lambda h: h.status == result_state,
+ post_action=lambda h: time.sleep(module.params['poll_interval']),
+ fail_condition=lambda h: hosts_module.failed_state_after_reinstall(h) or (
+ len([
+ event
+ for event in events_service.list(
+ from_=int(last_event.id),
+ # Fail upgrade if migration fails:
+ # 17: Failed to switch Host to Maintenance mode
+ # 65, 140: Migration failed
+ # 166: No available host was found to migrate VM
+ search='type=65 or type=140 or type=166 or type=17',
+ ) if host.name in event.description
+ ]) > 0
+ ),
+ reboot=module.params['reboot_after_upgrade'],
+ )
+ elif state == 'iscsidiscover':
+ host_id = get_id_by_name(hosts_service, module.params['name'])
+ iscsi_param = module.params['iscsi']
+ iscsi_targets = hosts_service.service(host_id).iscsi_discover(
+ iscsi=otypes.IscsiDetails(
+ port=int(iscsi_param.get('port', 3260)),
+ username=iscsi_param.get('username'),
+ password=iscsi_param.get('password'),
+ address=iscsi_param.get('address'),
+ portal=iscsi_param.get('portal'),
+ ),
+ )
+ ret = {
+ 'changed': False,
+ 'id': host_id,
+ 'iscsi_targets': iscsi_targets,
+ }
+ elif state == 'iscsilogin':
+ host_id = get_id_by_name(hosts_service, module.params['name'])
+ iscsi_param = module.params['iscsi']
+ ret = hosts_module.action(
+ action='iscsi_login',
+ iscsi=otypes.IscsiDetails(
+ port=int(iscsi_param.get('port', 3260)),
+ username=iscsi_param.get('username'),
+ password=iscsi_param.get('password'),
+ address=iscsi_param.get('address'),
+ target=iscsi_param.get('target'),
+ portal=iscsi_param.get('portal'),
+ ),
+ )
+ elif state == 'started':
+ ret = hosts_module.action(
+ action='fence',
+ action_condition=lambda h: h.status == hoststate.DOWN,
+ wait_condition=lambda h: h.status in [hoststate.UP, hoststate.MAINTENANCE],
+ fail_condition=hosts_module.failed_state_after_reinstall,
+ fence_type='start',
+ )
+ elif state == 'stopped':
+ hosts_module.action(
+ action='deactivate',
+ action_condition=lambda h: h.status not in [hoststate.MAINTENANCE, hoststate.DOWN],
+ wait_condition=lambda h: h.status in [hoststate.MAINTENANCE, hoststate.DOWN],
+ fail_condition=failed_state,
+ )
+ ret = hosts_module.action(
+ action='fence',
+ action_condition=lambda h: h.status != hoststate.DOWN,
+ wait_condition=lambda h: h.status == hoststate.DOWN if module.params['wait'] else True,
+ fail_condition=failed_state,
+ fence_type='stop',
+ )
+ elif state == 'restarted':
+ ret = hosts_module.action(
+ action='fence',
+ wait_condition=lambda h: h.status == hoststate.UP,
+ fail_condition=hosts_module.failed_state_after_reinstall,
+ fence_type='restart',
+ )
+ elif state == 'reinstalled':
+ # Deactivate the host if it is not in maintenance:
+ hosts_module.action(
+ action='deactivate',
+ action_condition=lambda h: h.status not in [hoststate.MAINTENANCE, hoststate.DOWN],
+ wait_condition=lambda h: h.status in [hoststate.MAINTENANCE, hoststate.DOWN],
+ fail_condition=failed_state,
+ )
+
+ # Reinstall host:
+ hosts_module.action(
+ action='install',
+ action_condition=lambda h: h.status == hoststate.MAINTENANCE,
+ post_action=hosts_module.post_reinstall,
+ wait_condition=lambda h: h.status == hoststate.MAINTENANCE,
+ fail_condition=hosts_module.failed_state_after_reinstall,
+ host=otypes.Host(
+ override_iptables=module.params['override_iptables'],
+ ) if module.params['override_iptables'] else None,
+ root_password=module.params['password'],
+ ssh=otypes.Ssh(
+ authentication_method=otypes.SshAuthenticationMethod.PUBLICKEY,
+ ) if module.params['public_key'] else None,
+ deploy_hosted_engine=(
+ module.params.get('hosted_engine') == 'deploy'
+ ) if module.params.get('hosted_engine') is not None else None,
+ undeploy_hosted_engine=(
+ module.params.get('hosted_engine') == 'undeploy'
+ ) if module.params.get('hosted_engine') is not None else None,
+ )
+
+ # Activate host after reinstall:
+ ret = hosts_module.action(
+ action='activate',
+ action_condition=lambda h: h.status == hoststate.MAINTENANCE,
+ wait_condition=lambda h: h.status == hoststate.UP,
+ fail_condition=failed_state,
+ )
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_info.py
new file mode 100644
index 00000000..25249b67
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_info.py
@@ -0,0 +1,135 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_host_info
+short_description: Retrieve information about one or more oVirt/RHV hosts
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV hosts."
+ - This module was called C(ovirt_host_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_host_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_hosts), which
+ contains a list of hosts. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search host X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+ type: str
+ all_content:
+ description:
+ - "If I(true) all the attributes of the hosts should be
+ included in the response."
+ default: False
+ type: bool
+ cluster_version:
+ description:
+ - "Filter the hosts based on the cluster version."
+ type: str
+
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all hosts whose names start with C(host) and
+# belong to data center C(west):
+- ovirt.ovirt.ovirt_host_info:
+ pattern: name=host* and datacenter=west
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_hosts }}"
+# All hosts with cluster version 4.2:
+- ovirt.ovirt.ovirt_host_info:
+ pattern: name=host*
+ cluster_version: "4.2"
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_hosts }}"
+'''
+
+RETURN = '''
+ovirt_hosts:
+ description: "List of dictionaries describing the hosts. Host attributes are mapped to dictionary keys,
+ all host attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def get_filtered_hosts(cluster_version, hosts, connection):
+ # Filtering by cluster version keeps only the hosts whose cluster version matches the input
+ filtered_hosts = []
+ for host in hosts:
+ cluster = connection.follow_link(host.cluster)
+ cluster_version_host = str(cluster.version.major) + '.' + str(cluster.version.minor)
+ if cluster_version_host == cluster_version:
+ filtered_hosts.append(host)
+ return filtered_hosts
+
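+# Illustrative sketch (not part of the module): a host in a cluster reporting
+# version major=4, minor=2 is kept by the "major.minor" string comparison above:
+#
+# hosts_4_2 = get_filtered_hosts('4.2', hosts, connection)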
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ all_content=dict(default=False, type='bool'),
+ cluster_version=dict(default=None, type='str'),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ hosts_service = connection.system_service().hosts_service()
+ hosts = hosts_service.list(
+ search=module.params['pattern'],
+ all_content=module.params['all_content']
+ )
+ cluster_version = module.params.get('cluster_version')
+ if cluster_version is not None:
+ hosts = get_filtered_hosts(cluster_version, hosts, connection)
+ result = dict(
+ ovirt_hosts=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in hosts
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_network.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_network.py
new file mode 100644
index 00000000..a48f1ed5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_network.py
@@ -0,0 +1,607 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016, 2018 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_host_network
+short_description: Module to manage host networks in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Module to manage host networks in oVirt/RHV."
+options:
+ name:
+ description:
+ - "Name of the host to manage networks for."
+ required: true
+ type: str
+ aliases:
+ - 'host'
+ state:
+ description:
+ - "Should the host be present/absent."
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ bond:
+ description:
+ - "Dictionary describing network bond:"
+ suboptions:
+ name:
+ description:
+ - Bond name.
+ mode:
+ description:
+ - Bonding mode.
+ options:
+ description:
+ - Bonding options.
+ interfaces:
+ description:
+ - List of interfaces to create a bond.
+ type: dict
+ interface:
+ description:
+ - "Name of the network interface where logical network should be attached."
+ type: str
+ networks:
+ description:
+ - "List of dictionary describing networks to be attached to interface or bond:"
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Name of the logical network to be assigned to bond or interface.
+ boot_protocol:
+ description:
+ - Boot protocol.
+ choices: ['none', 'static', 'dhcp']
+ address:
+ description:
+ - IP address in case I(static) boot protocol is used.
+ netmask:
+ description:
+ - Subnet mask in case I(static) boot protocol is used.
+ gateway:
+ description:
+ - Gateway in case I(static) boot protocol is used.
+ version:
+ description:
+ - IP version. Either v4 or v6. Default is v4.
+ custom_properties:
+ description:
+ - "Custom properties applied to the host network."
+ - "Custom properties is a list of dictionary which can have following values."
+ suboptions:
+ name:
+ description:
+ - Name of custom property.
+ value:
+ description:
+ - Value of custom property.
+ labels:
+ description:
+ - "List of names of the network label to be assigned to bond or interface."
+ type: list
+ elements: str
+ check:
+ description:
+ - "If I(true) verify connectivity between host and engine."
+ - "Network configuration changes will be rolled back if connectivity between
+ engine and the host is lost after changing network configuration."
+ type: bool
+ save:
+ description:
+ - "If I(true) network configuration will be persistent, otherwise it is temporary. Default I(true) since Ansible 2.8."
+ type: bool
+ default: True
+ sync_networks:
+ description:
+ - "If I(true) all networks will be synchronized before modification"
+ type: bool
+ default: false
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication:
+
+# In all examples, the durability of the created configuration depends on the 'save' option value:
+
+# Create a bond of the eth1 and eth2 interfaces, put the 'myvlan' network on top of it, and persist the new configuration:
+- name: Bonds
+ ovirt.ovirt.ovirt_host_network:
+ name: myhost
+ save: yes
+ bond:
+ name: bond0
+ mode: 2
+ interfaces:
+ - eth1
+ - eth2
+ networks:
+ - name: myvlan
+ boot_protocol: static
+ address: 1.2.3.4
+ netmask: 255.255.255.0
+ gateway: 1.2.3.4
+ version: v4
+
+# Create a bond of the eth1 and eth2 interfaces, specifying both mode and miimon:
+- name: Bonds
+ ovirt.ovirt.ovirt_host_network:
+ name: myhost
+ bond:
+ name: bond0
+ mode: 1
+ options:
+ miimon: 200
+ interfaces:
+ - eth1
+ - eth2
+
+# Remove bond0 bond from host interfaces:
+- ovirt.ovirt.ovirt_host_network:
+ state: absent
+ name: myhost
+ bond:
+ name: bond0
+
+# Assign myvlan1 and myvlan2 vlans to host eth0 interface:
+- ovirt.ovirt.ovirt_host_network:
+ name: myhost
+ interface: eth0
+ networks:
+ - name: myvlan1
+ - name: myvlan2
+
+# Remove myvlan2 vlan from host eth0 interface:
+- ovirt.ovirt.ovirt_host_network:
+ state: absent
+ name: myhost
+ interface: eth0
+ networks:
+ - name: myvlan2
+
+# Remove all networks/vlans from host eth0 interface:
+- ovirt.ovirt.ovirt_host_network:
+ state: absent
+ name: myhost
+ interface: eth0
+
+# Add custom_properties to network:
+- ovirt.ovirt.ovirt_host_network:
+ name: myhost
+ interface: eth0
+ networks:
+ - name: myvlan1
+ custom_properties:
+ - name: bridge_opts
+ value: gc_timer=10
+'''
+
+RETURN = '''
+id:
+ description: ID of the host NIC which is managed
+ returned: On success if host NIC is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+host_nic:
+ description: "Dictionary of all the host NIC attributes. Host NIC attributes can be found on your oVirt/RHV instance
+ at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host_nic."
+ returned: On success if host NIC is found.
+ type: dict
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils import six
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ get_dict_of_struct,
+ get_entity,
+ get_link_name,
+ ovirt_full_argument_spec,
+ search_by_name,
+ engine_supported
+)
+
+
+def get_bond_options(mode, usr_opts):
+ MIIMON_100 = dict(miimon='100')
+ DEFAULT_MODE_OPTS = {
+ '1': MIIMON_100,
+ '2': MIIMON_100,
+ '3': MIIMON_100,
+ '4': dict(xmit_hash_policy='2', **MIIMON_100)
+ }
+
+ options = []
+ if mode is None:
+ return options
+
+ def get_type_name(mode_number):
+ """
+ We need to maintain these type strings for the __compare_options method,
+ for easier comparison.
+ """
+ modes = [
+ 'Active-Backup',
+ 'Load balance (balance-xor)',
+ None,
+ 'Dynamic link aggregation (802.3ad)',
+ ]
+ if (not 0 < mode_number <= len(modes)):
+ return None
+ return modes[mode_number - 1]
+
+ try:
+ mode_number = int(mode)
+ except ValueError:
+ raise Exception('Bond mode must be a number.')
+
+ options.append(
+ otypes.Option(
+ name='mode',
+ type=get_type_name(mode_number),
+ value=str(mode_number)
+ )
+ )
+
+ opts_dict = DEFAULT_MODE_OPTS.get(str(mode), {})
+ if usr_opts is not None:
+ opts_dict.update(**usr_opts)
+
+ options.extend(
+ [otypes.Option(name=opt, value=str(value))
+ for opt, value in six.iteritems(opts_dict)]
+ )
+ return options
+
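+# Illustrative sketch (not part of the module): for mode 4 the defaults above
+# expand to miimon=100 plus xmit_hash_policy=2, with user options merged on top:
+#
+# opts = get_bond_options(4, {'miimon': '200'})
+# # -> mode=4 ('Dynamic link aggregation (802.3ad)'), miimon=200, xmit_hash_policy=2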
+
+class HostNetworksModule(BaseModule):
+
+ def __compare_options(self, new_options, old_options):
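+ # Despite its name, this returns True when the two option lists differ
+ # (compared order-insensitively, after sorting by option name):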
+ return sorted((get_dict_of_struct(opt) for opt in new_options),
+ key=lambda x: x["name"]) != sorted((get_dict_of_struct(opt) for opt in old_options),
+ key=lambda x: x["name"])
+
+ def build_entity(self):
+ return otypes.Host()
+
+ def update_custom_properties(self, attachments_service, attachment, network):
+ if network.get('custom_properties'):
+ current = []
+ if attachment.properties:
+ current = [(cp.name, str(cp.value)) for cp in attachment.properties]
+ passed = [(cp.get('name'), str(cp.get('value'))) for cp in network.get('custom_properties') if cp]
+ if sorted(current) != sorted(passed):
+ attachment.properties = [
+ otypes.Property(
+ name=prop.get('name'),
+ value=prop.get('value')
+ ) for prop in network.get('custom_properties')
+ ]
+ if not self._module.check_mode:
+ attachments_service.service(attachment.id).update(attachment)
+ self.changed = True
+
+ def update_address(self, attachments_service, attachment, network):
+ # Check if there is any change in address assignments and
+ # update it if needed:
+ for ip in attachment.ip_address_assignments:
+ if str(ip.ip.version) == network.get('version', 'v4'):
+ changed = False
+ if not equal(network.get('boot_protocol'), str(ip.assignment_method)):
+ ip.assignment_method = otypes.BootProtocol(network.get('boot_protocol'))
+ changed = True
+ if not equal(network.get('address'), ip.ip.address):
+ ip.ip.address = network.get('address')
+ changed = True
+ if not equal(network.get('gateway'), ip.ip.gateway):
+ ip.ip.gateway = network.get('gateway')
+ changed = True
+ if not equal(network.get('netmask'), ip.ip.netmask):
+ ip.ip.netmask = network.get('netmask')
+ changed = True
+
+ if changed:
+ if not self._module.check_mode:
+ attachments_service.service(attachment.id).update(attachment)
+ self.changed = True
+ break
+
+ def has_update(self, nic_service):
+ update = False
+ bond = self._module.params['bond']
+ networks = self._module.params['networks']
+ labels = self._module.params['labels']
+ nic = get_entity(nic_service)
+
+ if nic is None:
+ return update
+
+ # Check if bond configuration should be updated:
+ if bond:
+ update = self.__compare_options(get_bond_options(bond.get('mode'), bond.get('options')), getattr(nic.bonding, 'options', []))
+ update = update or not equal(
+ sorted(bond.get('interfaces')) if bond.get('interfaces') else None,
+ sorted(get_link_name(self._connection, s) for s in nic.bonding.slaves)
+ )
+
+ # Check if labels need to be updated on interface/bond:
+ if labels:
+ net_labels = nic_service.network_labels_service().list()
+ # If any labels which user passed aren't assigned, relabel the interface:
+ if sorted(labels) != sorted([lbl.id for lbl in net_labels]):
+ return True
+
+ if not networks:
+ return update
+
+ # Check if networks attachments configuration should be updated:
+ attachments_service = nic_service.network_attachments_service()
+ network_names = [network.get('name') for network in networks]
+
+ attachments = {}
+ for attachment in attachments_service.list():
+ name = get_link_name(self._connection, attachment.network)
+ if name in network_names:
+ attachments[name] = attachment
+
+ for network in networks:
+ attachment = attachments.get(network.get('name'))
+ # If the attachment doesn't exist, we need to create it:
+ if attachment is None:
+ return True
+ self.update_custom_properties(attachments_service, attachment, network)
+ self.update_address(attachments_service, attachment, network)
+
+ return update
+
+ def _action_save_configuration(self, entity):
+ if not self._module.check_mode:
+ self._service.service(entity.id).commit_net_config()
+ self.changed = True
+
+
+def needs_sync(nics_service):
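+ # A host needs a network sync when any attachment on any of its NICs
+ # reports in_sync == False: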
+ nics = nics_service.list()
+ for nic in nics:
+ nic_service = nics_service.nic_service(nic.id)
+ for attachment in nic_service.network_attachments_service().list():
+ if not attachment.in_sync:
+ return True
+ return False
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ name=dict(aliases=['host'], required=True),
+ bond=dict(default=None, type='dict'),
+ interface=dict(default=None),
+ networks=dict(default=None, type='list', elements='dict'),
+ labels=dict(default=None, type='list', elements='str'),
+ check=dict(default=None, type='bool'),
+ save=dict(default=True, type='bool'),
+ sync_networks=dict(default=False, type='bool'),
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ hosts_service = connection.system_service().hosts_service()
+ host_networks_module = HostNetworksModule(
+ connection=connection,
+ module=module,
+ service=hosts_service,
+ )
+
+ host = host_networks_module.search_entity()
+ if host is None:
+ raise Exception("Host '%s' was not found." % module.params['name'])
+
+ bond = module.params['bond']
+ interface = module.params['interface']
+ networks = module.params['networks']
+ labels = module.params['labels']
+ nic_name = bond.get('name') if bond else module.params['interface']
+
+ host_service = hosts_service.host_service(host.id)
+ nics_service = host_service.nics_service()
+ nic = search_by_name(nics_service, nic_name)
+
+ if module.params["sync_networks"]:
+ if needs_sync(nics_service):
+ if not module.check_mode:
+ host_service.sync_all_networks()
+ host_networks_module.changed = True
+
+ network_names = [network['name'] for network in networks or []]
+ state = module.params['state']
+
+ if (
+ state == 'present' and
+ (nic is None or host_networks_module.has_update(nics_service.service(nic.id)))
+ ):
+ # Remove networks which are attached to a different interface than the user wants:
+ attachments_service = host_service.network_attachments_service()
+
+ # Append attachment ID to network if needs update:
+ for a in attachments_service.list():
+ current_network_name = get_link_name(connection, a.network)
+ if current_network_name in network_names:
+ for n in networks:
+ if n['name'] == current_network_name:
+ n['id'] = a.id
+
+ # Check if we have to break some bonds:
+ removed_bonds = []
+ if nic is not None:
+ for host_nic in nics_service.list():
+ if host_nic.bonding and nic.id in [slave.id for slave in host_nic.bonding.slaves]:
+ removed_bonds.append(otypes.HostNic(id=host_nic.id))
+
+ # Assign the networks:
+ setup_params = dict(
+ entity=host,
+ action='setup_networks',
+ check_connectivity=module.params['check'],
+ removed_bonds=removed_bonds if removed_bonds else None,
+ modified_bonds=[
+ otypes.HostNic(
+ name=bond.get('name'),
+ bonding=otypes.Bonding(
+ options=get_bond_options(bond.get('mode'), bond.get('options')),
+ slaves=[
+ otypes.HostNic(name=i) for i in bond.get('interfaces', [])
+ ],
+ ),
+ ),
+ ] if bond else None,
+ modified_labels=[
+ otypes.NetworkLabel(
+ id=str(name),
+ host_nic=otypes.HostNic(
+ name=bond.get('name') if bond else interface
+ ),
+ ) for name in labels
+ ] if labels else None,
+ modified_network_attachments=[
+ otypes.NetworkAttachment(
+ id=network.get('id'),
+ network=otypes.Network(
+ name=network['name']
+ ) if network['name'] else None,
+ host_nic=otypes.HostNic(
+ name=bond.get('name') if bond else interface
+ ),
+ ip_address_assignments=[
+ otypes.IpAddressAssignment(
+ assignment_method=otypes.BootProtocol(
+ network.get('boot_protocol', 'none')
+ ),
+ ip=otypes.Ip(
+ address=network.get('address'),
+ gateway=network.get('gateway'),
+ netmask=network.get('netmask'),
+ version=otypes.IpVersion(
+ network.get('version')
+ ) if network.get('version') else None,
+ ),
+ ),
+ ],
+ properties=[
+ otypes.Property(
+ name=prop.get('name'),
+ value=prop.get('value')
+ ) for prop in network.get('custom_properties', [])
+ ]
+ ) for network in networks
+ ] if networks else None,
+ )
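+ # On engine >= 4.3 the configuration is persisted atomically via
+ # commit_on_success; older engines need a separate commit_net_config()
+ # call, performed through the post_action hook below: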
+ if engine_supported(connection, '4.3'):
+ setup_params['commit_on_success'] = module.params['save']
+ elif module.params['save']:
+ setup_params['post_action'] = host_networks_module._action_save_configuration
+ host_networks_module.action(**setup_params)
+ elif state == 'absent' and nic:
+ attachments = []
+ nic_service = nics_service.nic_service(nic.id)
+
+ attached_labels = set([str(lbl.id) for lbl in nic_service.network_labels_service().list()])
+ if networks:
+ attachments_service = nic_service.network_attachments_service()
+ attachments = attachments_service.list()
+ attachments = [
+ attachment for attachment in attachments
+ if get_link_name(connection, attachment.network) in network_names
+ ]
+
+ # Remove unmanaged networks:
+ unmanaged_networks_service = host_service.unmanaged_networks_service()
+ unmanaged_networks = [(u.id, u.name) for u in unmanaged_networks_service.list()]
+ for net_id, net_name in unmanaged_networks:
+ if net_name in network_names:
+ if not module.check_mode:
+ unmanaged_networks_service.unmanaged_network_service(net_id).remove()
+ host_networks_module.changed = True
+
+ # Need to check if there are any labels to be removed, as the backend fails
+ # if we try to remove a non-existing label; for bonds and attachments it's OK:
+ if (labels and set(labels).intersection(attached_labels)) or bond or attachments:
+ setup_params = dict(
+ entity=host,
+ action='setup_networks',
+ check_connectivity=module.params['check'],
+ removed_bonds=[
+ otypes.HostNic(
+ name=bond.get('name'),
+ ),
+ ] if bond else None,
+ removed_labels=[
+ otypes.NetworkLabel(id=str(name)) for name in labels
+ ] if labels else None,
+ removed_network_attachments=attachments if attachments else None,
+ )
+ if engine_supported(connection, '4.4'):
+ setup_params['commit_on_success'] = module.params['save']
+ elif module.params['save']:
+ setup_params['post_action'] = host_networks_module._action_save_configuration
+ host_networks_module.action(**setup_params)
+
+ nic = search_by_name(nics_service, nic_name)
+ module.exit_json(**{
+ 'changed': host_networks_module.changed,
+ 'id': nic.id if nic else None,
+ 'host_nic': get_dict_of_struct(nic),
+ })
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_pm.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_pm.py
new file mode 100644
index 00000000..ca32a20c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_pm.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_host_pm
+short_description: Module to manage power management of hosts in oVirt/RHV
+version_added: "1.0.0"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage power management of hosts in oVirt/RHV."
+options:
+ name:
+ description:
+ - "Name of the host to manage."
+ required: true
+ aliases: ['host']
+ type: str
+ state:
+ description:
+ - "Should the host be present/absent."
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ address:
+ description:
+ - "Address of the power management interface."
+ type: str
+ username:
+ description:
+ - "Username to be used to connect to power management interface."
+ type: str
+ password:
+ description:
+ - "Password of the user specified in C(username) parameter."
+ type: str
+ type:
+ description:
+ - "Type of the power management. oVirt/RHV predefined values are I(drac5), I(ipmilan), I(rsa),
+ I(bladecenter), I(alom), I(apc), I(apc_snmp), I(eps), I(wti), I(rsb), I(cisco_ucs),
+ I(drac7), I(hpblade), I(ilo), I(ilo2), I(ilo3), I(ilo4), I(ilo_ssh),
+ but user can have defined custom type."
+ type: str
+ port:
+ description:
+ - "Power management interface port."
+ type: int
+ options:
+ description:
+ - "Dictionary of additional fence agent options (including Power Management slot)."
+ - "Additional information about options can be found at U(https://github.com/ClusterLabs/fence-agents/blob/master/doc/FenceAgentAPI.md)."
+ type: dict
+ encrypt_options:
+ description:
+ - "If I(true) options will be encrypted when send to agent."
+ aliases: ['encrypt']
+ type: bool
+ order:
+ description:
+ - "Integer value specifying, by default it's added at the end."
+ type: int
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication:
+
+# Add fence agent to host 'myhost'
+- ovirt.ovirt.ovirt_host_pm:
+ name: myhost
+ address: 1.2.3.4
+ options:
+ myoption1: x
+ myoption2: y
+ username: admin
+ password: admin
+ port: 3333
+ type: ipmilan
+
+# Add fence agent to host 'myhost' using 'slot' option
+- ovirt.ovirt.ovirt_host_pm:
+ name: myhost
+ address: 1.2.3.4
+ options:
+ myoption1: x
+ myoption2: y
+ slot: myslot
+ username: admin
+ password: admin
+ port: 3333
+ type: ipmilan
+
+
+# Remove ipmilan fence agent with address 1.2.3.4 on host 'myhost'
+- ovirt.ovirt.ovirt_host_pm:
+ state: absent
+ name: myhost
+ address: 1.2.3.4
+ type: ipmilan
+'''
+
+RETURN = '''
+id:
+ description: ID of the agent which is managed
+ returned: On success if agent is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+agent:
+ description: "Dictionary of all the agent attributes. Agent attributes can be found on your oVirt/RHV instance
+ at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/agent."
+ returned: On success if agent is found.
+ type: dict
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ ovirt_full_argument_spec,
+ search_by_name,
+)
+
+
+class HostModule(BaseModule):
+ def build_entity(self):
+ return otypes.Host(
+ power_management=otypes.PowerManagement(
+ enabled=True,
+ ),
+ )
+
+ def update_check(self, entity):
+ return equal(True, entity.power_management.enabled)
+
+
+class HostPmModule(BaseModule):
+
+ def pre_create(self, entity):
+ # Save the entity, so we know if Agent already existed
+ self.entity = entity
+
+ def build_entity(self):
+        # Pick the highest existing order so a newly added agent goes to the end:
+        last = next((s for s in sorted([a.order for a in self._service.list()], reverse=True)), 0)
+        order = self.param('order') if self.param('order') is not None else self.entity.order if self.entity else last + 1
+ return otypes.Agent(
+ address=self._module.params['address'],
+ encrypt_options=self._module.params['encrypt_options'],
+ options=[
+ otypes.Option(
+ name=name,
+ value=value,
+ ) for name, value in self._module.params['options'].items()
+ ] if self._module.params['options'] else None,
+ password=self._module.params['password'],
+ port=self._module.params['port'],
+ type=self._module.params['type'],
+ username=self._module.params['username'],
+ order=order,
+ )
+
+ def update_check(self, entity):
+ def check_options():
+ if self.param('options'):
+ current = []
+ if entity.options:
+ current = [(opt.name, str(opt.value)) for opt in entity.options]
+ passed = [(k, str(v)) for k, v in self.param('options').items()]
+ return sorted(current) == sorted(passed)
+ return True
+
+ return (
+ check_options() and
+ equal(self._module.params.get('address'), entity.address) and
+ equal(self._module.params.get('encrypt_options'), entity.encrypt_options) and
+ equal(self._module.params.get('username'), entity.username) and
+ equal(self._module.params.get('port'), entity.port) and
+ equal(self._module.params.get('type'), entity.type) and
+ equal(self._module.params.get('order'), entity.order)
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ name=dict(required=True, aliases=['host']),
+ address=dict(default=None),
+ username=dict(default=None),
+ password=dict(default=None, no_log=True),
+ type=dict(default=None),
+ port=dict(default=None, type='int'),
+ order=dict(default=None, type='int'),
+ options=dict(default=None, type='dict'),
+ encrypt_options=dict(default=None, type='bool', aliases=['encrypt']),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ hosts_service = connection.system_service().hosts_service()
+ host = search_by_name(hosts_service, module.params['name'])
+ fence_agents_service = hosts_service.host_service(host.id).fence_agents_service()
+
+ host_pm_module = HostPmModule(
+ connection=connection,
+ module=module,
+ service=fence_agents_service,
+ )
+ host_module = HostModule(
+ connection=connection,
+ module=module,
+ service=hosts_service,
+ )
+
+ state = module.params['state']
+ if state == 'present':
+ agent = host_pm_module.search_entity(
+ search_params={
+ 'address': module.params['address'],
+ 'type': module.params['type'],
+ }
+ )
+ ret = host_pm_module.create(entity=agent)
+
+ # Enable Power Management, if it's not enabled:
+ host_module.create(entity=host)
+ elif state == 'absent':
+ agent = host_pm_module.search_entity(
+ search_params={
+ 'address': module.params['address'],
+ 'type': module.params['type'],
+ }
+ )
+ ret = host_pm_module.remove(entity=agent)
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_storage_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_storage_info.py
new file mode 100644
index 00000000..3df410bf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_host_storage_info.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_host_storage_info
+short_description: Retrieve information about one or more oVirt/RHV HostStorages (applicable only for block storage)
+version_added: "1.0.0"
+author: "Daniel Erez (@derez)"
+description:
+ - "Retrieve information about one or more oVirt/RHV HostStorages (applicable only for block storage)."
+ - This module was called C(ovirt_host_storage_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_host_storage_info) module no longer returns C(ansible_facts)!
+options:
+ host:
+ description:
+ - "Host to get device list from."
+ required: true
+ type: str
+ iscsi:
+ description:
+ - "Dictionary with values for iSCSI storage type:"
+ suboptions:
+ address:
+ description:
+ - "Address of the iSCSI storage server."
+ target:
+ description:
+ - "The target IQN for the storage device."
+ username:
+ description:
+ - "A CHAP user name for logging into a target."
+ password:
+ description:
+ - "A CHAP password for logging into a target."
+ portal:
+ description:
+ - "The portal being used to connect with iscsi."
+ type: dict
+ fcp:
+ description:
+ - "Dictionary with values for fibre channel storage type:"
+ suboptions:
+ address:
+ description:
+ - "Address of the fibre channel storage server."
+ port:
+ description:
+ - "Port of the fibre channel storage server."
+ lun_id:
+ description:
+ - "LUN id."
+ type: dict
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about HostStorages with specified target and address:
+- ovirt.ovirt.ovirt_host_storage_info:
+ host: myhost
+ iscsi:
+ target: iqn.2016-08-09.domain-01:nickname
+ address: 10.34.63.204
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_host_storages }}"
+'''
+
+RETURN = '''
+ovirt_host_storages:
+ description: "List of dictionaries describing the HostStorage. HostStorage attributes are mapped to dictionary keys,
+ all HostStorage attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host_storage."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ get_id_by_name,
+)
+
+
+def _login(host_service, iscsi):
+ host_service.iscsi_login(
+ iscsi=otypes.IscsiDetails(
+ username=iscsi.get('username'),
+ password=iscsi.get('password'),
+ address=iscsi.get('address'),
+ target=iscsi.get('target'),
+ portal=iscsi.get('portal')
+ ),
+ )
+
+
+def _get_storage_type(params):
+ for sd_type in ['iscsi', 'fcp']:
+ if params.get(sd_type) is not None:
+ return sd_type
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ host=dict(required=True),
+ iscsi=dict(default=None, type='dict'),
+ fcp=dict(default=None, type='dict'),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+
+ # Get Host
+ hosts_service = connection.system_service().hosts_service()
+ host_id = get_id_by_name(hosts_service, module.params['host'])
+ storage_type = _get_storage_type(module.params)
+ host_service = hosts_service.host_service(host_id)
+
+ if storage_type == 'iscsi':
+ # Login
+ iscsi = module.params.get('iscsi')
+ _login(host_service, iscsi)
+
+ # Get LUNs exposed from the specified target
+ host_storages = host_service.storage_service().list()
+
+        if storage_type == 'iscsi':
+            filtered_host_storages = [host_storage for host_storage in host_storages
+                                      if host_storage.type == otypes.StorageType.ISCSI]
+            if 'target' in iscsi:
+                filtered_host_storages = [host_storage for host_storage in filtered_host_storages
+                                          if iscsi.get('target') == host_storage.logical_units[0].target]
+        elif storage_type == 'fcp':
+            filtered_host_storages = [host_storage for host_storage in host_storages
+                                      if host_storage.type == otypes.StorageType.FCP]
+        else:
+            # Neither iscsi nor fcp was specified; return all storages unfiltered:
+            filtered_host_storages = host_storages
+
+        result = dict(
+            ovirt_host_storages=[
+                get_dict_of_struct(
+                    struct=c,
+                    connection=connection,
+                    fetch_nested=module.params.get('fetch_nested'),
+                    attributes=module.params.get('nested_attributes'),
+                ) for c in filtered_host_storages
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_instance_type.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_instance_type.py
new file mode 100644
index 00000000..6a39e2b5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_instance_type.py
@@ -0,0 +1,632 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_instance_type
+short_description: Module to manage Instance Types in oVirt/RHV
+version_added: "1.0.0"
+author:
+- Martin Necas (@mnecas)
+- Ondra Machacek (@machacekondra)
+description:
+ - This module manages whole lifecycle of the Instance Type in oVirt/RHV.
+options:
+ name:
+ description:
+ - Name of the Instance Type to manage.
+            - If the instance type doesn't exist, C(name) is required. Otherwise C(id) or C(name) can be used.
+ type: str
+ id:
+ description:
+ - ID of the Instance Type to manage.
+ type: str
+ state:
+ description:
+ - Should the Instance Type be present/absent.
+            - I(present) state will create/update the instance type and won't change its state if it already exists.
+ choices: [ absent, present ]
+ default: present
+ type: str
+ memory:
+ description:
+ - Amount of memory of the Instance Type. Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
+ - Default value is set by engine.
+ type: str
+ memory_guaranteed:
+ description:
+ - Amount of minimal guaranteed memory of the Instance Type.
+ Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
+ - C(memory_guaranteed) parameter can't be lower than C(memory) parameter.
+ - Default value is set by engine.
+ type: str
+ nics:
+ description:
+            - List of NICs which should be attached to the Virtual Machine. Each NIC is described by the following dictionary.
+            - NOTE - This parameter is used only when C(state) is I(running) or I(present) and can only create NICs.
+              To manage NICs of the instance type in more depth please use the M(ovirt.ovirt.ovirt_nic) module instead.
+ suboptions:
+ name:
+ description:
+ - "Name of the NIC."
+ type: str
+ profile_name:
+ description:
+ - "Profile name where NIC should be attached."
+ type: str
+ interface:
+ description:
+ - "Type of the network interface."
+ type: str
+ choices: [ virtio, e1000, rtl8139 ]
+ default: virtio
+ mac_address:
+ description:
+ - "Custom MAC address of the network interface, by default it's obtained from MAC pool."
+ type: str
+ type: list
+ elements: dict
+ memory_max:
+ description:
+ - Upper bound of instance type memory up to which memory hot-plug can be performed.
+ Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
+ - Default value is set by engine.
+ type: str
+ cpu_cores:
+ description:
+            - Number of virtual CPU cores of the Instance Type.
+ - Default value is set by oVirt/RHV engine.
+ type: int
+ cpu_sockets:
+ description:
+            - Number of virtual CPU sockets of the Instance Type.
+ - Default value is set by oVirt/RHV engine.
+ type: int
+ cpu_threads:
+ description:
+            - Number of virtual CPU threads of the Instance Type.
+ - Default value is set by oVirt/RHV engine.
+ type: int
+ operating_system:
+ description:
+ - Operating system of the Instance Type, for example 'rhel_8x64'.
+ - Default value is set by oVirt/RHV engine.
+ - Use the M(ovirt.ovirt.ovirt_vm_os_info) module to obtain the current list.
+ type: str
+ boot_devices:
+ description:
+ - List of boot devices which should be used to boot. For example C([ cdrom, hd ]).
+ - Default value is set by oVirt/RHV engine.
+ choices: [ cdrom, hd, network ]
+ type: list
+ elements: str
+ serial_console:
+ description:
+ - "I(True) enable VirtIO serial console, I(False) to disable it. By default is chosen by oVirt/RHV engine."
+ type: bool
+ usb_support:
+ description:
+ - "I(True) enable USB support, I(False) to disable it. By default is chosen by oVirt/RHV engine."
+ type: bool
+ high_availability:
+ description:
+ - If I(yes) Instance Type will be set as highly available.
+ - If I(no) Instance Type won't be set as highly available.
+ - If no value is passed, default value is set by oVirt/RHV engine.
+ type: bool
+ high_availability_priority:
+ description:
+ - Indicates the priority of the instance type inside the run and migration queues.
+ Instance Type with higher priorities will be started and migrated before instance types with lower
+ priorities. The value is an integer between 0 and 100. The higher the value, the higher the priority.
+ - If no value is passed, default value is set by oVirt/RHV engine.
+ type: int
+ watchdog:
+ description:
+ - "Assign watchdog device for the instance type."
+ - "Watchdogs is a dictionary which can have following values:"
+ - "C(model) - Model of the watchdog device. For example: I(i6300esb), I(diag288) or I(null)."
+ - "C(action) - Watchdog action to be performed when watchdog is triggered. For example: I(none), I(reset), I(poweroff), I(pause) or I(dump)."
+ type: dict
+ host:
+ description:
+ - Specify host where Instance Type should be running. By default the host is chosen by engine scheduler.
+ - This parameter is used only when C(state) is I(running) or I(present).
+ type: str
+ graphical_console:
+ description:
+ - "Assign graphical console to the instance type."
+ - "Graphical console is a dictionary which can have following values:"
+ - "C(headless_mode) - If I(true) disable the graphics console for this instance type."
+ - "C(protocol) - Graphical protocol, a list of I(spice), I(vnc), or both."
+ type: dict
+ description:
+ description:
+ - "Description of the instance type."
+ type: str
+ cpu_mode:
+ description:
+ - "CPU mode of the instance type. It can be some of the following: I(host_passthrough), I(host_model) or I(custom)."
+ - "For I(host_passthrough) CPU type you need to set C(placement_policy) to I(pinned)."
+ - "If no value is passed, default value is set by oVirt/RHV engine."
+ type: str
+ rng_device:
+ description:
+ - "Random number generator (RNG). You can choose of one the following devices I(urandom), I(random) or I(hwrng)."
+ - "In order to select I(hwrng), you must have it enabled on cluster first."
+ - "/dev/urandom is used for cluster version >= 4.1, and /dev/random for cluster version <= 4.0"
+ type: str
+ rng_bytes:
+ description:
+ - "Number of bytes allowed to consume per period."
+ type: int
+ rng_period:
+ description:
+ - "Duration of one period in milliseconds."
+ type: int
+ placement_policy:
+ description:
+ - "The configuration of the instance type's placement policy."
+ - "Placement policy can be one of the following values:"
+ - "C(migratable) - Allow manual and automatic migration."
+ - "C(pinned) - Do not allow migration."
+ - "C(user_migratable) - Allow manual migration only."
+ - "If no value is passed, default value is set by oVirt/RHV engine."
+ type: str
+ cpu_pinning:
+ description:
+ - "CPU Pinning topology to map instance type CPU to host CPU."
+ - "CPU Pinning topology is a list of dictionary which can have following values:"
+ suboptions:
+ cpu:
+ description:
+ - "Number of the host CPU."
+ vcpu:
+ description:
+ - "Number of the instance type CPU."
+ type: list
+ elements: dict
+ soundcard_enabled:
+ description:
+ - "If I(true), the sound card is added to the instance type."
+ type: bool
+ smartcard_enabled:
+ description:
+ - "If I(true), use smart card authentication."
+ type: bool
+ virtio_scsi:
+ description:
+ - "If I(true), virtio scsi will be enabled."
+ type: bool
+ io_threads:
+ description:
+ - "Number of IO threads used by instance type. I(0) means IO threading disabled."
+ type: int
+ ballooning_enabled:
+ description:
+ - "If I(true), use memory ballooning."
+ - "Memory balloon is a guest device, which may be used to re-distribute / reclaim the host memory
+ based on instance type needs in a dynamic way. In this way it's possible to create memory over commitment states."
+ type: bool
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create instance type
+- name: Create instance type
+ ovirt.ovirt.ovirt_instance_type:
+ state: present
+ name: myit
+ rng_device: hwrng
+ rng_bytes: 200
+ rng_period: 200
+ soundcard_enabled: true
+ virtio_scsi: true
+ boot_devices:
+ - network
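+
+# Set memory limits and enable ballooning (an illustrative sketch; sizes use
+# the IEC 60027-2 prefixes described above):
+- ovirt.ovirt.ovirt_instance_type:
+    name: myit
+    memory: 2GiB
+    memory_guaranteed: 1GiB
+    memory_max: 4GiB
+    ballooning_enabled: true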
+
+# Remove instance type
+- ovirt.ovirt.ovirt_instance_type:
+ state: absent
+ name: myit
+
+
+# Create instance type with predefined memory and cpu limits.
+- ovirt.ovirt.ovirt_instance_type:
+ state: present
+ name: myit
+ memory: 2GiB
+ cpu_cores: 2
+ cpu_sockets: 2
+ nics:
+ - name: nic1
+
+# Enable usb support and serial console
+- ovirt.ovirt.ovirt_instance_type:
+ name: myit
+ usb_support: True
+ serial_console: True
+
+# Use graphical console with spice and vnc
+- name: Create a instance type that has the console configured for both Spice and VNC
+ ovirt.ovirt.ovirt_instance_type:
+ name: myit
+ graphical_console:
+ protocol:
+ - spice
+ - vnc
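+
+# Configure a watchdog and pin the instance type to a host (an illustrative
+# sketch; 'myhost' is a placeholder and I(host_passthrough) requires
+# C(placement_policy) to be I(pinned)):
+- ovirt.ovirt.ovirt_instance_type:
+    name: myit
+    watchdog:
+      model: i6300esb
+      action: reset
+    cpu_mode: host_passthrough
+    placement_policy: pinned
+    host: myhost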
+'''
+
+
+RETURN = '''
+
+id:
+ description: ID of the instance type which is managed
+ returned: On success if instance type is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+instancetype:
+ description: "Dictionary of all the instance type attributes. instance type attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/instance_type."
+ returned: On success if instance type is found.
+ type: dict
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import traceback
+
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_params,
+ check_sdk,
+ convert_to_bytes,
+ create_connection,
+ equal,
+ get_dict_of_struct,
+ get_entity,
+ get_link_name,
+ get_id_by_name,
+ ovirt_full_argument_spec,
+ search_by_attributes,
+ search_by_name,
+ wait,
+)
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+
+class InstanceTypeModule(BaseModule):
+ def build_entity(self):
+ return otypes.InstanceType(
+ id=self.param('id'),
+ name=self.param('name'),
+ console=(
+ otypes.Console(enabled=self.param('serial_console'))
+ ) if self.param('serial_console') is not None else None,
+ usb=(
+ otypes.Usb(enabled=self.param('usb_support'))
+ ) if self.param('usb_support') is not None else None,
+ high_availability=otypes.HighAvailability(
+ enabled=self.param('high_availability'),
+ priority=self.param('high_availability_priority'),
+ ) if self.param('high_availability') is not None or self.param('high_availability_priority') else None,
+ cpu=otypes.Cpu(
+ topology=otypes.CpuTopology(
+ cores=self.param('cpu_cores'),
+ sockets=self.param('cpu_sockets'),
+ threads=self.param('cpu_threads'),
+ ) if any((
+ self.param('cpu_cores'),
+ self.param('cpu_sockets'),
+ self.param('cpu_threads')
+ )) else None,
+ cpu_tune=otypes.CpuTune(
+ vcpu_pins=[
+ otypes.VcpuPin(vcpu=int(pin['vcpu']), cpu_set=str(pin['cpu'])) for pin in self.param('cpu_pinning')
+ ],
+ ) if self.param('cpu_pinning') else None,
+ mode=otypes.CpuMode(self.param('cpu_mode')) if self.param(
+ 'cpu_mode') else None,
+ ) if any((
+ self.param('cpu_cores'),
+ self.param('cpu_sockets'),
+ self.param('cpu_threads'),
+ self.param('cpu_mode'),
+ self.param('cpu_pinning')
+ )) else None,
+ os=otypes.OperatingSystem(
+ type=self.param('operating_system'),
+ boot=otypes.Boot(
+ devices=[
+ otypes.BootDevice(dev) for dev in self.param('boot_devices')
+ ],
+ ) if self.param('boot_devices') else None
+ ),
+ rng_device=otypes.RngDevice(
+ source=otypes.RngSource(self.param('rng_device')),
+ rate=otypes.Rate(
+ bytes=self.param('rng_bytes'),
+ period=self.param('rng_period')
+ )
+ ) if self.param('rng_device') else None,
+ memory=convert_to_bytes(
+ self.param('memory')
+ ) if self.param('memory') else None,
+ virtio_scsi=otypes.VirtioScsi(
+ enabled=self.param('virtio_scsi')
+ ) if self.param('virtio_scsi') else None,
+ memory_policy=otypes.MemoryPolicy(
+ guaranteed=convert_to_bytes(self.param('memory_guaranteed')),
+ ballooning=self.param('ballooning_enabled'),
+ max=convert_to_bytes(self.param('memory_max')),
+ ) if any((
+ self.param('memory_guaranteed'),
+ self.param('ballooning_enabled') is not None,
+ self.param('memory_max')
+ )) else None,
+ description=self.param('description'),
+ placement_policy=otypes.VmPlacementPolicy(
+ affinity=otypes.VmAffinity(self.param('placement_policy')),
+ hosts=[
+ otypes.Host(name=self.param('host')),
+ ] if self.param('host') else None,
+ ) if self.param('placement_policy') else None,
+ soundcard_enabled=self.param('soundcard_enabled'),
+ display=otypes.Display(
+ smartcard_enabled=self.param('smartcard_enabled')
+ ) if self.param('smartcard_enabled') is not None else None,
+ io=otypes.Io(
+ threads=self.param('io_threads'),
+ ) if self.param('io_threads') is not None else None,
+ )
+
+ def __attach_watchdog(self, entity):
+ watchdogs_service = self._service.service(entity.id).watchdogs_service()
+ watchdog = self.param('watchdog')
+ if watchdog is not None:
+ current_watchdog = next(iter(watchdogs_service.list()), None)
+ if watchdog.get('model') is None and current_watchdog:
+ watchdogs_service.watchdog_service(current_watchdog.id).remove()
+ return True
+ elif watchdog.get('model') is not None and current_watchdog is None:
+ watchdogs_service.add(
+ otypes.Watchdog(
+ model=otypes.WatchdogModel(watchdog.get('model').lower()),
+ action=otypes.WatchdogAction(watchdog.get('action')),
+ )
+ )
+ return True
+ elif current_watchdog is not None:
+ if (
+ str(current_watchdog.model).lower() != watchdog.get('model').lower() or
+ str(current_watchdog.action).lower() != watchdog.get('action').lower()
+ ):
+ watchdogs_service.watchdog_service(current_watchdog.id).update(
+ otypes.Watchdog(
+ model=otypes.WatchdogModel(watchdog.get('model')),
+ action=otypes.WatchdogAction(watchdog.get('action')),
+ )
+ )
+ return True
+ return False
+
+ def __get_vnic_profile_id(self, nic):
+ """
+        Return the VNIC profile ID looked up by its name. Because there can be
+        multiple VNIC profiles with the same name, the cluster is used as an
+        additional filter criterion.
+ """
+ vnics_service = self._connection.system_service().vnic_profiles_service()
+ clusters_service = self._connection.system_service().clusters_service()
+ cluster = search_by_name(clusters_service, self.param('cluster'))
+ profiles = [
+ profile for profile in vnics_service.list()
+ if profile.name == nic.get('profile_name')
+ ]
+ cluster_networks = [
+ net.id for net in self._connection.follow_link(cluster.networks)
+ ]
+ try:
+ return next(
+ profile.id for profile in profiles
+ if profile.network.id in cluster_networks
+ )
+ except StopIteration:
+ raise Exception(
+ "Profile '%s' was not found in cluster '%s'" % (
+ nic.get('profile_name'),
+ self.param('cluster')
+ )
+ )
+
+ def __attach_nics(self, entity):
+ # Attach NICs to instance type, if specified:
+ nics_service = self._service.service(entity.id).nics_service()
+ for nic in self.param('nics'):
+ if search_by_name(nics_service, nic.get('name')) is None:
+ if not self._module.check_mode:
+ nics_service.add(
+ otypes.Nic(
+ name=nic.get('name'),
+ interface=otypes.NicInterface(
+ nic.get('interface', 'virtio')
+ ),
+ vnic_profile=otypes.VnicProfile(
+ id=self.__get_vnic_profile_id(nic),
+ ) if nic.get('profile_name') else None,
+ mac=otypes.Mac(
+ address=nic.get('mac_address')
+ ) if nic.get('mac_address') else None,
+ )
+ )
+ self.changed = True
+
+ def __attach_graphical_console(self, entity):
+ graphical_console = self.param('graphical_console')
+ if not graphical_console:
+ return False
+
+ it_service = self._service.instance_type_service(entity.id)
+ gcs_service = it_service.graphics_consoles_service()
+ graphical_consoles = gcs_service.list()
+ # Remove all graphical consoles if there are any:
+ if bool(graphical_console.get('headless_mode')):
+ if not self._module.check_mode:
+ for gc in graphical_consoles:
+ gcs_service.console_service(gc.id).remove()
+ return len(graphical_consoles) > 0
+
+        # If there are no graphical consoles yet, add the requested ones:
+ protocol = graphical_console.get('protocol')
+ if isinstance(protocol, str):
+ protocol = [protocol]
+
+ current_protocols = [str(gc.protocol) for gc in graphical_consoles]
+ if not current_protocols:
+ if not self._module.check_mode:
+ for p in protocol:
+ gcs_service.add(
+ otypes.GraphicsConsole(
+ protocol=otypes.GraphicsType(p),
+ )
+ )
+ return True
+
+ # Update consoles:
+ if sorted(protocol) != sorted(current_protocols):
+ if not self._module.check_mode:
+ for gc in graphical_consoles:
+ gcs_service.console_service(gc.id).remove()
+ for p in protocol:
+ gcs_service.add(
+ otypes.GraphicsConsole(
+ protocol=otypes.GraphicsType(p),
+ )
+ )
+ return True
+
+ def post_update(self, entity):
+ self.post_present(entity.id)
+
+    def post_present(self, entity_id):
+        entity = self._service.service(entity_id).get()
+        # Accumulate the changed flag instead of overwriting it, so a change
+        # made by an earlier attach step is not lost:
+        self.__attach_nics(entity)
+        self.changed = self.__attach_watchdog(entity) or self.changed
+        self.changed = self.__attach_graphical_console(entity) or self.changed
+
+ def update_check(self, entity):
+ cpu_mode = getattr(entity.cpu, 'mode')
+ it_display = entity.display
+ return (
+ equal(convert_to_bytes(self.param('memory_guaranteed')), entity.memory_policy.guaranteed) and
+ equal(convert_to_bytes(self.param('memory_max')), entity.memory_policy.max) and
+ equal(self.param('cpu_cores'), entity.cpu.topology.cores) and
+ equal(self.param('cpu_sockets'), entity.cpu.topology.sockets) and
+ equal(self.param('cpu_threads'), entity.cpu.topology.threads) and
+ equal(self.param('cpu_mode'), str(cpu_mode) if cpu_mode else None) and
+ equal(self.param('type'), str(entity.type)) and
+ equal(self.param('name'), str(entity.name)) and
+ equal(self.param('operating_system'), str(entity.os.type)) and
+ equal(self.param('soundcard_enabled'), entity.soundcard_enabled) and
+ equal(self.param('smartcard_enabled'), getattr(it_display, 'smartcard_enabled', False)) and
+ equal(self.param('io_threads'), entity.io.threads) and
+ equal(self.param('ballooning_enabled'), entity.memory_policy.ballooning) and
+ equal(self.param('serial_console'), getattr(entity.console, 'enabled', None)) and
+ equal(self.param('usb_support'), entity.usb.enabled) and
+            equal(self.param('virtio_scsi'), getattr(entity.virtio_scsi, 'enabled', None)) and
+ equal(self.param('high_availability'), entity.high_availability.enabled) and
+ equal(self.param('high_availability_priority'), entity.high_availability.priority) and
+ equal(self.param('boot_devices'), [str(dev) for dev in getattr(entity.os.boot, 'devices', [])]) and
+ equal(self.param('description'), entity.description) and
+ equal(self.param('rng_device'), str(entity.rng_device.source) if entity.rng_device else None) and
+ equal(self.param('rng_bytes'), entity.rng_device.rate.bytes if entity.rng_device else None) and
+ equal(self.param('rng_period'), entity.rng_device.rate.period if entity.rng_device else None) and
+ equal(self.param('placement_policy'), str(entity.placement_policy.affinity) if entity.placement_policy else None)
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(type='str', default='present',
+ choices=['absent', 'present']),
+ name=dict(type='str'),
+ id=dict(type='str'),
+ memory=dict(type='str'),
+ memory_guaranteed=dict(type='str'),
+ memory_max=dict(type='str'),
+ cpu_sockets=dict(type='int'),
+ cpu_cores=dict(type='int'),
+ cpu_threads=dict(type='int'),
+ operating_system=dict(type='str'),
+ boot_devices=dict(type='list', choices=['cdrom', 'hd', 'network'], elements='str'),
+ serial_console=dict(type='bool'),
+ usb_support=dict(type='bool'),
+ high_availability=dict(type='bool'),
+ high_availability_priority=dict(type='int'),
+ watchdog=dict(type='dict'),
+ host=dict(type='str'),
+ graphical_console=dict(type='dict'),
+ description=dict(type='str'),
+ cpu_mode=dict(type='str'),
+ rng_device=dict(type='str'),
+ rng_bytes=dict(type='int', default=None),
+ rng_period=dict(type='int', default=None),
+ placement_policy=dict(type='str'),
+ cpu_pinning=dict(type='list', elements='dict'),
+ soundcard_enabled=dict(type='bool', default=None),
+ virtio_scsi=dict(type='bool', default=None),
+ smartcard_enabled=dict(type='bool', default=None),
+ io_threads=dict(type='int', default=None),
+ nics=dict(type='list', default=[], elements='dict'),
+ ballooning_enabled=dict(type='bool', default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[['id', 'name']],
+ )
+
+ check_sdk(module)
+ check_params(module)
+
+ try:
+ state = module.params['state']
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ its_service = connection.system_service().instance_types_service()
+ its_module = InstanceTypeModule(
+ connection=connection,
+ module=module,
+ service=its_service,
+ )
+ it = its_module.search_entity()
+
+ if state == 'present':
+ ret = its_module.create(
+ entity=it
+ )
+ its_module.post_present(ret['id'])
+ ret['changed'] = its_module.changed
+ elif state == 'absent':
+ ret = its_module.remove()
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_job.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_job.py
new file mode 100644
index 00000000..b30129e1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_job.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_job
+short_description: Module to manage jobs in oVirt/RHV
+version_added: "1.0.0"
+author: "Martin Necas (@mnecas)"
+description:
+ - "This module manage jobs in oVirt/RHV. It can also manage steps of the job."
+options:
+ description:
+ description:
+ - "Description of the job."
+ - "When task with same description has already finished and you rerun taks it will create new job."
+ required: true
+ type: str
+ state:
+ description:
+ - "Should the job be C(present)/C(absent)/C(failed)."
+ - "C(started) is alias for C(present). C(finished) is alias for C(absent). Same in the steps."
+ - "Note when C(finished)/C(failed) it will finish/fail all steps."
+ choices: ['present', 'absent', 'started', 'finished', 'failed']
+ default: present
+ type: str
+ steps:
+ description:
+ - "The steps of the job."
+ suboptions:
+ description:
+ description:
+ - "Description of the step."
+ required: true
+ state:
+ description:
+ - "Should the step be present/absent/failed."
+ - "Note when one step fail whole job will fail"
+ - "Note when all steps are finished it will finish job."
+ choices: ['present', 'absent', 'started', 'finished', 'failed']
+ default: present
+ type: list
+ elements: dict
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Create job with two steps
+ ovirt.ovirt.ovirt_job:
+ description: job_name
+ steps:
+ - description: step_name_A
+ - description: step_name_B
+
+- name: Finish one step
+ ovirt.ovirt.ovirt_job:
+ description: job_name
+ steps:
+ - description: step_name_A
+ state: finished
+
+- name: When you fail one step whole job will stop
+ ovirt.ovirt.ovirt_job:
+ description: job_name
+ steps:
+ - description: step_name_B
+ state: failed
+
+- name: Finish all steps
+ ovirt.ovirt.ovirt_job:
+ description: job_name
+ state: finished
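+
+- name: Fail the whole job (any unfinished steps are failed as well)
+  ovirt.ovirt.ovirt_job:
+    description: job_name
+    state: failed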
+'''
+
+RETURN = '''
+id:
+ description: ID of the job which is managed
+ returned: On success if job is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+job:
+ description: "Dictionary of all the job attributes. Job attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/job."
+ returned: On success if job is found.
+ type: dict
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+    check_sdk,
+    create_connection,
+    ovirt_full_argument_spec,
+    get_dict_of_struct,
+)
+
+
+def build_job(description):
+ return otypes.Job(
+ description=description,
+ status=otypes.JobStatus.STARTED,
+ external=True,
+ auto_cleared=True
+ )
+
+
+def build_step(description, job_id):
+ return otypes.Step(
+ description=description,
+ type=otypes.StepEnum.UNKNOWN,
+ job=otypes.Job(
+ id=job_id
+ ),
+ status=otypes.StepStatus.STARTED,
+ external=True,
+ )
+
+
+def attach_steps(module, job_id, jobs_service):
+ changed = False
+ steps_service = jobs_service.job_service(job_id).steps_service()
+ if module.params.get('steps'):
+ for step in module.params.get('steps'):
+ step_entity = get_entity(steps_service, step.get('description'))
+ step_state = step.get('state', 'present')
+ if step_state in ['present', 'started']:
+ if step_entity is None:
+ steps_service.add(build_step(step.get('description'), job_id))
+ changed = True
+ if step_entity is not None and step_entity.status not in [otypes.StepStatus.FINISHED, otypes.StepStatus.FAILED]:
+ if step_state in ['absent', 'finished']:
+ steps_service.step_service(step_entity.id).end(succeeded=True)
+ changed = True
+ elif step_state == 'failed':
+ steps_service.step_service(step_entity.id).end(succeeded=False)
+ changed = True
+ return changed
+
+
+def get_entity(service, description):
+ all_entities = service.list()
+ for entity in all_entities:
+ if entity.description == description and entity.status not in [otypes.StepStatus.FINISHED, otypes.JobStatus.FINISHED]:
+ return entity
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent', 'started', 'finished', 'failed'],
+ default='present',
+ ),
+ description=dict(required=True),
+ steps=dict(default=None, type='list', elements='dict'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=False,
+ )
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ jobs_service = connection.system_service().jobs_service()
+
+ state = module.params['state']
+ job = get_entity(jobs_service, module.params['description'])
+ changed = False
+ if state in ['present', 'started']:
+ if job is None or job.status in [otypes.JobStatus.FINISHED, otypes.JobStatus.FAILED]:
+ job = jobs_service.add(build_job(module.params['description']))
+ changed = True
+ changed = attach_steps(module, job.id, jobs_service) or changed
+
+ if job is not None and job.status not in [otypes.JobStatus.FINISHED, otypes.JobStatus.FAILED]:
+ if state in ['absent', 'finished']:
+ jobs_service.job_service(job.id).end(succeeded=True)
+ changed = True
+
+ elif state == 'failed':
+ jobs_service.job_service(job.id).end(succeeded=False)
+ changed = True
+
+ ret = {
+ 'changed': changed,
+ 'id': getattr(job, 'id', None),
+ 'job': get_dict_of_struct(
+ struct=job,
+ connection=connection,
+ fetch_nested=True,
+ attributes=module.params.get('nested_attributes'),
+ ),
+ }
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_mac_pool.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_mac_pool.py
new file mode 100644
index 00000000..257fbce4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_mac_pool.py
@@ -0,0 +1,186 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_mac_pool
+short_description: Module to manage MAC pools in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "This module manage MAC pools in oVirt/RHV."
+options:
+ id:
+ description:
+ - "ID of the mac pool to manage."
+ type: str
+ name:
+ description:
+ - "Name of the MAC pool to manage."
+ required: true
+ type: str
+ description:
+ description:
+ - "Description of the MAC pool."
+ type: str
+ state:
+ description:
+ - "Should the mac pool be present or absent."
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ allow_duplicates:
+ description:
+ - "If I(true) allow a MAC address to be used multiple times in a pool."
+ - "Default value is set by oVirt/RHV engine to I(false)."
+ type: bool
+ ranges:
+ description:
+ - "List of MAC ranges. The from and to should be split by comma."
+ - "For example: 00:1a:4a:16:01:51,00:1a:4a:16:01:61"
+ type: list
+ elements: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create MAC pool:
+- ovirt.ovirt.ovirt_mac_pool:
+ name: mymacpool
+ allow_duplicates: false
+ ranges:
+ - 00:1a:4a:16:01:51,00:1a:4a:16:01:61
+ - 00:1a:4a:16:02:51,00:1a:4a:16:02:61
+
+# Remove MAC pool:
+- ovirt.ovirt.ovirt_mac_pool:
+ state: absent
+ name: mymacpool
+
+# Change MAC pool name
+- ovirt.ovirt.ovirt_mac_pool:
+    id: 00000000-0000-0000-0000-000000000000
+    name: "new_mac_pool_name"
+'''
+
+RETURN = '''
+id:
+ description: ID of the MAC pool which is managed
+ returned: On success if MAC pool is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+mac_pool:
+ description: "Dictionary of all the MAC pool attributes. MAC pool attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/mac_pool."
+ returned: On success if MAC pool is found.
+ type: dict
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ equal,
+ create_connection,
+ ovirt_full_argument_spec,
+)
+
+
+class MACPoolModule(BaseModule):
+
+ def build_entity(self):
+ return otypes.MacPool(
+ name=self._module.params['name'],
+ id=self._module.params['id'],
+ allow_duplicates=self._module.params['allow_duplicates'],
+ description=self._module.params['description'],
+ ranges=[
+ otypes.Range(
+ from_=mac_range.split(',')[0],
+ to=mac_range.split(',')[1],
+ )
+ for mac_range in self._module.params['ranges']
+ ] if self._module.params['ranges'] else None,
+ )
+
+ def _compare_ranges(self, entity):
+ if self._module.params['ranges'] is not None:
+ ranges = sorted([
+ '%s,%s' % (mac_range.from_, mac_range.to)
+ for mac_range in entity.ranges
+ ])
+ return equal(sorted(self._module.params['ranges']), ranges)
+
+ return True
+
+ def update_check(self, entity):
+ return (
+ self._compare_ranges(entity) and
+ equal(self._module.params['allow_duplicates'], entity.allow_duplicates) and
+ equal(self._module.params['description'], entity.description) and
+ equal(self._module.params['name'], entity.name)
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ name=dict(required=True),
+ id=dict(default=None),
+ allow_duplicates=dict(default=None, type='bool'),
+ description=dict(default=None),
+ ranges=dict(default=None, type='list', elements='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ mac_pools_service = connection.system_service().mac_pools_service()
+ mac_pools_module = MACPoolModule(
+ connection=connection,
+ module=module,
+ service=mac_pools_service,
+ )
+
+ state = module.params['state']
+ if state == 'present':
+ ret = mac_pools_module.create()
+ elif state == 'absent':
+ ret = mac_pools_module.remove()
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_network.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_network.py
new file mode 100644
index 00000000..889914ae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_network.py
@@ -0,0 +1,380 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_network
+short_description: Module to manage logical networks in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Module to manage logical networks in oVirt/RHV"
+options:
+ id:
+ description:
+ - "ID of the network to manage."
+ type: str
+ name:
+ description:
+ - "Name of the network to manage."
+ required: true
+ type: str
+ state:
+ description:
+ - "Should the network be present or absent"
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ data_center:
+ description:
+ - "Datacenter name where network reside."
+ type: str
+ required: true
+ description:
+ description:
+ - "Description of the network."
+ type: str
+ comment:
+ description:
+ - "Comment of the network."
+ type: str
+ vlan_tag:
+ description:
+ - "Specify VLAN tag."
+ - "NOTE - To remove the vlan_tag use -1."
+ type: int
+ external_provider:
+ description:
+ - "Name of external network provider."
+ - "At first it tries to import the network when not found it will create network in external provider."
+ type: str
+ vm_network:
+ description:
+ - "If I(True) network will be marked as network for VM."
+ - "VM network carries traffic relevant to the virtual machine."
+ type: bool
+ mtu:
+ description:
+ - "Maximum transmission unit (MTU) of the network."
+ type: int
+ clusters:
+ description:
+ - "List of dictionaries describing how the network is managed in specific cluster."
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Cluster name.
+ assigned:
+ description:
+ - I(true) if the network should be assigned to cluster. Default is I(true).
+ type: bool
+ required:
+ description:
+ - I(true) if the network must remain operational for all hosts associated with this network.
+ type: bool
+ display:
+ description:
+                    - I(true) if the network should be marked as a display network.
+ type: bool
+ migration:
+ description:
+                    - I(true) if the network should be marked as a migration network.
+ type: bool
+ gluster:
+ description:
+                    - I(true) if the network should be marked as a gluster network.
+ type: bool
+ label:
+ description:
+ - "Name of the label to assign to the network."
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create network
+- ovirt.ovirt.ovirt_network:
+ data_center: mydatacenter
+ name: mynetwork
+ vlan_tag: 10
+ vm_network: true
+
+# Remove network
+- ovirt.ovirt.ovirt_network:
+ state: absent
+ name: mynetwork
+
+# Change Network Name
+- ovirt.ovirt.ovirt_network:
+ id: 00000000-0000-0000-0000-000000000000
+ name: "new_network_name"
+ data_center: mydatacenter
+
+# Add network from external provider
+- ovirt.ovirt.ovirt_network:
+ data_center: mydatacenter
+ name: mynetwork
+ external_provider: ovirt-provider-ovn
+
+# Remove vlan_tag
+- ovirt.ovirt.ovirt_network:
+ data_center: mydatacenter
+ name: mynetwork
+ vlan_tag: -1
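+
+# Assign the network to a cluster as a required migration network (an
+# illustrative sketch; 'mycluster' is a placeholder name):
+- ovirt.ovirt.ovirt_network:
+    data_center: mydatacenter
+    name: mynetwork
+    clusters:
+      - name: mycluster
+        assigned: true
+        required: true
+        migration: true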
+'''
+
+RETURN = '''
+id:
+ description: "ID of the managed network"
+ returned: "On success if network is found."
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+network:
+ description: "Dictionary of all the network attributes. Network attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/network."
+ returned: "On success if network is found."
+ type: dict
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ check_params,
+ create_connection,
+ equal,
+ ovirt_full_argument_spec,
+ search_by_name,
+ get_id_by_name,
+ get_dict_of_struct,
+ get_entity
+)
+
+
+class NetworksModule(BaseModule):
+ def build_entity(self):
+ if self.param('external_provider'):
+ ons_service = self._connection.system_service().openstack_network_providers_service()
+ on_service = ons_service.provider_service(get_id_by_name(ons_service, self.param('external_provider')))
+ return otypes.Network(
+ name=self._module.params['name'],
+ comment=self._module.params['comment'],
+ description=self._module.params['description'],
+ id=self._module.params['id'],
+ data_center=otypes.DataCenter(
+ name=self._module.params['data_center'],
+ ) if self._module.params['data_center'] else None,
+ vlan=otypes.Vlan(
+ self._module.params['vlan_tag'] if self._module.params['vlan_tag'] != -1 else None,
+ ) if self._module.params['vlan_tag'] is not None else None,
+ usages=[
+ otypes.NetworkUsage.VM if self._module.params['vm_network'] else None
+ ] if self._module.params['vm_network'] is not None else None,
+ mtu=self._module.params['mtu'],
+ external_provider=otypes.OpenStackNetworkProvider(id=on_service.get().id)
+ if self.param('external_provider') else None,
+ )
+
+ def post_create(self, entity):
+ self._update_label_assignments(entity)
+
+ def _update_label_assignments(self, entity):
+ if self.param('label') is None:
+ return
+
+ labels_service = self._service.service(entity.id).network_labels_service()
+ labels = [lbl.id for lbl in labels_service.list()]
+        if self.param('label') not in labels:
+ if not self._module.check_mode:
+ if labels:
+ labels_service.label_service(labels[0]).remove()
+ labels_service.add(
+ label=otypes.NetworkLabel(id=self.param('label'))
+ )
+ self.changed = True
+
+ def update_check(self, entity):
+ self._update_label_assignments(entity)
+ vlan_tag_changed = equal(self._module.params.get('vlan_tag'), getattr(entity.vlan, 'id', None))
+ if self._module.params.get('vlan_tag') == -1:
+ vlan_tag_changed = getattr(entity.vlan, 'id', None) is None
+ return (
+ vlan_tag_changed and
+ equal(self._module.params.get('comment'), entity.comment) and
+ equal(self._module.params.get('name'), entity.name) and
+ equal(self._module.params.get('description'), entity.description) and
+ equal(self._module.params.get('vm_network'), True if entity.usages else False) and
+ equal(self._module.params.get('mtu'), entity.mtu)
+ )
+
+
+class ClusterNetworksModule(BaseModule):
+
+ def __init__(self, network_id, cluster_network, *args, **kwargs):
+ super(ClusterNetworksModule, self).__init__(*args, **kwargs)
+ self._network_id = network_id
+ self._cluster_network = cluster_network
+ self._old_usages = []
+ self._cluster_network_entity = get_entity(self._service.network_service(network_id))
+ if self._cluster_network_entity is not None:
+ self._old_usages = self._cluster_network_entity.usages
+
+ def build_entity(self):
+ return otypes.Network(
+ id=self._network_id,
+ name=self._module.params['name'],
+ required=self._cluster_network.get('required'),
+ display=self._cluster_network.get('display'),
+ usages=list(set([
+ otypes.NetworkUsage(usage)
+ for usage in ['display', 'gluster', 'migration']
+ if self._cluster_network.get(usage, False)
+ ] + self._old_usages))
+ if (
+ self._cluster_network.get('display') is not None or
+ self._cluster_network.get('gluster') is not None or
+ self._cluster_network.get('migration') is not None
+ ) else None,
+ )
+
+ def update_check(self, entity):
+ return (
+ equal(self._cluster_network.get('required'), entity.required) and
+ equal(self._cluster_network.get('display'), entity.display) and
+ all(
+ x in [
+ str(usage)
+ for usage in getattr(entity, 'usages', [])
+ # VM + MANAGEMENT is part of root network
+ if usage != otypes.NetworkUsage.VM and usage != otypes.NetworkUsage.MANAGEMENT
+ ]
+ for x in [
+ usage
+ for usage in ['display', 'gluster', 'migration']
+ if self._cluster_network.get(usage, False)
+ ]
+ )
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ data_center=dict(required=True),
+ id=dict(default=None),
+ name=dict(required=True),
+ description=dict(default=None),
+ comment=dict(default=None),
+ external_provider=dict(default=None),
+ vlan_tag=dict(default=None, type='int'),
+ vm_network=dict(default=None, type='bool'),
+ mtu=dict(default=None, type='int'),
+ clusters=dict(default=None, type='list', elements='dict'),
+ label=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+ check_params(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ clusters_service = connection.system_service().clusters_service()
+ networks_service = connection.system_service().networks_service()
+ networks_module = NetworksModule(
+ connection=connection,
+ module=module,
+ service=networks_service,
+ )
+ state = module.params['state']
+ search_params = {
+ 'name': module.params['name'],
+ 'datacenter': module.params['data_center'],
+ }
+ if state == 'present':
+ imported = False
+ if module.params.get('external_provider') and module.params.get('name') not in [net.name for net in networks_service.list()]:
+ # Try to import network
+ ons_service = connection.system_service().openstack_network_providers_service()
+ on_service = ons_service.provider_service(get_id_by_name(ons_service, module.params.get('external_provider')))
+ on_networks_service = on_service.networks_service()
+ if module.params.get('name') in [net.name for net in on_networks_service.list()]:
+ network_service = on_networks_service.network_service(get_id_by_name(on_networks_service, module.params.get('name')))
+ network_service.import_(data_center=otypes.DataCenter(name=module.params.get('data_center')))
+ imported = True
+
+ ret = networks_module.create(search_params=search_params)
+ ret['changed'] = ret['changed'] or imported
+ # Update clusters networks:
+ if module.params.get('clusters') is not None:
+ for param_cluster in module.params.get('clusters'):
+ cluster = search_by_name(clusters_service, param_cluster.get('name'))
+ if cluster is None:
+ raise Exception("Cluster '%s' was not found." % param_cluster.get('name'))
+ cluster_networks_service = clusters_service.service(cluster.id).networks_service()
+ cluster_networks_module = ClusterNetworksModule(
+ network_id=ret['id'],
+ cluster_network=param_cluster,
+ connection=connection,
+ module=module,
+ service=cluster_networks_service,
+ )
+ if param_cluster.get('assigned', True):
+ ret = cluster_networks_module.create()
+ else:
+ ret = cluster_networks_module.remove()
+
+ elif state == 'absent':
+ ret = networks_module.remove(search_params=search_params)
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_network_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_network_info.py
new file mode 100644
index 00000000..40b95cbf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_network_info.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_network_info
+short_description: Retrieve information about one or more oVirt/RHV networks
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV networks."
+ - This module was called C(ovirt_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_network_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_networks), which
+ contains a list of networks. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search network starting with string vlan1 use: name=vlan1*"
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all networks whose names start with C(vlan1):
+- ovirt.ovirt.ovirt_network_info:
+ pattern: name=vlan1*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_networks }}"
+'''
+
+
+RETURN = '''
+ovirt_networks:
+ description: "List of dictionaries describing the networks. Network attributes are mapped to dictionary keys,
+    all network attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/network."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ networks_service = connection.system_service().networks_service()
+ networks = networks_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_networks=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in networks
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_nic.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_nic.py
new file mode 100644
index 00000000..dc1c1801
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_nic.py
@@ -0,0 +1,344 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_nic
+short_description: Module to manage network interfaces of Virtual Machines in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - Module to manage network interfaces of Virtual Machines in oVirt/RHV.
+options:
+ id:
+ description:
+ - "ID of the nic to manage."
+ type: str
+ name:
+ description:
+ - Name of the network interface to manage.
+ required: true
+ type: str
+ vm:
+ description:
+ - Name of the Virtual Machine to manage.
+ - You must provide either C(vm) parameter or C(template) parameter.
+ type: str
+ template:
+ description:
+ - Name of the template to manage.
+ - You must provide either C(vm) parameter or C(template) parameter.
+ type: str
+ template_version:
+ description:
+ - Version number of the template.
+ type: int
+ version_added: 1.2.0
+ state:
+ description:
+ - Should the Virtual Machine NIC be present/absent/plugged/unplugged.
+ choices: [ absent, plugged, present, unplugged ]
+ default: present
+ type: str
+ network:
+ description:
+      - Logical network that the VM network interface should use;
+        by default, the Empty network is used if no network is specified.
+ type: str
+ profile:
+ description:
+ - Virtual network interface profile to be attached to VM network interface.
+      - When not specified and the network has only a single profile, that profile is auto-selected; otherwise you must specify a profile.
+ type: str
+ interface:
+ description:
+ - "Type of the network interface. For example e1000, pci_passthrough, rtl8139, rtl8139_virtio, spapr_vlan or virtio."
+ - "It's required parameter when creating the new NIC."
+ type: str
+ mac_address:
+ description:
+      - Custom MAC address of the network interface; by default it is obtained from the MAC pool.
+ type: str
+ linked:
+ description:
+ - Defines if the NIC is linked to the virtual machine.
+ type: bool
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Add NIC to VM
+ ovirt.ovirt.ovirt_nic:
+ state: present
+ vm: myvm
+ name: mynic
+ interface: e1000
+ mac_address: 00:1a:4a:16:01:56
+ profile: ovirtmgmt
+ network: ovirtmgmt
+
+- name: Plug NIC to VM
+ ovirt.ovirt.ovirt_nic:
+ state: plugged
+ vm: myvm
+ name: mynic
+
+- name: Unplug NIC from VM
+ ovirt.ovirt.ovirt_nic:
+ state: unplugged
+ linked: false
+ vm: myvm
+ name: mynic
+
+- name: Add NIC to template
+ ovirt.ovirt.ovirt_nic:
+ auth: "{{ ovirt_auth }}"
+ state: present
+ template: my_template
+ name: nic1
+ interface: virtio
+ profile: ovirtmgmt
+ network: ovirtmgmt
+
+- name: Remove NIC from VM
+ ovirt.ovirt.ovirt_nic:
+ state: absent
+ vm: myvm
+ name: mynic
+
+# Change NIC Name
+- ovirt.ovirt.ovirt_nic:
+ id: 00000000-0000-0000-0000-000000000000
+ name: "new_nic_name"
+ vm: myvm
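+
+# A hedged sketch (template name and version number are illustrative),
+# using the template_version option documented above to target a specific
+# version of the template:
+- name: Add NIC to the second version of a template
+  ovirt.ovirt.ovirt_nic:
+    state: present
+    template: my_template
+    template_version: 2
+    name: nic1
+    interface: virtio
+    profile: ovirtmgmt
+    network: ovirtmgmt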
+'''
+
+RETURN = '''
+id:
+ description: ID of the network interface which is managed
+ returned: On success if network interface is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+nic:
+ description: "Dictionary of all the network interface attributes. Network interface attributes can be found on your oVirt/RHV instance
+    at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/nic."
+ returned: On success if network interface is found.
+ type: dict
+'''
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ get_link_name,
+ ovirt_full_argument_spec,
+ search_by_name,
+)
+
+
+class EntityNicsModule(BaseModule):
+
+ def __init__(self, *args, **kwargs):
+ super(EntityNicsModule, self).__init__(*args, **kwargs)
+ self.vnic_id = None
+
+ @property
+ def vnic_id(self):
+ return self._vnic_id
+
+ @vnic_id.setter
+ def vnic_id(self, vnic_id):
+ self._vnic_id = vnic_id
+
+ def build_entity(self):
+ return otypes.Nic(
+ id=self._module.params.get('id'),
+ name=self._module.params.get('name'),
+ interface=otypes.NicInterface(
+ self._module.params.get('interface')
+ ) if self._module.params.get('interface') else None,
+ vnic_profile=otypes.VnicProfile(
+ id=self.vnic_id,
+ ) if self.vnic_id else None,
+ mac=otypes.Mac(
+ address=self._module.params.get('mac_address')
+ ) if self._module.params.get('mac_address') else None,
+ linked=self.param('linked') if self.param('linked') is not None else None,
+ )
+
+ def update_check(self, entity):
+ if self._module.params.get('vm'):
+ return (
+ equal(self._module.params.get('interface'), str(entity.interface)) and
+ equal(self._module.params.get('linked'), entity.linked) and
+ equal(self._module.params.get('name'), str(entity.name)) and
+ equal(self._module.params.get('profile'), get_link_name(self._connection, entity.vnic_profile)) and
+ equal(self._module.params.get('mac_address'), entity.mac.address)
+ )
+ elif self._module.params.get('template'):
+ return (
+ equal(self._module.params.get('interface'), str(entity.interface)) and
+ equal(self._module.params.get('linked'), entity.linked) and
+ equal(self._module.params.get('name'), str(entity.name)) and
+ equal(self._module.params.get('profile'), get_link_name(self._connection, entity.vnic_profile))
+ )
+
+
+def get_vnics(networks_service, network, connection):
+ resp = []
+ vnic_services = connection.system_service().vnic_profiles_service()
+ for vnic in vnic_services.list():
+ if vnic.network.id == network.id:
+ resp.append(vnic)
+ return resp
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(type='str', default='present', choices=['absent', 'plugged', 'present', 'unplugged']),
+ vm=dict(type='str'),
+ id=dict(default=None),
+ template=dict(type='str'),
+ name=dict(type='str', required=True),
+ interface=dict(type='str'),
+ template_version=dict(type='int', default=None),
+ profile=dict(type='str'),
+ network=dict(type='str'),
+ mac_address=dict(type='str'),
+ linked=dict(type='bool'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[['vm', 'template']],
+ )
+
+ check_sdk(module)
+
+ try:
+ # Locate the service that manages the virtual machines and use it to
+ # search for the NIC:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ entity_name = None
+
+ if module.params.get('vm'):
+ # Locate the VM, where we will manage NICs:
+ entity_name = module.params.get('vm')
+ collection_service = connection.system_service().vms_service()
+ elif module.params.get('template'):
+ entity_name = module.params.get('template')
+ collection_service = connection.system_service().templates_service()
+
+        # TODO: We have to modify the search_by_name function to accept raise_error=True/False.
+ if module.params['template_version'] is not None:
+ entity = [
+ t for t in collection_service.list()
+ if t.version.version_number == module.params['template_version']
+ ]
+ if not entity:
+ raise ValueError(
+ "Template with name '%s' and version '%s' was not found'" % (
+ module.params['template'],
+ module.params['template_version']
+ )
+ )
+ entity = entity[0]
+ else:
+ entity = search_by_name(collection_service, entity_name)
+ if entity is None:
+ raise Exception("Vm/Template '%s' was not found." % entity_name)
+
+ service = collection_service.service(entity.id)
+ cluster_id = entity.cluster
+
+ nics_service = service.nics_service()
+ entitynics_module = EntityNicsModule(
+ connection=connection,
+ module=module,
+ service=nics_service,
+ )
+
+ # Find vNIC id of the network interface (if any):
+ if module.params['network']:
+ profile = module.params.get('profile')
+ cluster_name = get_link_name(connection, cluster_id)
+ dcs_service = connection.system_service().data_centers_service()
+ dc = dcs_service.list(search='Clusters.name=%s' % cluster_name)[0]
+ networks_service = dcs_service.service(dc.id).networks_service()
+ network = next(
+ (n for n in networks_service.list()
+ if n.name == module.params['network']),
+ None
+ )
+ if network is None:
+ raise Exception(
+ "Network '%s' was not found in datacenter '%s'." % (
+ module.params['network'],
+ dc.name
+ )
+ )
+ if profile:
+ for vnic in connection.system_service().vnic_profiles_service().list():
+ if vnic.name == profile and vnic.network.id == network.id:
+                    entitynics_module.vnic_id = vnic.id
+                    # Profile names are unique within a network, so stop at the first match:
+                    break
+ else:
+                # When no profile is specified, auto-select the network's vNIC profile if it has exactly one:
+ vnics = get_vnics(networks_service, network, connection)
+ if len(vnics) == 1:
+ entitynics_module.vnic_id = vnics[0].id
+ else:
+ raise Exception(
+ "You didn't specify any vnic profile. "
+ "Following vnic profiles are in system: '%s', please specify one of them" % ([vnic.name for vnic in vnics])
+ )
+ # Handle appropriate action:
+ state = module.params['state']
+ if state == 'present':
+ ret = entitynics_module.create()
+ elif state == 'absent':
+ ret = entitynics_module.remove()
+ elif state == 'plugged':
+ entitynics_module.create()
+ ret = entitynics_module.action(
+ action='activate',
+ action_condition=lambda nic: not nic.plugged,
+ wait_condition=lambda nic: nic.plugged,
+ )
+ elif state == 'unplugged':
+ entitynics_module.create()
+ ret = entitynics_module.action(
+ action='deactivate',
+ action_condition=lambda nic: nic.plugged,
+ wait_condition=lambda nic: not nic.plugged,
+ )
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_nic_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_nic_info.py
new file mode 100644
index 00000000..cf90467f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_nic_info.py
@@ -0,0 +1,148 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_nic_info
+short_description: Retrieve information about one or more oVirt/RHV virtual machine network interfaces
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV virtual machine network interfaces."
+ - This module was called C(ovirt_nic_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_nic_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_nics), which
+ contains a list of NICs. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ vm:
+ description:
+ - "Name of the VM where NIC is attached."
+ - You must provide either C(vm) parameter or C(template) parameter.
+ type: str
+ template:
+ description:
+ - "Name of the template where NIC is attached."
+ - You must provide either C(vm) parameter or C(template) parameter.
+ type: str
+ version_added: 1.2.0
+ name:
+ description:
+ - "Name of the NIC, can be used as glob expression."
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all NICs whose names start with C(eth) for VM named C(centos7):
+- ovirt.ovirt.ovirt_nic_info:
+ vm: centos7
+ name: eth*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_nics }}"
+'''
+
+RETURN = '''
+ovirt_nics:
+ description: "List of dictionaries describing the network interfaces. NIC attributes are mapped to dictionary keys,
+    all NIC attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/nic."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ vm=dict(default=None),
+ template=dict(default=None),
+ name=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec,
+ required_one_of=[['vm', 'template']],
+ )
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+
+ if module.params.get('vm'):
+ # Locate the VM, where we will manage NICs:
+ entity_name = module.params.get('vm')
+ collection_service = connection.system_service().vms_service()
+ elif module.params.get('template'):
+ entity_name = module.params.get('template')
+ collection_service = connection.system_service().templates_service()
+ entity = search_by_name(collection_service, entity_name)
+ if entity is None:
+ raise Exception("VM/Template '%s' was not found." % entity_name)
+
+ nics_service = collection_service.service(entity.id).nics_service()
+ if module.params['name']:
+ nics = [
+ e for e in nics_service.list()
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ else:
+ nics = nics_service.list()
+
+ result = dict(
+ ovirt_nics=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in nics
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_permission.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_permission.py
new file mode 100644
index 00000000..774f7105
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_permission.py
@@ -0,0 +1,329 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_permission
+short_description: Module to manage permissions of users/groups in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - Module to manage permissions of users/groups in oVirt/RHV.
+options:
+ role:
+ description:
+ - Name of the role to be assigned to user/group on specific object.
+ default: UserRole
+ type: str
+ state:
+ description:
+ - Should the permission be present/absent.
+ choices: [ absent, present ]
+ default: present
+ type: str
+ object_id:
+ description:
+ - ID of the object where the permissions should be managed.
+ type: str
+ object_name:
+ description:
+ - Name of the object where the permissions should be managed.
+ type: str
+ object_type:
+ description:
+ - The object where the permissions should be managed.
+ choices:
+ - cluster
+ - cpu_profile
+ - data_center
+ - disk
+ - disk_profile
+ - host
+ - network
+ - storage_domain
+ - system
+ - template
+ - vm
+ - vm_pool
+ - vnic_profile
+ default: vm
+ type: str
+ user_name:
+ description:
+      - Username of the user to manage. In most LDAP providers it is the I(uid) of the user,
+        but in Active Directory you must specify the I(UPN) of the user.
+      - Note that if the user does not exist in the system this module will fail;
+        you should ensure the user exists by using the M(ovirt.ovirt.ovirt_users) module.
+ type: str
+ group_name:
+ description:
+ - Name of the group to manage.
+      - Note that if the group does not exist in the system this module will fail;
+        you should ensure the group exists by using the M(ovirt.ovirt.ovirt_groups) module.
+ type: str
+ authz_name:
+ description:
+ - Authorization provider of the user/group.
+ required: true
+ aliases: [ domain ]
+ type: str
+ namespace:
+ description:
+ - Namespace of the authorization provider, where user/group resides.
+ type: str
+ quota_name:
+ description:
+      - Name of the quota to assign the permission to. Works only with C(object_type) I(data_center).
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Add user user1 from authorization provider example.com-authz
+ ovirt.ovirt.ovirt_permission:
+ user_name: user1
+ authz_name: example.com-authz
+ object_type: vm
+ object_name: myvm
+ role: UserVmManager
+
+- name: Remove permission from user
+ ovirt.ovirt.ovirt_permission:
+ state: absent
+ user_name: user1
+ authz_name: example.com-authz
+ object_type: cluster
+ object_name: mycluster
+ role: ClusterAdmin
+
+- name: Assign QuotaConsumer role to user
+  ovirt.ovirt.ovirt_permission:
+ state: present
+ user_name: user1
+ authz_name: example.com-authz
+ object_type: data_center
+ object_name: mydatacenter
+ quota_name: myquota
+ role: QuotaConsumer
+
+- name: Assign QuotaConsumer role to group
+  ovirt.ovirt.ovirt_permission:
+ state: present
+ group_name: group1
+ authz_name: example.com-authz
+ object_type: data_center
+ object_name: mydatacenter
+ quota_name: myquota
+ role: QuotaConsumer
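+
+# A hedged sketch (user and provider names are illustrative): assign a role
+# on the whole system; object_name/object_id are not required when
+# object_type is system:
+- name: Assign SuperUser role to user on the system
+  ovirt.ovirt.ovirt_permission:
+    user_name: admin1
+    authz_name: example.com-authz
+    object_type: system
+    role: SuperUser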
+'''
+
+RETURN = '''
+id:
+ description: ID of the permission which is managed
+ returned: On success if permission is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+permission:
+ description: "Dictionary of all the permission attributes. Permission attributes can be found on your oVirt/RHV instance
+    at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/permission."
+ returned: On success if permission is found.
+ type: dict
+'''
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ follow_link,
+ get_link_name,
+ ovirt_full_argument_spec,
+ search_by_attributes,
+ search_by_name,
+ get_id_by_name
+)
+
+
+def _objects_service(connection, object_type):
+ if object_type == 'system':
+ return connection.system_service()
+
+ return getattr(
+ connection.system_service(),
+ '%ss_service' % object_type,
+ None,
+ )()
+
+
+def _object_service(connection, module):
+ object_type = module.params['object_type']
+ objects_service = _objects_service(connection, object_type)
+ if object_type == 'system':
+ return objects_service
+
+ object_id = module.params['object_id']
+ if object_id is None:
+ sdk_object = search_by_name(objects_service, module.params['object_name'])
+ if sdk_object is None:
+ raise Exception(
+ "'%s' object '%s' was not found." % (
+ module.params['object_type'],
+ module.params['object_name']
+ )
+ )
+ object_id = sdk_object.id
+
+ object_service = objects_service.service(object_id)
+ if module.params['quota_name'] and object_type == 'data_center':
+ quotas_service = object_service.quotas_service()
+ return quotas_service.quota_service(get_id_by_name(quotas_service, module.params['quota_name']))
+ return object_service
+
+
+def _permission(module, permissions_service, connection):
+ for permission in permissions_service.list():
+ user = follow_link(connection, permission.user)
+ if (
+ equal(module.params['user_name'], user.principal if user else None) and
+ equal(module.params['group_name'], get_link_name(connection, permission.group)) and
+ equal(module.params['role'], get_link_name(connection, permission.role))
+ ):
+ return permission
+
+
+class PermissionsModule(BaseModule):
+
+ def _user(self):
+ user = search_by_attributes(
+ self._connection.system_service().users_service(),
+ usrname="{name}@{authz_name}".format(
+ name=self._module.params['user_name'],
+ authz_name=self._module.params['authz_name'],
+ ),
+ )
+ if user is None:
+ raise Exception("User '%s' was not found." % self._module.params['user_name'])
+ return user
+
+ def _group(self):
+ groups = self._connection.system_service().groups_service().list(
+ search="name={name}".format(
+ name=self._module.params['group_name'],
+ )
+ )
+
+        # If more groups were found, filter them by namespace and authz name:
+ # (filtering here, as oVirt/RHV backend doesn't support it)
+ if len(groups) > 1:
+ groups = [
+ g for g in groups if (
+ equal(self._module.params['namespace'], g.namespace) and
+ equal(self._module.params['authz_name'], g.domain.name)
+ )
+ ]
+ if not groups:
+ raise Exception("Group '%s' was not found." % self._module.params['group_name'])
+ return groups[0]
+
+ def build_entity(self):
+ entity = self._group() if self._module.params['group_name'] else self._user()
+
+ return otypes.Permission(
+ user=otypes.User(
+ id=entity.id
+ ) if self._module.params['user_name'] else None,
+ group=otypes.Group(
+ id=entity.id
+ ) if self._module.params['group_name'] else None,
+ role=otypes.Role(
+ name=self._module.params['role']
+ ),
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ role=dict(type='str', default='UserRole'),
+ object_type=dict(type='str', default='vm',
+ choices=[
+ 'cluster',
+ 'cpu_profile',
+ 'data_center',
+ 'disk',
+ 'disk_profile',
+ 'host',
+ 'network',
+ 'storage_domain',
+ 'system',
+ 'template',
+ 'vm',
+ 'vm_pool',
+ 'vnic_profile',
+ ]),
+ authz_name=dict(type='str', required=True, aliases=['domain']),
+ object_id=dict(type='str'),
+ object_name=dict(type='str'),
+ user_name=dict(type='str'),
+ group_name=dict(type='str'),
+ namespace=dict(type='str'),
+ quota_name=dict(type='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+
+ if (module.params['object_name'] is None and module.params['object_id'] is None) and module.params['object_type'] != 'system':
+ module.fail_json(msg='"object_name" or "object_id" is required')
+
+ if module.params['user_name'] is None and module.params['group_name'] is None:
+ module.fail_json(msg='"user_name" or "group_name" is required')
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ permissions_service = _object_service(connection, module).permissions_service()
+ permissions_module = PermissionsModule(
+ connection=connection,
+ module=module,
+ service=permissions_service,
+ )
+
+ permission = _permission(module, permissions_service, connection)
+ state = module.params['state']
+ if state == 'present':
+ ret = permissions_module.create(entity=permission)
+ elif state == 'absent':
+ ret = permissions_module.remove(entity=permission)
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_permission_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_permission_info.py
new file mode 100644
index 00000000..dba50477
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_permission_info.py
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_permission_info
+short_description: Retrieve information about one or more oVirt/RHV permissions
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV permissions."
+ - This module was called C(ovirt_permission_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_permission_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_permissions), which
+ contains a list of permissions. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ user_name:
+ description:
+ - "Username of the user to manage. In most LDAPs it's I(uid) of the user, but in Active Directory you must specify I(UPN) of the user."
+ type: str
+ group_name:
+ description:
+ - "Name of the group to manage."
+ type: str
+ authz_name:
+ description:
+ - "Authorization provider of the user/group. In previous versions of oVirt/RHV known as domain."
+ type: str
+ required: true
+ aliases: ['domain']
+ namespace:
+ description:
+ - "Namespace of the authorization provider, where user/group resides."
+ required: false
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all permissions of user with username C(john):
+- ovirt.ovirt.ovirt_permission_info:
+ user_name: john
+ authz_name: example.com-authz
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_permissions }}"
+'''
+
+RETURN = '''
+ovirt_permissions:
+ description: "List of dictionaries describing the permissions. Permission attributes are mapped to dictionary keys,
+    all permission attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/permission."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4 as sdk
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_link_name,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def _permissions_service(connection, module):
+ if module.params['user_name']:
+ service = connection.system_service().users_service()
+ entity = next(
+ iter(
+ service.list(
+ search='usrname={0}'.format(
+ '{0}@{1}'.format(module.params['user_name'], module.params['authz_name'])
+ )
+ )
+ ),
+ None
+ )
+ else:
+ service = connection.system_service().groups_service()
+ entity = search_by_name(service, module.params['group_name'])
+
+ if entity is None:
+ raise Exception("User/Group wasn't found.")
+
+ return service.service(entity.id).permissions_service()
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ authz_name=dict(required=True, aliases=['domain']),
+ user_name=dict(default=None),
+ group_name=dict(default=None),
+ namespace=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ permissions_service = _permissions_service(connection, module)
+ permissions = []
+ for p in permissions_service.list():
+ newperm = dict()
+ for key, value in p.__dict__.items():
+ if value and isinstance(value, sdk.Struct):
+ newperm[key[1:]] = get_link_name(connection, value)
+ newperm['%s_id' % key[1:]] = value.id
+ permissions.append(newperm)
+
+ result = dict(ovirt_permissions=permissions)
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_quota.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_quota.py
new file mode 100644
index 00000000..92a7f2eb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_quota.py
@@ -0,0 +1,331 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_quota
+short_description: Module to manage datacenter quotas in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Module to manage datacenter quotas in oVirt/RHV"
+options:
+ id:
+ description:
+ - "ID of the quota to manage."
+ type: str
+ name:
+ description:
+ - "Name of the quota to manage."
+ type: str
+ required: true
+ state:
+ description:
+ - "Should the quota be present/absent."
+ type: str
+ choices: ['present', 'absent']
+ default: present
+ data_center:
+ description:
+ - "Name of the datacenter where quota should be managed."
+ type: str
+ required: true
+ description:
+ description:
+ - "Description of the quota to manage."
+ type: str
+ cluster_threshold:
+ description:
+ - "Cluster threshold(soft limit) defined in percentage (0-100)."
+ type: int
+ aliases:
+ - "cluster_soft_limit"
+ cluster_grace:
+ description:
+ - "Cluster grace(hard limit) defined in percentage (1-100)."
+ type: int
+ aliases:
+ - "cluster_hard_limit"
+ storage_threshold:
+ description:
+ - "Storage threshold(soft limit) defined in percentage (0-100)."
+ type: int
+ aliases:
+ - "storage_soft_limit"
+ storage_grace:
+ description:
+ - "Storage grace(hard limit) defined in percentage (1-100)."
+ type: int
+ aliases:
+ - "storage_hard_limit"
+ clusters:
+ description:
+ - "List of dictionary of cluster limits, which is valid to specific cluster."
+ - "If cluster isn't specified it's valid to all clusters in system:"
+ type: list
+ elements: dict
+ suboptions:
+ cluster:
+ description:
+ - Name of the cluster.
+ memory:
+ description:
+ - Memory limit (in GiB).
+ cpu:
+ description:
+ - CPU limit.
+ storages:
+ description:
+ - "List of dictionary of storage limits, which is valid to specific storage."
+ - "If storage isn't specified it's valid to all storages in system:"
+ type: list
+ elements: dict
+ suboptions:
+ storage:
+ description:
+ - Name of the storage.
+ size:
+ description:
+ - Size limit (in GiB).
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add cluster quota to cluster cluster1 with memory limit 20GiB and CPU limit to 10:
+- ovirt.ovirt.ovirt_quota:
+ name: quota1
+ data_center: dcX
+ clusters:
+ - name: cluster1
+ memory: 20
+ cpu: 10
+
+# Add cluster quota to all clusters with memory limit 30GiB and CPU limit to 15:
+- ovirt.ovirt.ovirt_quota:
+ name: quota2
+ data_center: dcX
+ clusters:
+ - memory: 30
+ cpu: 15
+
+# Add storage quota to storage data1 with size limit to 100GiB
+- ovirt.ovirt.ovirt_quota:
+ name: quota3
+ data_center: dcX
+ storage_grace: 40
+ storage_threshold: 60
+ storages:
+ - name: data1
+ size: 100
+
+# Remove quota quota1 (Note the quota must not be assigned to any VM/disk):
+- ovirt.ovirt.ovirt_quota:
+ state: absent
+ data_center: dcX
+ name: quota1
+
+# Change Quota Name
+- ovirt.ovirt.ovirt_quota:
+ id: 00000000-0000-0000-0000-000000000000
+ name: "new_quota_name"
+ data_center: dcX
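+
+# A hedged sketch (all values are illustrative): combine cluster soft/hard
+# limits in percent with a per-cluster memory (GiB) and CPU limit:
+- ovirt.ovirt.ovirt_quota:
+    name: quota4
+    data_center: dcX
+    cluster_threshold: 70
+    cluster_grace: 90
+    clusters:
+      - name: cluster1
+        memory: 16
+        cpu: 8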
+'''
+
+RETURN = '''
+id:
+ description: ID of the quota which is managed
+ returned: On success if quota is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+quota:
+ description: "Dictionary of all the quota attributes. Quota attributes can be found on your oVirt/RHV instance
+    at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/quota."
+ returned: On success if quota is found.
+ type: dict
+'''
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ get_link_name,
+ ovirt_full_argument_spec,
+ search_by_name,
+)
+
+
+class QuotasModule(BaseModule):
+
+ def build_entity(self):
+ return otypes.Quota(
+ description=self._module.params['description'],
+ name=self._module.params['name'],
+ id=self._module.params['id'],
+ storage_hard_limit_pct=self._module.params.get('storage_grace'),
+ storage_soft_limit_pct=self._module.params.get('storage_threshold'),
+ cluster_hard_limit_pct=self._module.params.get('cluster_grace'),
+ cluster_soft_limit_pct=self._module.params.get('cluster_threshold'),
+ )
+
+ def update_storage_limits(self, entity):
+ new_limits = {}
+ for storage in self._module.params.get('storages'):
+ new_limits[storage.get('name', '')] = {
+ 'size': storage.get('size'),
+ }
+
+ old_limits = {}
+ sd_limit_service = self._service.service(entity.id).quota_storage_limits_service()
+ for limit in sd_limit_service.list():
+ storage = get_link_name(self._connection, limit.storage_domain) if limit.storage_domain else ''
+ old_limits[storage] = {
+ 'size': limit.limit,
+ }
+ sd_limit_service.service(limit.id).remove()
+
+ return new_limits == old_limits
+
+ def update_cluster_limits(self, entity):
+ new_limits = {}
+ for cluster in self._module.params.get('clusters'):
+ new_limits[cluster.get('name', '')] = {
+ 'cpu': int(cluster.get('cpu')),
+ 'memory': float(cluster.get('memory')),
+ }
+
+ old_limits = {}
+ cl_limit_service = self._service.service(entity.id).quota_cluster_limits_service()
+ for limit in cl_limit_service.list():
+ cluster = get_link_name(self._connection, limit.cluster) if limit.cluster else ''
+ old_limits[cluster] = {
+ 'cpu': limit.vcpu_limit,
+ 'memory': limit.memory_limit,
+ }
+ cl_limit_service.service(limit.id).remove()
+
+ return new_limits == old_limits
+
+ def update_check(self, entity):
+ # -- FIXME --
+ # Note that we here always remove all cluster/storage limits, because
+ # it's not currently possible to update them and then re-create the limits
+ # appropriately, this shouldn't have any side-effects, but it's not considered
+ # as a correct approach.
+ # This feature is tracked here: https://bugzilla.redhat.com/show_bug.cgi?id=1398576
+ #
+
+ return (
+ self.update_storage_limits(entity) and
+ self.update_cluster_limits(entity) and
+ equal(self._module.params.get('name'), entity.name) and
+ equal(self._module.params.get('description'), entity.description) and
+ equal(self._module.params.get('storage_grace'), entity.storage_hard_limit_pct) and
+ equal(self._module.params.get('storage_threshold'), entity.storage_soft_limit_pct) and
+ equal(self._module.params.get('cluster_grace'), entity.cluster_hard_limit_pct) and
+ equal(self._module.params.get('cluster_threshold'), entity.cluster_soft_limit_pct)
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ id=dict(default=None),
+ name=dict(required=True),
+ data_center=dict(required=True),
+ description=dict(default=None),
+ cluster_threshold=dict(default=None, type='int', aliases=['cluster_soft_limit']),
+ cluster_grace=dict(default=None, type='int', aliases=['cluster_hard_limit']),
+ storage_threshold=dict(default=None, type='int', aliases=['storage_soft_limit']),
+ storage_grace=dict(default=None, type='int', aliases=['storage_hard_limit']),
+ clusters=dict(default=[], type='list', elements='dict'),
+ storages=dict(default=[], type='list', elements='dict'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ datacenters_service = connection.system_service().data_centers_service()
+ dc_name = module.params['data_center']
+ dc_id = getattr(search_by_name(datacenters_service, dc_name), 'id', None)
+ if dc_id is None:
+ raise Exception("Datacenter '%s' was not found." % dc_name)
+
+ quotas_service = datacenters_service.service(dc_id).quotas_service()
+ quotas_module = QuotasModule(
+ connection=connection,
+ module=module,
+ service=quotas_service,
+ )
+
+ state = module.params['state']
+ if state == 'present':
+ ret = quotas_module.create()
+
+ # Manage cluster limits:
+ cl_limit_service = quotas_service.service(ret['id']).quota_cluster_limits_service()
+ for cluster in module.params.get('clusters'):
+ cl_limit_service.add(
+ limit=otypes.QuotaClusterLimit(
+ memory_limit=float(cluster.get('memory')),
+ vcpu_limit=int(cluster.get('cpu')),
+ cluster=search_by_name(
+ connection.system_service().clusters_service(),
+ cluster.get('name')
+ ),
+ ),
+ )
+
+ # Manage storage limits:
+ sd_limit_service = quotas_service.service(ret['id']).quota_storage_limits_service()
+ for storage in module.params.get('storages'):
+ sd_limit_service.add(
+ limit=otypes.QuotaStorageLimit(
+ limit=storage.get('size'),
+ storage_domain=search_by_name(
+ connection.system_service().storage_domains_service(),
+ storage.get('name')
+ ),
+ )
+ )
+
+ elif state == 'absent':
+ ret = quotas_module.remove()
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_quota_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_quota_info.py
new file mode 100644
index 00000000..1bd1ca2c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_quota_info.py
@@ -0,0 +1,130 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_quota_info
+short_description: Retrieve information about one or more oVirt/RHV quotas
+version_added: "1.0.0"
+author: "Maor Lipchuk (@machacekondra)"
+description:
+ - "Retrieve information about one or more oVirt/RHV quotas."
+ - This module was called C(ovirt_quota_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_quota_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_quotas), which
+ contains a list of quotas. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ data_center:
+ description:
+ - "Name of the datacenter where quota resides."
+ required: true
+ type: str
+ name:
+ description:
+ - "Name of the quota, can be used as glob expression."
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about quota named C(myquota) in Default datacenter:
+- ovirt.ovirt.ovirt_quota_info:
+ data_center: Default
+ name: myquota
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_quotas }}"
+'''
+
+RETURN = '''
+ovirt_quotas:
+ description: "List of dictionaries describing the quotas. Quota attributes are mapped to dictionary keys,
+    all quota attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/quota."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ data_center=dict(required=True),
+ name=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ datacenters_service = connection.system_service().data_centers_service()
+ dc_name = module.params['data_center']
+ dc = search_by_name(datacenters_service, dc_name)
+ if dc is None:
+ raise Exception("Datacenter '%s' was not found." % dc_name)
+
+ quotas_service = datacenters_service.service(dc.id).quotas_service()
+ if module.params['name']:
+ quotas = [
+ e for e in quotas_service.list()
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ else:
+ quotas = quotas_service.list()
+
+ result = dict(
+ ovirt_quotas=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in quotas
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_role.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_role.py
new file mode 100644
index 00000000..a205a00f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_role.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_role
+short_description: Module to manage roles in oVirt/RHV
+version_added: "1.0.0"
+author: "Martin Necas (@mnecas)"
+description:
+ - "Module to manage roles in oVirt/RHV."
+options:
+ name:
+ description:
+ - "Name of the role to manage."
+ type: str
+ id:
+ description:
+ - "ID of the role to manage."
+ type: str
+ description:
+ description:
+ - "Description of the role."
+ type: str
+ state:
+ description:
+ - "Should the role be present/absent."
+ choices: ['present', 'absent']
+ type: str
+ default: present
+ administrative:
+ description:
+ - "Defines the role as administrative-only or not."
+ type: bool
+ permits:
+ description:
+ - "List of permits which role will have"
+ - "Permit 'login' is default and all roles will have it."
+ - "List can contain name of permit."
+ type: list
+ elements: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create administrative role with two permits
+- ovirt.ovirt.ovirt_role:
+ name: role
+ administrative: true
+ permits:
+ - manipulate_permissions
+ - create_instance
+
+# Remove role
+- ovirt.ovirt.ovirt_role:
+ name: role
+ state: absent
+
+# Remove all permits (only the default 'login' permit remains)
+- ovirt.ovirt.ovirt_role:
+    name: role
+    administrative: true
+ permits:
+ - login
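+
+# A hedged sketch (permit names are illustrative): replace a role's permits;
+# the default 'login' permit is always kept by the module:
+- ovirt.ovirt.ovirt_role:
+    name: role
+    permits:
+      - create_vm
+      - delete_vm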
+'''
+
+RETURN = '''
+ovirt_role:
+ description: "List of dictionaries describing the Roles. Role attributes are mapped to dictionary keys,
+ all Roles attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/role."
+ returned: On success.
+ type: list
+'''
+
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ convert_to_bytes,
+ create_connection,
+ equal,
+ get_dict_of_struct,
+ get_link_name,
+ get_id_by_name,
+ ovirt_full_argument_spec,
+ search_by_attributes,
+ search_by_name,
+)
+from ansible.module_utils.basic import AnsibleModule
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+
+class RoleModule(BaseModule):
+ def build_entity(self):
+ if 'login' not in self.param('permits'):
+ self.param('permits').append('login')
+ all_permits = self.get_all_permits()
+ return otypes.Role(
+ id=self.param('id'),
+ name=self.param('name'),
+ administrative=self.param('administrative') if self.param(
+ 'administrative') else None,
+ permits=[
+ otypes.Permit(id=all_permits.get(new_permit)) for new_permit in self.param('permits')
+ ] if self.param('permits') else None,
+ description=self.param('description') if self.param('administrative') else None,
+ )
+
+ def get_all_permits(self):
+ return dict((permit.name, permit.id) for permit in self._connection.system_service().cluster_levels_service().level_service('4.3').get().permits)
+
+ def update_check(self, entity):
+ def check_permits():
+ if self.param('permits'):
+ if 'login' not in self.param('permits'):
+ self.param('permits').append('login')
+ permits_service = self._service.service(entity.id).permits_service()
+ current = [er.name for er in permits_service.list()]
+ passed = self.param('permits')
+ if not sorted(current) == sorted(passed):
+ if self._module.check_mode:
+ return False
+ # remove all
+ for permit in permits_service.list():
+ permits_service.permit_service(permit.id).remove()
+ # add passed permits
+ all_permits = self.get_all_permits()
+ for new_permit in passed:
+ permits_service.add(otypes.Permit(id=all_permits.get(new_permit)))
+ return False
+ return True
+
+ return (
+ check_permits() and
+ equal(self.param('administrative'), entity.administrative) and
+ equal(self.param('description'), entity.description)
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ id=dict(default=None),
+ name=dict(default=None),
+ description=dict(default=None),
+ administrative=dict(type='bool', default=False),
+ permits=dict(type='list', default=[], elements='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[['id', 'name']],
+ )
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ roles_service = connection.system_service().roles_service()
+ roles_module = RoleModule(
+ connection=connection,
+ module=module,
+ service=roles_service,
+ )
+ state = module.params['state']
+ if state == 'present':
+ ret = roles_module.create()
+ elif state == 'absent':
+ ret = roles_module.remove()
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_scheduling_policy_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_scheduling_policy_info.py
new file mode 100644
index 00000000..3e97d920
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_scheduling_policy_info.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_scheduling_policy_info
+short_description: Retrieve information about one or more oVirt scheduling policies
+version_added: "1.0.0"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Retrieve information about one or more oVirt scheduling policies."
+ - This module was called C(ovirt_scheduling_policy_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_scheduling_policy_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_scheduling_policies),
+ which contains a list of scheduling policies. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ id:
+ description:
+ - "ID of the scheduling policy."
+ type: str
+ name:
+ description:
+ - "Name of the scheduling policy, can be used as glob expression."
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all scheduling policies with name InClusterUpgrade:
+- ovirt.ovirt.ovirt_scheduling_policy_info:
+ name: InClusterUpgrade
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_scheduling_policies }}"
+'''
+
+RETURN = '''
+ovirt_scheduling_policies:
+ description: "List of dictionaries describing the scheduling policies.
+    Scheduling policy attributes are mapped to dictionary keys;
+    all scheduling policy attributes can be found at the following
+    URL: https://ovirt.example.com/ovirt-engine/api/model#types/scheduling_policy."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ id=dict(default=None),
+ name=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ system_service = connection.system_service()
+ sched_policies_service = system_service.scheduling_policies_service()
+ if module.params['name']:
+ sched_policies = [
+ e for e in sched_policies_service.list()
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ elif module.params['id']:
+ sched_policies = [
+ sched_policies_service.service(module.params['id']).get()
+ ]
+ else:
+ sched_policies = sched_policies_service.list()
+
+ result = dict(
+ ovirt_scheduling_policies=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in sched_policies
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_snapshot.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_snapshot.py
new file mode 100644
index 00000000..12ae2e3c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_snapshot.py
@@ -0,0 +1,556 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_snapshot
+short_description: "Module to manage Virtual Machine Snapshots in oVirt/RHV"
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Module to manage Virtual Machine Snapshots in oVirt/RHV"
+options:
+ snapshot_id:
+ description:
+ - "ID of the snapshot to manage."
+ type: str
+ vm_name:
+ description:
+ - "Name of the Virtual Machine to manage."
+ required: true
+ type: str
+ state:
+ description:
+ - "Should the Virtual Machine snapshot be restore/present/absent."
+ choices: ['restore', 'present', 'absent']
+ default: present
+ type: str
+ description:
+ description:
+ - "Description of the snapshot."
+ type: str
+ disk_id:
+ description:
+ - "Disk id which you want to upload or download"
+ - "To get disk, you need to define disk_id or disk_name"
+ type: str
+ disk_name:
+ description:
+ - "Disk name which you want to upload or download"
+ type: str
+ download_image_path:
+ description:
+ - "Path on a file system where snapshot should be downloaded."
+ - "Note that you must have an valid oVirt/RHV engine CA in your system trust store
+ or you must provide it in C(ca_file) parameter."
+ - "Note that the snapshot is not downloaded when the file already exists,
+ but you can forcibly download the snapshot when using C(force) I (true)."
+ type: str
+ upload_image_path:
+ description:
+ - "Path to disk image, which should be uploaded."
+ type: str
+ use_memory:
+ description:
+ - "If I(true) and C(state) is I(present) save memory of the Virtual
+ Machine if it's running."
+ - "If I(true) and C(state) is I(restore) restore memory of the
+ Virtual Machine."
+ - "Note that Virtual Machine will be paused while saving the memory."
+ aliases:
+ - "restore_memory"
+ - "save_memory"
+ type: bool
+ keep_days_old:
+ description:
+ - "Number of days after which should snapshot be deleted."
+ - "It will check all snapshots of virtual machine and delete them, if they are older."
+ type: int
+ disks:
+ description:
+ - "List of disks which should be created with snapshot."
+ suboptions:
+ id:
+ description:
+ - "Id of the disk which should will be created."
+ type: str
+ name:
+ description:
+ - "Name of the disk which should will be created."
+ type: str
+ type: list
+ elements: dict
+notes:
+ - "Note that without a guest agent the data on the created snapshot may be
+ inconsistent."
+ - "Deleting a snapshot does not remove any information from the virtual
+ machine - it simply removes a return-point. However, restoring a virtual
+ machine from a snapshot deletes any content that was written to the
+ virtual machine after the time the snapshot was taken."
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create snapshot:
+- ovirt.ovirt.ovirt_snapshot:
+ vm_name: rhel7
+ description: MySnapshot
+ register: snapshot
+
+# Create snapshot and save memory:
+- ovirt.ovirt.ovirt_snapshot:
+ vm_name: rhel7
+ description: SnapWithMem
+ use_memory: true
+ register: snapshot
+
+# Restore snapshot:
+- ovirt.ovirt.ovirt_snapshot:
+ state: restore
+ vm_name: rhel7
+ snapshot_id: "{{ snapshot.id }}"
+
+# Remove snapshot:
+- ovirt.ovirt.ovirt_snapshot:
+ state: absent
+ vm_name: rhel7
+ snapshot_id: "{{ snapshot.id }}"
+
+# Upload local image to disk and attach it to vm:
+# Since Ansible 2.8
+- ovirt.ovirt.ovirt_snapshot:
+ disk_name: mydisk
+ vm_name: myvm
+ upload_image_path: /path/to/mydisk.qcow2
+
+# Download snapshot to local file system:
+# Since Ansible 2.8
+- ovirt.ovirt.ovirt_snapshot:
+ snapshot_id: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+ disk_name: DiskName
+ vm_name: myvm
+ download_image_path: /home/user/mysnapshot.qcow2
+
+# Delete all snapshots older than 2 days
+- ovirt.ovirt.ovirt_snapshot:
+ vm_name: test
+ keep_days_old: 2
+
+- name: Select which disks should be added to the snapshot
+ ovirt.ovirt.ovirt_snapshot:
+ vm_name: test
+ disks:
+ - id: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+ - name: my_disk_name
+'''
+
+
+RETURN = '''
+id:
+ description: ID of the snapshot which is managed
+ returned: On success if snapshot is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+snapshot:
+ description: "Dictionary of all the snapshot attributes. Snapshot attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/snapshot."
+ returned: On success if snapshot is found.
+ type: dict
+snapshots:
+ description: List of deleted snapshots, when keep_days_old is defined and snapshots are older than the given number of days
+ returned: On success, returns the deleted snapshots.
+ type: list
+'''
+
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+
+import os
+import ssl
+import time
+
+from ansible.module_utils.six.moves.http_client import HTTPSConnection, IncompleteRead
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+from datetime import datetime
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ get_entity,
+ ovirt_full_argument_spec,
+ search_by_name,
+ wait,
+ get_id_by_name,
+ get_link_name
+)
+
+
+def transfer(connection, module, direction, transfer_func):
+ transfers_service = connection.system_service().image_transfers_service()
+ transfer = transfers_service.add(
+ otypes.ImageTransfer(
+ image=otypes.Image(
+ id=module.params['disk_id'],
+ ),
+ direction=direction,
+ )
+ )
+ transfer_service = transfers_service.image_transfer_service(transfer.id)
+
+ try:
+ # After adding a new transfer for the disk, the transfer's status will be INITIALIZING.
+ # Wait until the init phase is over. The actual transfer can start when its status is "Transferring".
+ while transfer.phase == otypes.ImageTransferPhase.INITIALIZING:
+ time.sleep(module.params['poll_interval'])
+ transfer = transfer_service.get()
+
+ proxy_url = urlparse(transfer.proxy_url)
+ context = ssl.create_default_context()
+ auth = module.params['auth']
+ if auth.get('insecure'):
+ context.check_hostname = False
+ context.verify_mode = ssl.CERT_NONE
+ elif auth.get('ca_file'):
+ context.load_verify_locations(cafile=auth.get('ca_file'))
+
+ proxy_connection = HTTPSConnection(
+ proxy_url.hostname,
+ proxy_url.port,
+ context=context,
+ )
+
+ transfer_func(
+ transfer_service,
+ proxy_connection,
+ proxy_url,
+ transfer.signed_ticket
+ )
+ return True
+ finally:
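+ # Always finalize the transfer, then wait until it leaves the
+ # transferring/finalizing phases before checking for a failed end state.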
+ transfer_service.finalize()
+ while transfer.phase in [
+ otypes.ImageTransferPhase.TRANSFERRING,
+ otypes.ImageTransferPhase.FINALIZING_SUCCESS,
+ ]:
+ time.sleep(module.params['poll_interval'])
+ transfer = transfer_service.get()
+ if transfer.phase in [
+ otypes.ImageTransferPhase.UNKNOWN,
+ otypes.ImageTransferPhase.FINISHED_FAILURE,
+ otypes.ImageTransferPhase.FINALIZING_FAILURE,
+ otypes.ImageTransferPhase.CANCELLED,
+ ]:
+ raise Exception(
+ "Error occurred while uploading image. The transfer is in %s" % transfer.phase
+ )
+ if module.params.get('logical_unit'):
+ disks_service = connection.system_service().disks_service()
+ wait(
+ service=disks_service.service(module.params['id']),
+ condition=lambda d: d.status == otypes.DiskStatus.OK,
+ wait=module.params['wait'],
+ timeout=module.params['timeout'],
+ )
+
+
+def upload_disk_image(connection, module):
+ def _transfer(transfer_service, proxy_connection, proxy_url, transfer_ticket):
+ BUF_SIZE = 128 * 1024
+ path = module.params['upload_image_path']
+
+ image_size = os.path.getsize(path)
+ proxy_connection.putrequest("PUT", proxy_url.path)
+ proxy_connection.putheader('Content-Length', "%d" % (image_size,))
+ proxy_connection.endheaders()
+ with open(path, "rb") as disk:
+ pos = 0
+ while pos < image_size:
+ to_read = min(image_size - pos, BUF_SIZE)
+ chunk = disk.read(to_read)
+ if not chunk:
+ transfer_service.pause()
+ raise RuntimeError("Unexpected end of file at pos=%d" % pos)
+ proxy_connection.send(chunk)
+ pos += len(chunk)
+
+ return transfer(
+ connection,
+ module,
+ otypes.ImageTransferDirection.UPLOAD,
+ transfer_func=_transfer,
+ )
+
+
+def download_disk_image(connection, module):
+ def _transfer(transfer_service, proxy_connection, proxy_url, transfer_ticket):
+ BUF_SIZE = 128 * 1024
+ transfer_headers = {
+ 'Authorization': transfer_ticket,
+ }
+ proxy_connection.request(
+ 'GET',
+ proxy_url.path,
+ headers=transfer_headers,
+ )
+ r = proxy_connection.getresponse()
+ path = module.params["download_image_path"]
+ image_size = int(r.getheader('Content-Length'))
+ with open(path, "wb") as mydisk:
+ pos = 0
+ while pos < image_size:
+ to_read = min(image_size - pos, BUF_SIZE)
+ chunk = r.read(to_read)
+ if not chunk:
+ raise RuntimeError("Socket disconnected")
+ mydisk.write(chunk)
+ pos += len(chunk)
+
+ return transfer(
+ connection,
+ module,
+ otypes.ImageTransferDirection.DOWNLOAD,
+ transfer_func=_transfer,
+ )
+
+
+def get_disk_attachment(disk, disk_attachments, connection):
+ for disk_attachment in disk_attachments:
+ if get_link_name(connection, disk_attachment.disk) == disk.get('name') or\
+ disk_attachment.disk.id == disk.get('id'):
+ return disk_attachment
+
+
+def create_snapshot(module, vm_service, snapshots_service, connection):
+ changed = False
+ snapshot = get_entity(
+ snapshots_service.snapshot_service(module.params['snapshot_id'])
+ )
+ if snapshot is None:
+ if not module.check_mode:
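+ # Map the requested disks (given by name or id) to their disk-attachment
+ # ids, so only those disks are included in the snapshot.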
+ disk_attachments_id = set(
+ get_disk_attachment(disk, vm_service.disk_attachments_service().list(), connection).id
+ for disk in module.params.get('disks')
+ ) if module.params.get('disks') else None
+
+ snapshot = snapshots_service.add(
+ otypes.Snapshot(
+ description=module.params.get('description'),
+ persist_memorystate=module.params.get('use_memory'),
+ disk_attachments=[otypes.DiskAttachment(disk=otypes.Disk(id=da_id)) for da_id in disk_attachments_id] if disk_attachments_id else None
+ )
+ )
+ changed = True
+ wait(
+ service=snapshots_service.snapshot_service(snapshot.id),
+ condition=lambda snap: snap.snapshot_status == otypes.SnapshotStatus.OK,
+ wait=module.params['wait'],
+ timeout=module.params['timeout'],
+ )
+ return {
+ 'changed': changed,
+ 'id': snapshot.id,
+ 'snapshot': get_dict_of_struct(snapshot),
+ }
+
+
+def remove_snapshot(module, vm_service, snapshots_service, snapshot_id=None):
+ changed = False
+ if not snapshot_id:
+ snapshot_id = module.params['snapshot_id']
+ snapshot = get_entity(
+ snapshots_service.snapshot_service(snapshot_id)
+ )
+
+ if snapshot:
+ snapshot_service = snapshots_service.snapshot_service(snapshot.id)
+ if not module.check_mode:
+ snapshot_service.remove()
+ changed = True
+ wait(
+ service=snapshot_service,
+ condition=lambda snapshot: snapshot is None,
+ wait=module.params['wait'],
+ timeout=module.params['timeout'],
+ )
+
+ return {
+ 'changed': changed,
+ 'id': snapshot.id if snapshot else None,
+ 'snapshot': get_dict_of_struct(snapshot),
+ }
+
+
+def restore_snapshot(module, vm_service, snapshots_service):
+ changed = False
+ snapshot_service = snapshots_service.snapshot_service(
+ module.params['snapshot_id']
+ )
+ snapshot = get_entity(snapshot_service)
+ if snapshot is None:
+ raise Exception(
+ "Snapshot with id '%s' doesn't exist" % module.params['snapshot_id']
+ )
+
+ if snapshot.snapshot_status != otypes.SnapshotStatus.IN_PREVIEW:
+ if not module.check_mode:
+ snapshot_service.restore(
+ restore_memory=module.params.get('use_memory'),
+ )
+ changed = True
+ else:
+ if not module.check_mode:
+ vm_service.commit_snapshot()
+ changed = True
+
+ if changed:
+ wait(
+ service=snapshot_service,
+ condition=lambda snap: snap.snapshot_status == otypes.SnapshotStatus.OK,
+ wait=module.params['wait'],
+ timeout=module.params['timeout'],
+ )
+ return {
+ 'changed': changed,
+ 'id': snapshot.id if snapshot else None,
+ 'snapshot': get_dict_of_struct(snapshot),
+ }
+
+
+def get_snapshot_disk_id(module, snapshots_service):
+ snapshot_service = snapshots_service.snapshot_service(module.params.get('snapshot_id'))
+ snapshot_disks_service = snapshot_service.disks_service()
+
+ disk_id = ''
+ if module.params.get('disk_id'):
+ disk_id = module.params.get('disk_id')
+ elif module.params.get('disk_name'):
+ disk_id = get_id_by_name(snapshot_disks_service, module.params.get('disk_name'))
+ return disk_id
+
+
+def remove_old_snapshots(module, vm_service, snapshots_service):
+ deleted_snapshots = []
+ changed = False
+ date_now = datetime.now()
+ for snapshot in snapshots_service.list():
+ if snapshot.vm is not None and snapshot.vm.name == module.params.get('vm_name'):
+ diff = date_now - snapshot.date.replace(tzinfo=None)
+ if diff.days >= module.params.get('keep_days_old'):
+ snapshot = remove_snapshot(module, vm_service, snapshots_service, snapshot.id).get('snapshot')
+ deleted_snapshots.append(snapshot)
+ changed = True
+ return dict(snapshots=deleted_snapshots, changed=changed)
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['restore', 'present', 'absent'],
+ default='present',
+ ),
+ vm_name=dict(required=True),
+ snapshot_id=dict(default=None),
+ disks=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ name=dict(default=None, type='str'),
+ id=dict(default=None, type='str'),
+ )
+ ),
+ disk_id=dict(default=None),
+ disk_name=dict(default=None),
+ description=dict(default=None),
+ download_image_path=dict(default=None),
+ upload_image_path=dict(default=None),
+ keep_days_old=dict(default=None, type='int'),
+ use_memory=dict(
+ default=None,
+ type='bool',
+ aliases=['restore_memory', 'save_memory'],
+ ),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[
+ ('state', 'absent', ['snapshot_id']),
+ ('state', 'restore', ['snapshot_id']),
+ ]
+ )
+
+ check_sdk(module)
+ ret = {}
+ vm_name = module.params.get('vm_name')
+ auth = module.params['auth']
+ connection = create_connection(auth)
+ vms_service = connection.system_service().vms_service()
+ vm = search_by_name(vms_service, vm_name)
+ if not vm:
+ module.fail_json(
+ msg="Vm '{name}' doesn't exist.".format(name=vm_name),
+ )
+
+ vm_service = vms_service.vm_service(vm.id)
+ snapshots_service = vms_service.vm_service(vm.id).snapshots_service()
+ try:
+ state = module.params['state']
+ if state == 'present':
+ if module.params.get('disk_id') or module.params.get('disk_name'):
+ module.params['disk_id'] = get_snapshot_disk_id(module, snapshots_service)
+ if module.params['upload_image_path']:
+ ret['changed'] = upload_disk_image(connection, module)
+ if module.params['download_image_path']:
+ ret['changed'] = download_disk_image(connection, module)
+ if module.params.get('keep_days_old') is not None:
+ ret = remove_old_snapshots(module, vm_service, snapshots_service)
+ else:
+ ret = create_snapshot(module, vm_service, snapshots_service, connection)
+ elif state == 'restore':
+ ret = restore_snapshot(module, vm_service, snapshots_service)
+ elif state == 'absent':
+ ret = remove_snapshot(module, vm_service, snapshots_service)
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_snapshot_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_snapshot_info.py
new file mode 100644
index 00000000..d9911e4f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_snapshot_info.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_snapshot_info
+short_description: Retrieve information about one or more oVirt/RHV virtual machine snapshots
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV virtual machine snapshots."
+ - This module was called C(ovirt_snapshot_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_snapshot_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_snapshots), which
+ contains a list of snapshots. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ vm:
+ description:
+ - "Name of the VM with snapshot."
+ required: true
+ type: str
+ description:
+ description:
+ - "Description of the snapshot, can be used as glob expression."
+ type: str
+ snapshot_id:
+ description:
+ - "Id of the snapshot we want to retrieve information about."
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all snapshots whose description starts with C(update) for the VM named C(centos7):
+- ovirt.ovirt.ovirt_snapshot_info:
+ vm: centos7
+ description: update*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_snapshots }}"
+'''
+
+RETURN = '''
+ovirt_snapshots:
+ description: "List of dictionaries describing the snapshot. Snapshot attributes are mapped to dictionary keys,
+ all snapshot attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/snapshot."
+ returned: On success.
+ type: list
+'''
+
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ vm=dict(required=True),
+ description=dict(default=None),
+ snapshot_id=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vms_service = connection.system_service().vms_service()
+ vm_name = module.params['vm']
+ vm = search_by_name(vms_service, vm_name)
+ if vm is None:
+ raise Exception("VM '%s' was not found." % vm_name)
+
+ snapshots_service = vms_service.service(vm.id).snapshots_service()
+ if module.params['description']:
+ snapshots = [
+ e for e in snapshots_service.list()
+ if fnmatch.fnmatch(e.description, module.params['description'])
+ ]
+ elif module.params['snapshot_id']:
+ snapshots = [
+ snapshots_service.snapshot_service(module.params['snapshot_id']).get()
+ ]
+ else:
+ snapshots = snapshots_service.list()
+
+ result = dict(
+ ovirt_snapshots=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in snapshots
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_connection.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_connection.py
new file mode 100644
index 00000000..66f07a19
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_connection.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_connection
+short_description: Module to manage storage connections in oVirt
+version_added: "1.0.0"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage storage connections in oVirt"
+options:
+ id:
+ description:
+ - "Id of the storage connection to manage."
+ type: str
+ state:
+ description:
+ - "Should the storage connection be present or absent."
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ storage:
+ description:
+ - "Name of the storage domain to be used with storage connection."
+ type: str
+ address:
+ description:
+ - "Address of the storage server. E.g.: myserver.mydomain.com"
+ type: str
+ path:
+ description:
+ - "Path of the mount point of the storage. E.g.: /path/to/my/data"
+ type: str
+ nfs_version:
+ description:
+ - "NFS version. One of: I(auto), I(v3), I(v4) or I(v4_1)."
+ type: str
+ nfs_timeout:
+ description:
+ - "The time in tenths of a second to wait for a response before retrying NFS requests. Range 0 to 65535."
+ type: int
+ nfs_retrans:
+ description:
+ - "The number of times to retry a request before attempting further recovery actions. Range 0 to 65535."
+ type: int
+ mount_options:
+ description:
+ - "Option which will be passed when mounting storage."
+ type: str
+ password:
+ description:
+ - "A CHAP password for logging into a target."
+ type: str
+ username:
+ description:
+ - "A CHAP username for logging into a target."
+ type: str
+ port:
+ description:
+ - "Port of the iSCSI storage server."
+ type: int
+ target:
+ description:
+ - "The target IQN for the storage device."
+ type: str
+ type:
+ description:
+ - "Storage type. For example: I(nfs), I(iscsi), etc."
+ type: str
+ vfs_type:
+ description:
+ - "Virtual File System type."
+ type: str
+ force:
+ description:
+ - "This parameter is relevant only when updating a connection."
+ - "If I(true) the storage domain don't have to be in I(MAINTENANCE)
+ state, so the storage connection is updated."
+ type: bool
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add new storage connection:
+- ovirt.ovirt.ovirt_storage_connection:
+ storage: myiscsi
+ address: 10.34.63.199
+ target: iqn.2016-08-09.domain-01:nickname
+ port: 3260
+ type: iscsi
+
+# Update the existing storage connection address:
+- ovirt.ovirt.ovirt_storage_connection:
+ id: 26915c96-92ff-47e5-9e77-b581db2f2d36
+ address: 10.34.63.204
+ force: true
+
+# Remove storage connection:
+- ovirt.ovirt.ovirt_storage_connection:
+ id: 26915c96-92ff-47e5-9e77-b581db2f2d36
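+
+# Add a new NFS storage connection (a sketch based on the nfs_* options
+# documented above; the address and path are placeholders):
+- ovirt.ovirt.ovirt_storage_connection:
+ storage: mynfs
+ address: 10.34.63.199
+ path: /path/data
+ type: nfs
+ nfs_version: v4_1
+ nfs_timeout: 100
+ nfs_retrans: 5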
+'''
+
+RETURN = '''
+id:
+ description: ID of the storage connection which is managed
+ returned: On success if storage connection is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+storage_connection:
+ description: "Dictionary of all the storage connection attributes. Storage connection attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/storage_connection."
+ returned: On success if storage connection is found.
+ type: dict
+'''
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ ovirt_full_argument_spec,
+ search_by_name,
+)
+
+
+class StorageConnectionModule(BaseModule):
+
+ def build_entity(self):
+ return otypes.StorageConnection(
+ address=self.param('address'),
+ path=self.param('path'),
+ nfs_version=otypes.NfsVersion(
+ self.param('nfs_version')
+ ) if self.param('nfs_version') is not None else None,
+ nfs_timeo=self.param('nfs_timeout'),
+ nfs_retrans=self.param('nfs_retrans'),
+ mount_options=self.param('mount_options'),
+ password=self.param('password'),
+ username=self.param('username'),
+ port=self.param('port'),
+ target=self.param('target'),
+ type=otypes.StorageType(
+ self.param('type')
+ ) if self.param('type') is not None else None,
+ vfs_type=self.param('vfs_type'),
+ )
+
+ def _get_storage_domain_service(self):
+ sds_service = self._connection.system_service().storage_domains_service()
+ sd = search_by_name(sds_service, self.param('storage'))
+ if sd is None:
+ raise Exception(
+ "Storage '%s' was not found." % self.param('storage')
+ )
+ return sd, sds_service.storage_domain_service(sd.id)
+
+ def post_present(self, entity_id):
+ if self.param('storage'):
+ sd, sd_service = self._get_storage_domain_service()
+ if entity_id not in [
+ sd_conn.id for sd_conn in self._connection.follow_link(sd.storage_connections)
+ ]:
+ scs_service = sd_service.storage_connections_service()
+ if not self._module.check_mode:
+ scs_service.add(
+ connection=otypes.StorageConnection(
+ id=entity_id,
+ ),
+ )
+ self.changed = True
+
+ def pre_remove(self, entity_id):
+ if self.param('storage'):
+ sd, sd_service = self._get_storage_domain_service()
+ if entity_id in [
+ sd_conn.id for sd_conn in self._connection.follow_link(sd.storage_connections)
+ ]:
+ scs_service = sd_service.storage_connections_service()
+ sc_service = scs_service.connection_service(entity_id)
+ if not self._module.check_mode:
+ sc_service.remove()
+ self.changed = True
+
+ def update_check(self, entity):
+ return (
+ equal(self.param('address'), entity.address) and
+ equal(self.param('path'), entity.path) and
+ equal(self.param('nfs_version'), str(entity.nfs_version)) and
+ equal(self.param('nfs_timeout'), entity.nfs_timeo) and
+ equal(self.param('nfs_retrans'), entity.nfs_retrans) and
+ equal(self.param('mount_options'), entity.mount_options) and
+ equal(self.param('username'), entity.username) and
+ equal(self.param('port'), entity.port) and
+ equal(self.param('target'), entity.target) and
+ equal(self.param('type'), str(entity.type)) and
+ equal(self.param('vfs_type'), entity.vfs_type)
+ )
+
+
+def find_sc_by_attributes(module, storage_connections_service):
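+ # File-based connections (nfs, posixfs, glusterfs, localfs) are matched by
+ # address and path; block-based ones (iscsi, fcp) by address and target.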
+ for sd_conn in [
+ sc for sc in storage_connections_service.list()
+ if str(sc.type) == module.params['type']
+ ]:
+ sd_conn_type = str(sd_conn.type)
+ if sd_conn_type in ['nfs', 'posixfs', 'glusterfs', 'localfs']:
+ if (
+ module.params['address'] == sd_conn.address and
+ module.params['path'] == sd_conn.path
+ ):
+ return sd_conn
+ elif sd_conn_type in ['iscsi', 'fcp']:
+ if (
+ module.params['address'] == sd_conn.address and
+ module.params['target'] == sd_conn.target
+ ):
+ return sd_conn
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ id=dict(default=None),
+ address=dict(default=None),
+ path=dict(default=None),
+ nfs_version=dict(default=None),
+ nfs_timeout=dict(default=None, type='int'),
+ nfs_retrans=dict(default=None, type='int'),
+ mount_options=dict(default=None),
+ password=dict(default=None, no_log=True),
+ username=dict(default=None),
+ port=dict(default=None, type='int'),
+ target=dict(default=None),
+ type=dict(default=None),
+ vfs_type=dict(default=None),
+ force=dict(type='bool', default=False),
+ storage=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ storage_connections_service = connection.system_service().storage_connections_service()
+ storage_connection_module = StorageConnectionModule(
+ connection=connection,
+ module=module,
+ service=storage_connections_service,
+ )
+ entity = None
+ if module.params['id'] is None:
+ entity = find_sc_by_attributes(module, storage_connections_service)
+
+ state = module.params['state']
+ if state == 'present':
+ ret = storage_connection_module.create(
+ entity=entity,
+ update_params={'force': True},
+ )
+ storage_connection_module.post_present(ret['id'])
+ elif state == 'absent':
+ storage_connection_module.pre_remove(module.params['id'])
+ ret = storage_connection_module.remove(entity=entity)
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_domain.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_domain.py
new file mode 100644
index 00000000..67430a09
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_domain.py
@@ -0,0 +1,821 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_domain
+short_description: Module to manage storage domains in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Module to manage storage domains in oVirt/RHV"
+options:
+ id:
+ description:
+ - "Id of the storage domain to be imported."
+ type: str
+ name:
+ description:
+ - "Name of the storage domain to manage. (Not required when state is I(imported))"
+ type: str
+ state:
+ description:
+ - "Should the storage domain be present/absent/maintenance/unattached/imported/update_ovf_store"
+ - "I(imported) is supported since version 2.4."
+ - "I(update_ovf_store) is supported since version 2.5, currently if C(wait) is (true), we don't wait for update."
+ choices: ['present', 'absent', 'maintenance', 'unattached', 'imported', 'update_ovf_store']
+ default: present
+ type: str
+ description:
+ description:
+ - "Description of the storage domain."
+ type: str
+ comment:
+ description:
+ - "Comment of the storage domain."
+ type: str
+ data_center:
+ description:
+ - "Data center name where storage domain should be attached."
+ - "This parameter isn't idempotent, it's not possible to change data center of storage domain."
+ type: str
+ domain_function:
+ description:
+ - "Function of the storage domain."
+ - "This parameter isn't idempotent, it's not possible to change domain function of storage domain."
+ choices: ['data', 'iso', 'export']
+ default: 'data'
+ aliases: ['type']
+ type: str
+ host:
+ description:
+ - "Host to be used to mount storage."
+ type: str
+ localfs:
+ description:
+ - "Dictionary with values for localfs storage type:"
+ - "Note that these parameters are not idempotent."
+ suboptions:
+ path:
+ description:
+ - "Path of the mount point. E.g.: /path/to/my/data"
+ type: dict
+ nfs:
+ description:
+ - "Dictionary with values for NFS storage type:"
+ - "Note that these parameters are not idempotent."
+ type: dict
+ suboptions:
+ address:
+ description:
+ - "Address of the NFS server. E.g.: myserver.mydomain.com"
+ path:
+ description:
+ - "Path of the mount point. E.g.: /path/to/my/data"
+ version:
+ description:
+ - "NFS version. One of: I(auto), I(v3), I(v4) or I(v4_1)."
+ timeout:
+ description:
+ - "The time in tenths of a second to wait for a response before retrying NFS requests. Range 0 to 65535."
+ retrans:
+ description:
+ - "The number of times to retry a request before attempting further recovery actions. Range 0 to 65535."
+ mount_options:
+ description:
+ - "Option which will be passed when mounting storage."
+ iscsi:
+ description:
+ - "Dictionary with values for iSCSI storage type:"
+ - "Note that these parameters are not idempotent."
+ type: dict
+ suboptions:
+ address:
+ description:
+ - Address of the iSCSI storage server.
+ port:
+ description:
+ - Port of the iSCSI storage server.
+ target:
+ description:
+ - The target IQN for the storage device.
+ lun_id:
+ description:
+ - LUN id(s).
+ username:
+ description:
+ - A CHAP user name for logging into a target.
+ password:
+ description:
+ - A CHAP password for logging into a target.
+ override_luns:
+ description:
+ - If I(True), iSCSI storage domain LUNs will be overridden before adding.
+ type: bool
+ target_lun_map:
+ description:
+ - List of dictionaries containing targets and LUNs.
+ posixfs:
+ description:
+ - "Dictionary with values for PosixFS storage type:"
+ - "Note that these parameters are not idempotent."
+ type: dict
+ suboptions:
+ path:
+ description:
+ - "Path of the mount point. E.g.: /path/to/my/data"
+ vfs_type:
+ description:
+ - Virtual File System type.
+ mount_options:
+ description:
+ - Option which will be passed when mounting storage.
+ glusterfs:
+ description:
+ - "Dictionary with values for GlusterFS storage type:"
+ - "Note that these parameters are not idempotent."
+ type: dict
+ suboptions:
+ address:
+ description:
+ - "Address of the Gluster server. E.g.: myserver.mydomain.com"
+ path:
+ description:
+ - "Path of the mount point. E.g.: /path/to/my/data"
+ mount_options:
+ description:
+ - Option which will be passed when mounting storage.
+ managed_block_storage:
+ description:
+ - "Dictionary with values for managed block storage type"
+ - "Note: available from ovirt 4.3"
+ type: dict
+ suboptions:
+ driver_options:
+ description:
+ - "The options to be passed when creating a storage domain using a cinder driver."
+ - "List of dictionary containing C(name) and C(value) of driver option"
+ type: list
+ elements: dict
+ driver_sensitive_options:
+ description:
+ - "Parameters containing sensitive information, to be passed when creating a storage domain using a cinder driver."
+ - "List of dictionary containing C(name) and C(value) of driver sensitive option"
+ type: list
+ elements: dict
+ fcp:
+ description:
+ - "Dictionary with values for fibre channel storage type:"
+ - "Note that these parameters are not idempotent."
+ type: dict
+ suboptions:
+ lun_id:
+ description:
+ - LUN id.
+ override_luns:
+ description:
+ - If I(True) FCP storage domain LUNs will be overridden before adding.
+ type: bool
+ wipe_after_delete:
+ description:
+ - "Boolean flag which indicates whether the storage domain should wipe the data after delete."
+ type: bool
+ backup:
+ description:
+ - "Boolean flag which indicates whether the storage domain is configured as backup or not."
+ type: bool
+ critical_space_action_blocker:
+ description:
+ - "Indicates the minimal free space the storage domain should contain in percentages."
+ type: int
+ warning_low_space:
+ description:
+ - "Indicates the minimum percentage of a free space in a storage domain to present a warning."
+ type: int
+ destroy:
+ description:
+ - "Logical remove of the storage domain. If I(true) retains the storage domain's data for import."
+ - "This parameter is relevant only when C(state) is I(absent)."
+ type: bool
+ format:
+ description:
+ - "If I(True) storage domain will be formatted after removing it from oVirt/RHV."
+ - "This parameter is relevant only when C(state) is I(absent)."
+ type: bool
+ discard_after_delete:
+ description:
+ - "If I(True) storage domain blocks will be discarded upon deletion. Enabled by default."
+ - "This parameter is relevant only for block based storage domains."
+ type: bool
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add data NFS storage domain
+- ovirt.ovirt.ovirt_storage_domain:
+ name: data_nfs
+ host: myhost
+ data_center: mydatacenter
+ nfs:
+ address: 10.34.63.199
+ path: /path/data
+
+# Add data NFS storage domain with id for data center
+- ovirt.ovirt.ovirt_storage_domain:
+ name: data_nfs
+ host: myhost
+ data_center: 11111
+ nfs:
+ address: 10.34.63.199
+ path: /path/data
+ mount_options: noexec,nosuid
+
+# Add data localfs storage domain
+- ovirt.ovirt.ovirt_storage_domain:
+ name: data_localfs
+ host: myhost
+ data_center: mydatacenter
+ localfs:
+ path: /path/to/data
+
+# Add data iSCSI storage domain:
+- ovirt.ovirt.ovirt_storage_domain:
+ name: data_iscsi
+ host: myhost
+ data_center: mydatacenter
+ iscsi:
+ target: iqn.2016-08-09.domain-01:nickname
+ lun_id:
+ - 1IET_000d0001
+ - 1IET_000d0002
+ address: 10.34.63.204
+ discard_after_delete: True
+ backup: False
+ critical_space_action_blocker: 5
+ warning_low_space: 10
+
+# Since Ansible 2.5 you can specify multiple targets for storage domain,
+# Add data iSCSI storage domain with multiple targets:
+- ovirt.ovirt.ovirt_storage_domain:
+ name: data_iscsi
+ host: myhost
+ data_center: mydatacenter
+ iscsi:
+ target_lun_map:
+ - target: iqn.2016-08-09.domain-01:nickname
+ lun_id: 1IET_000d0001
+ - target: iqn.2016-08-09.domain-02:nickname
+ lun_id: 1IET_000d0002
+ address: 10.34.63.204
+ discard_after_delete: True
+
+# Add data glusterfs storage domain
+- ovirt.ovirt.ovirt_storage_domain:
+ name: glusterfs_1
+ host: myhost
+ data_center: mydatacenter
+ glusterfs:
+ address: 10.10.10.10
+ path: /path/data
+
+# Create export NFS storage domain:
+- ovirt.ovirt.ovirt_storage_domain:
+ name: myexportdomain
+ domain_function: export
+ host: myhost
+ data_center: mydatacenter
+ nfs:
+ address: 10.34.63.199
+ path: /path/export
+ wipe_after_delete: False
+ backup: True
+ critical_space_action_blocker: 2
+ warning_low_space: 5
+
+# Import export NFS storage domain:
+- ovirt.ovirt.ovirt_storage_domain:
+ state: imported
+ domain_function: export
+ host: myhost
+ data_center: mydatacenter
+ nfs:
+ address: 10.34.63.199
+ path: /path/export
+
+# Import FCP storage domain:
+- ovirt.ovirt.ovirt_storage_domain:
+ state: imported
+ name: data_fcp
+ host: myhost
+ data_center: mydatacenter
+ fcp: {}
+
+# Update OVF_STORE:
+- ovirt.ovirt.ovirt_storage_domain:
+ state: update_ovf_store
+ name: domain
+
+# Create ISO NFS storage domain
+- ovirt.ovirt.ovirt_storage_domain:
+ name: myiso
+ domain_function: iso
+ host: myhost
+ data_center: mydatacenter
+ nfs:
+ address: 10.34.63.199
+ path: /path/iso
+
+# Create managed storage domain
+# Available from ovirt 4.3 and ansible 2.9
+- ovirt.ovirt.ovirt_storage_domain:
+ name: my_managed_domain
+ host: myhost
+ data_center: mydatacenter
+ managed_block_storage:
+ driver_options:
+ - name: rbd_pool
+ value: pool1
+ - name: rbd_user
+ value: admin
+ - name: volume_driver
+ value: cinder.volume.drivers.rbd.RBDDriver
+ - name: rbd_keyring_conf
+ value: /etc/ceph/keyring
+ driver_sensitive_options:
+ - name: secret_password
+ value: password
+
+# Remove storage domain
+- ovirt.ovirt.ovirt_storage_domain:
+ state: absent
+ name: mystorage_domain
+ format: true
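+
+# Add data PosixFS storage domain (a sketch based on the posixfs suboptions
+# documented above; the path, vfs_type and mount options are placeholders):
+- ovirt.ovirt.ovirt_storage_domain:
+ name: data_posixfs
+ host: myhost
+ data_center: mydatacenter
+ posixfs:
+ path: 10.34.63.199:/path/data
+ vfs_type: nfs
+ mount_options: vers=3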
+'''
+
+RETURN = '''
+id:
+ description: ID of the storage domain which is managed
+ returned: On success if storage domain is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+storage_domain:
+ description: "Dictionary of all the storage domain attributes. Storage domain attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/storage_domain."
+ returned: On success if storage domain is found.
+ type: dict
+'''
+
+try:
+ import ovirtsdk4.types as otypes
+
+ from ovirtsdk4.types import StorageDomainStatus as sdstate
+ from ovirtsdk4.types import HostStatus as hoststate
+ from ovirtsdk4.types import DataCenterStatus as dcstatus
+except ImportError:
+ pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ get_entity,
+ get_id_by_name,
+ OvirtRetry,
+ ovirt_full_argument_spec,
+ search_by_name,
+ search_by_attributes,
+ wait,
+)
+
+
+class StorageDomainModule(BaseModule):
+
+ def _get_storage_type(self):
+ for sd_type in ['nfs', 'iscsi', 'posixfs', 'glusterfs', 'fcp', 'localfs', 'managed_block_storage']:
+ if self.param(sd_type) is not None:
+ return sd_type
+
+ def _get_storage(self):
+ for sd_type in ['nfs', 'iscsi', 'posixfs', 'glusterfs', 'fcp', 'localfs', 'managed_block_storage']:
+ if self.param(sd_type) is not None:
+ return self.param(sd_type)
+
+ def _login(self, storage_type, storage):
+ if storage_type == 'iscsi':
+ hosts_service = self._connection.system_service().hosts_service()
+ host_id = get_id_by_name(hosts_service, self.param('host'))
+ if storage.get('target'):
+ hosts_service.host_service(host_id).iscsi_login(
+ iscsi=otypes.IscsiDetails(
+ username=storage.get('username'),
+ password=storage.get('password'),
+ address=storage.get('address'),
+ target=storage.get('target'),
+ ),
+ )
+ elif storage.get('target_lun_map'):
+ for target in [m['target'] for m in storage.get('target_lun_map')]:
+ hosts_service.host_service(host_id).iscsi_login(
+ iscsi=otypes.IscsiDetails(
+ username=storage.get('username'),
+ password=storage.get('password'),
+ address=storage.get('address'),
+ target=target,
+ ),
+ )
+
+ def __target_lun_map(self, storage):
+ if storage.get('target'):
+ lun_ids = storage.get('lun_id') if isinstance(storage.get('lun_id'), list) else [(storage.get('lun_id'))]
+ return [(lun_id, storage.get('target')) for lun_id in lun_ids]
+ elif storage.get('target_lun_map'):
+ return [(target_map.get('lun_id'), target_map.get('target')) for target_map in storage.get('target_lun_map')]
+ else:
+ lun_ids = storage.get('lun_id') if isinstance(storage.get('lun_id'), list) else [(storage.get('lun_id'))]
+ return [(lun_id, None) for lun_id in lun_ids]
+
+ def build_entity(self):
+ storage_type = self._get_storage_type()
+ storage = self._get_storage()
+ self._login(storage_type, storage)
+
+ return otypes.StorageDomain(
+ name=self.param('name'),
+ description=self.param('description'),
+ comment=self.param('comment'),
+ wipe_after_delete=self.param('wipe_after_delete'),
+ backup=self.param('backup'),
+ critical_space_action_blocker=self.param('critical_space_action_blocker'),
+ warning_low_space_indicator=self.param('warning_low_space'),
+ import_=True if self.param('state') == 'imported' else None,
+ id=self.param('id') if self.param('state') == 'imported' else None,
+ type=otypes.StorageDomainType(storage_type if storage_type == 'managed_block_storage' else self.param('domain_function')),
+ host=otypes.Host(name=self.param('host')),
+ discard_after_delete=self.param('discard_after_delete'),
+ storage=otypes.HostStorage(
+ driver_options=[
+ otypes.Property(
+ name=do.get('name'),
+ value=do.get('value')
+ ) for do in storage.get('driver_options')
+ ] if storage.get('driver_options') else None,
+ driver_sensitive_options=[
+ otypes.Property(
+ name=dso.get('name'),
+ value=dso.get('value')
+ ) for dso in storage.get('driver_sensitive_options')
+ ] if storage.get('driver_sensitive_options') else None,
+ type=otypes.StorageType(storage_type),
+ logical_units=[
+ otypes.LogicalUnit(
+ id=lun_id,
+ address=storage.get('address'),
+ port=int(storage.get('port', 3260)),
+ target=target,
+ username=storage.get('username'),
+ password=storage.get('password'),
+ ) for lun_id, target in self.__target_lun_map(storage)
+ ] if storage_type in ['iscsi', 'fcp'] else None,
+ override_luns=storage.get('override_luns'),
+ mount_options=storage.get('mount_options'),
+ vfs_type=(
+ 'glusterfs'
+ if storage_type in ['glusterfs'] else storage.get('vfs_type')
+ ),
+ address=storage.get('address'),
+ path=storage.get('path'),
+ nfs_retrans=storage.get('retrans'),
+ nfs_timeo=storage.get('timeout'),
+ nfs_version=otypes.NfsVersion(
+ storage.get('version')
+ ) if storage.get('version') else None,
+ ) if storage_type is not None else None
+ )
+
+ def _find_attached_datacenter_name(self, sd_name):
+ """
+ Finds the name of the datacenter that a given
+ storage domain is attached to.
+
+ Args:
+ sd_name (str): Storage Domain name
+
+ Returns:
+ str: Data Center name
+
+ Raises:
+ Exception: In case the storage domain is not attached to
+ an active Datacenter
+ """
+ dcs_service = self._connection.system_service().data_centers_service()
+ dc = search_by_attributes(dcs_service, storage=sd_name)
+ if dc is None:
+ raise Exception(
+ "Can't bring storage to state `%s`, because it seems that"
+ "it is not attached to any datacenter"
+ % self.param('state')
+ )
+ else:
+ if dc.status == dcstatus.UP:
+ return dc.name
+ else:
+ raise Exception(
+ "Can't bring storage to state `%s`, because Datacenter "
+ "%s is not UP" % (self.param('state'), dc.name)
+ )
+
+ def _attached_sds_service(self, dc_name):
+ # Get data center object of the storage domain:
+ dcs_service = self._connection.system_service().data_centers_service()
+
+ # Search by data center name; if it does not exist, try to search by GUID.
+ dc = search_by_name(dcs_service, dc_name)
+ if dc is None:
+ dc = get_entity(dcs_service.service(dc_name))
+ if dc is None:
+ return None
+
+ dc_service = dcs_service.data_center_service(dc.id)
+ return dc_service.storage_domains_service()
+
+ def _attached_sd_service(self, storage_domain):
+ dc_name = self.param('data_center')
+ if not dc_name:
+ # Find the DC, where the storage resides:
+ dc_name = self._find_attached_datacenter_name(storage_domain.name)
+ attached_sds_service = self._attached_sds_service(dc_name)
+ attached_sd_service = attached_sds_service.storage_domain_service(storage_domain.id)
+ return attached_sd_service
+
+ def _maintenance(self, storage_domain):
+ attached_sd_service = self._attached_sd_service(storage_domain)
+ attached_sd = get_entity(attached_sd_service)
+
+ if attached_sd and attached_sd.status != sdstate.MAINTENANCE:
+ if not self._module.check_mode:
+ attached_sd_service.deactivate()
+ self.changed = True
+
+ wait(
+ service=attached_sd_service,
+ condition=lambda sd: sd.status == sdstate.MAINTENANCE,
+ wait=self.param('wait'),
+ timeout=self.param('timeout'),
+ )
+
+ def _unattach(self, storage_domain):
+ attached_sd_service = self._attached_sd_service(storage_domain)
+ attached_sd = get_entity(attached_sd_service)
+
+ if attached_sd and attached_sd.status == sdstate.MAINTENANCE:
+ if not self._module.check_mode:
+ # Detach the storage domain:
+ attached_sd_service.remove()
+ self.changed = True
+ # Wait until storage domain is detached:
+ wait(
+ service=attached_sd_service,
+ condition=lambda sd: sd is None,
+ wait=self.param('wait'),
+ timeout=self.param('timeout'),
+ )
+
+ def pre_remove(self, storage_domain):
+ # In case the user chose to destroy the storage domain there is no need to
+ # move it to maintenance or detach it, it should simply be removed from the DB.
+ # Also if the storage domain is already unattached, skip this step.
+ if storage_domain.status == sdstate.UNATTACHED or self.param('destroy'):
+ return
+ # Before removing storage domain we need to put it into maintenance state:
+ self._maintenance(storage_domain)
+
+ # Before removing storage domain we need to detach it from data center:
+ self._unattach(storage_domain)
+
+ def post_create_check(self, sd_id):
+ storage_domain = self._service.service(sd_id).get()
+ dc_name = self.param('data_center')
+ if not dc_name:
+ # Find the DC, where the storage resides:
+ dc_name = self._find_attached_datacenter_name(storage_domain.name)
+ self._service = self._attached_sds_service(dc_name)
+
+ # If storage domain isn't attached, attach it:
+ attached_sd_service = self._service.service(storage_domain.id)
+ if get_entity(attached_sd_service) is None:
+ self._service.add(
+ otypes.StorageDomain(
+ id=storage_domain.id,
+ ),
+ )
+ self.changed = True
+ # Wait until storage domain is in maintenance:
+ wait(
+ service=attached_sd_service,
+ condition=lambda sd: sd.status == sdstate.ACTIVE,
+ wait=self.param('wait'),
+ timeout=self.param('timeout'),
+ )
+
+ def unattached_pre_action(self, storage_domain):
+ dc_name = self.param('data_center')
+ if not dc_name:
+ # Find the DC, where the storage resides:
+ dc_name = self._find_attached_datacenter_name(storage_domain.name)
+ self._service = self._attached_sds_service(dc_name)
+ self._maintenance(storage_domain)
+
+ def update_check(self, entity):
+ return (
+ equal(self.param('comment'), entity.comment) and
+ equal(self.param('description'), entity.description) and
+ equal(self.param('backup'), entity.backup) and
+ equal(self.param('critical_space_action_blocker'), entity.critical_space_action_blocker) and
+ equal(self.param('discard_after_delete'), entity.discard_after_delete) and
+ equal(self.param('wipe_after_delete'), entity.wipe_after_delete) and
+ equal(self.param('warning_low_space'), entity.warning_low_space_indicator)
+ )
+
+
+def failed_state(sd):
+ return sd.status in [sdstate.UNKNOWN, sdstate.INACTIVE]
+
+
+def control_state(sd_module):
+ sd = sd_module.search_entity()
+ if sd is None:
+ return
+
+ sd_service = sd_module._service.service(sd.id)
+
+ # In the case of no status returned, it's an attached storage domain.
+ # Redetermine the corresponding service and entity:
+ if sd.status is None:
+ sd_service = sd_module._attached_sd_service(sd)
+ sd = get_entity(sd_service)
+
+ if sd.status == sdstate.LOCKED:
+ wait(
+ service=sd_service,
+ condition=lambda sd: sd.status != sdstate.LOCKED,
+ fail_condition=failed_state,
+ )
+
+ if failed_state(sd):
+ raise Exception("Not possible to manage storage domain '%s'." % sd.name)
+ elif sd.status == sdstate.ACTIVATING:
+ wait(
+ service=sd_service,
+ condition=lambda sd: sd.status == sdstate.ACTIVE,
+ fail_condition=failed_state,
+ )
+ elif sd.status == sdstate.DETACHING:
+ wait(
+ service=sd_service,
+ condition=lambda sd: sd.status == sdstate.UNATTACHED,
+ fail_condition=failed_state,
+ )
+ elif sd.status == sdstate.PREPARING_FOR_MAINTENANCE:
+ wait(
+ service=sd_service,
+ condition=lambda sd: sd.status == sdstate.MAINTENANCE,
+ fail_condition=failed_state,
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent', 'maintenance', 'unattached', 'imported', 'update_ovf_store'],
+ default='present',
+ ),
+ id=dict(default=None),
+ name=dict(default=None),
+ description=dict(default=None),
+ comment=dict(default=None),
+ data_center=dict(default=None),
+ domain_function=dict(choices=['data', 'iso', 'export'], default='data', aliases=['type']),
+ host=dict(default=None),
+ localfs=dict(default=None, type='dict'),
+ nfs=dict(default=None, type='dict'),
+ iscsi=dict(default=None, type='dict'),
+ managed_block_storage=dict(default=None, type='dict', options=dict(
+ driver_options=dict(type='list', elements='dict'),
+ driver_sensitive_options=dict(type='list', no_log=True, elements='dict'))),
+ posixfs=dict(default=None, type='dict'),
+ glusterfs=dict(default=None, type='dict'),
+ fcp=dict(default=None, type='dict'),
+ wipe_after_delete=dict(type='bool', default=None),
+ backup=dict(type='bool', default=None),
+ critical_space_action_blocker=dict(type='int', default=None),
+ warning_low_space=dict(type='int', default=None),
+ destroy=dict(type='bool', default=None),
+ format=dict(type='bool', default=None),
+ discard_after_delete=dict(type='bool', default=None)
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ storage_domains_service = connection.system_service().storage_domains_service()
+ storage_domains_module = StorageDomainModule(
+ connection=connection,
+ module=module,
+ service=storage_domains_service,
+ )
+
+ state = module.params['state']
+ control_state(storage_domains_module)
+ if state == 'absent':
+ # Pick a random available host when the host parameter is missing
+ host_param = module.params['host']
+ if not host_param:
+ host = search_by_attributes(connection.system_service().hosts_service(), status='up')
+ if host is None:
+ raise Exception(
+ "Not possible to remove storage domain '%s' "
+ "because no host found with status `up`." % module.params['name']
+ )
+ host_param = host.name
+ ret = storage_domains_module.remove(
+ destroy=module.params['destroy'],
+ format=module.params['format'],
+ host=host_param,
+ )
+ elif state == 'present' or state == 'imported':
+ sd_id = storage_domains_module.create()['id']
+ storage_domains_module.post_create_check(sd_id)
+ ret = storage_domains_module.action(
+ action='activate',
+ action_condition=lambda s: s.status == sdstate.MAINTENANCE,
+ wait_condition=lambda s: s.status == sdstate.ACTIVE,
+ fail_condition=failed_state,
+ search_params={'id': sd_id} if state == 'imported' else None
+ )
+ elif state == 'maintenance':
+ sd_id = storage_domains_module.create()['id']
+ storage_domains_module.post_create_check(sd_id)
+
+ ret = OvirtRetry.backoff(tries=5, delay=1, backoff=2)(
+ storage_domains_module.action
+ )(
+ action='deactivate',
+ action_condition=lambda s: s.status == sdstate.ACTIVE,
+ wait_condition=lambda s: s.status == sdstate.MAINTENANCE,
+ fail_condition=failed_state,
+ )
+ elif state == 'unattached':
+ ret = storage_domains_module.create()
+ storage_domains_module.pre_remove(
+ storage_domain=storage_domains_service.service(ret['id']).get()
+ )
+ ret['changed'] = storage_domains_module.changed
+ elif state == 'update_ovf_store':
+ ret = storage_domains_module.action(
+ action='update_ovf_store'
+ )
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_domain_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_domain_info.py
new file mode 100644
index 00000000..5bb2b279
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_domain_info.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_domain_info
+short_description: Retrieve information about one or more oVirt/RHV storage domains
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV storage domains."
+ - This module was called C(ovirt_storage_domain_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_storage_domain_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_storage_domains), which
+ contains a list of storage domains. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search storage domain X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all storage domains whose names start with C(data) and
+# belong to data center C(west):
+- ovirt.ovirt.ovirt_storage_domain_info:
+ pattern: name=data* and datacenter=west
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_storage_domains }}"
+'''
+
+RETURN = '''
+ovirt_storage_domains:
+ description: "List of dictionaries describing the storage domains. Storage_domain attributes are mapped to dictionary keys,
+ all storage domains attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/storage_domain."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ storage_domains_service = connection.system_service().storage_domains_service()
+ storage_domains = storage_domains_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_storage_domains=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in storage_domains
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_template_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_template_info.py
new file mode 100644
index 00000000..3230c7fc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_template_info.py
@@ -0,0 +1,130 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_template_info
+short_description: Retrieve information about one or more oVirt/RHV templates related to a storage domain.
+version_added: "1.0.0"
+author: "Maor Lipchuk (@machacekondra)"
+description:
+ - "Retrieve information about one or more oVirt/RHV templates relate to a storage domain."
+ - This module was called C(ovirt_storage_template_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_storage_template_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_storage_templates), which
+ contains a list of templates. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ unregistered:
+ description:
+ - "Flag which indicates whether to get unregistered templates which contain one or more
+ disks which reside on a storage domain or diskless templates."
+ type: bool
+ default: false
+ max:
+ description:
+ - "Sets the maximum number of templates to return. If not specified all the templates are returned."
+ type: int
+ storage_domain:
+ description:
+ - "The storage domain name where the templates should be listed."
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all templates which are related to a storage domain and
+# are unregistered:
+- ovirt.ovirt.ovirt_storage_template_info:
+ unregistered: true
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_storage_templates }}"
+'''
+
+RETURN = '''
+ovirt_storage_templates:
+ description: "List of dictionaries describing the Templates. Template attributes are mapped to dictionary keys,
+ all Templates attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ get_id_by_name
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ storage_domain=dict(default=None),
+ max=dict(default=None, type='int'),
+ unregistered=dict(default=False, type='bool'),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ storage_domains_service = connection.system_service().storage_domains_service()
+ sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
+ storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
+ templates_service = storage_domain_service.templates_service()
+
+ # List either the unregistered templates or the registered ones:
+ if module.params.get('unregistered'):
+ templates = templates_service.list(unregistered=True)
+ else:
+ templates = templates_service.list(max=module.params['max'])
+ result = dict(
+ ovirt_storage_templates=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in templates
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_vm_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_vm_info.py
new file mode 100644
index 00000000..03c241a1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_storage_vm_info.py
@@ -0,0 +1,130 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_vm_info
+short_description: Retrieve information about one or more oVirt/RHV virtual machines related to a storage domain.
+version_added: "1.0.0"
+author: "Maor Lipchuk (@machacekondra)"
+description:
+ - "Retrieve information about one or more oVirt/RHV virtual machines relate to a storage domain."
+ - This module was called C(ovirt_storage_vm_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_storage_vm_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_storage_vms), which
+ contains a list of virtual machines. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ unregistered:
+ description:
+ - "Flag which indicates whether to get unregistered virtual machines which contain one or more
+ disks which reside on a storage domain or diskless virtual machines."
+ type: bool
+ default: false
+ max:
+ description:
+ - "Sets the maximum number of virtual machines to return. If not specified all the virtual machines are returned."
+ type: int
+ storage_domain:
+ description:
+ - "The storage domain name where the virtual machines should be listed."
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all VMs which are related to a storage domain and
+# are unregistered:
+- ovirt.ovirt.ovirt_storage_vm_info:
+ unregistered: true
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_storage_vms }}"
+'''
+
+RETURN = '''
+ovirt_storage_vms:
+ description: "List of dictionaries describing the VMs. VM attributes are mapped to dictionary keys,
+ all VMs attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ get_id_by_name
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ storage_domain=dict(default=None),
+ max=dict(default=None, type='int'),
+ unregistered=dict(default=False, type='bool'),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ storage_domains_service = connection.system_service().storage_domains_service()
+ sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
+ storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
+ vms_service = storage_domain_service.vms_service()
+
+ # List either the unregistered VMs or the registered ones; the C(max)
+ # parameter caps the number of registered VMs returned:
+ if module.params.get('unregistered'):
+ vms = vms_service.list(unregistered=True)
+ else:
+ vms = vms_service.list(max=module.params['max'])
+ result = dict(
+ ovirt_storage_vms=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in vms
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_system_option_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_system_option_info.py
new file mode 100644
index 00000000..45bb19b5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_system_option_info.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_system_option_info
+short_description: Retrieve information about one oVirt/RHV system option.
+version_added: "1.3.0"
+author: "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one oVirt/RHV system options."
+notes:
+ - "This module returns a variable C(ovirt_system_option_info), which
+ contains a dict of system option. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ name:
+ description:
+ - "Name of system option."
+ type: str
+ version:
+ description:
+ - "The version of the option."
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- ovirt.ovirt.ovirt_system_option_info:
+ name: "ServerCPUList"
+ version: "4.4"
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_system_option }}"
+'''
+
+RETURN = '''
+ovirt_system_option:
+ description: "Dictionary describing the system option. Option attributes are mapped to dictionary keys,
+ all option attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/system_option."
+ returned: On success.
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ name=dict(default=None),
+ version=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ options_service = connection.system_service().options_service()
+ option_service = options_service.option_service(module.params.get('name'))
+
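+ # The engine returns a plain HTTP 404 for an unknown option name;
+ # translate it into a clearer error message below: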
+ try:
+ option = option_service.get(version=module.params.get('version'))
+ except Exception as e:
+ if str(e) == "HTTP response code is 404.":
+ raise ValueError("Could not find the option with name '{0}'".format(module.params.get('name')))
+ raise Exception("Unexpected error: '{0}'".format(e))
+
+ result = dict(
+ ovirt_system_option=get_dict_of_struct(
+ struct=option,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ),
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_tag.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_tag.py
new file mode 100644
index 00000000..bf47f9a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_tag.py
@@ -0,0 +1,265 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_tag
+short_description: Module to manage tags in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "This module manage tags in oVirt/RHV. It can also manage assignments
+ of those tags to entities."
+options:
+ id:
+ description:
+ - "ID of the tag to manage."
+ type: str
+ name:
+ description:
+ - "Name of the tag to manage."
+ required: true
+ type: str
+ state:
+ description:
+ - "Should the tag be present/absent/attached/detached."
+ - "C(Note): I(attached) and I(detached) states are supported since version 2.4."
+ choices: ['present', 'absent', 'attached', 'detached']
+ default: present
+ type: str
+ description:
+ description:
+ - "Description of the tag to manage."
+ type: str
+ parent:
+ description:
+ - "Name of the parent tag."
+ type: str
+ vms:
+ description:
+ - "List of the VMs names, which should have assigned this tag."
+ type: list
+ elements: str
+ hosts:
+ description:
+ - "List of the hosts names, which should have assigned this tag."
+ type: list
+ elements: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create the tag (if it does not exist) and assign it to VMs vm1 and vm2:
+- ovirt.ovirt.ovirt_tag:
+ name: mytag
+ vms:
+ - vm1
+ - vm2
+
+# Attach a tag to VM 'vm3', keeping the other tags already attached to the VM:
+- ovirt.ovirt.ovirt_tag:
+ name: mytag
+ state: attached
+ vms:
+ - vm3
+
+# Detach a tag from VM 'vm3', keeping the other tags already attached to the VM:
+- ovirt.ovirt.ovirt_tag:
+ name: mytag
+ state: detached
+ vms:
+ - vm3
+
+# Detach all VMs from the tag:
+- ovirt.ovirt.ovirt_tag:
+ name: mytag
+ vms: []
+
+# Remove tag
+- ovirt.ovirt.ovirt_tag:
+ state: absent
+ name: mytag
+
+# Change Tag Name
+- ovirt.ovirt.ovirt_tag:
+ id: 00000000-0000-0000-0000-000000000000
+ name: "new_tag_name"
+'''
+
+RETURN = '''
+id:
+ description: ID of the tag which is managed
+ returned: On success if tag is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+tag:
+ description: "Dictionary of all the tag attributes. Tag attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/tag."
+ returned: On success if tag is found.
+ type: dict
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ get_id_by_name,
+ ovirt_full_argument_spec,
+)
+
+
+class TagsModule(BaseModule):
+
+ def build_entity(self):
+ return otypes.Tag(
+ id=self._module.params['id'],
+ name=self._module.params['name'],
+ description=self._module.params['description'],
+ parent=otypes.Tag(
+ name=self._module.params['parent'],
+ ) if self._module.params['parent'] else None,
+ )
+
+ def post_create(self, entity):
+ self.update_check(entity)
+
+ def _update_tag_assignments(self, entity, name):
+ if self._module.params[name] is None:
+ return
+
+ state = self.param('state')
+ entities_service = getattr(self._connection.system_service(), '%s_service' % name)()
+ current_entities = [
+ entity.name
+ for entity in entities_service.list(search='tag=%s' % self._module.params['name'])
+ ]
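+ # For state 'present' the parameter list is authoritative: the tag is
+ # attached to every listed entity and detached from any other entity
+ # that currently carries it (see the unassign step below).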
+ # Assign tags:
+ if state in ['present', 'attached', 'detached']:
+ for entity_name in self._module.params[name]:
+ entity_id = get_id_by_name(entities_service, entity_name)
+ tags_service = entities_service.service(entity_id).tags_service()
+ current_tags = [tag.name for tag in tags_service.list()]
+ # Assign the tag:
+ if state in ['attached', 'present']:
+ if self._module.params['name'] not in current_tags:
+ if not self._module.check_mode:
+ tags_service.add(
+ tag=otypes.Tag(
+ name=self._module.params['name'],
+ ),
+ )
+ self.changed = True
+ # Detach the tag:
+ elif state == 'detached':
+ if self._module.params['name'] in current_tags:
+ tag_id = get_id_by_name(tags_service, self.param('name'))
+ if not self._module.check_mode:
+ tags_service.tag_service(tag_id).remove()
+ self.changed = True
+
+ # Unassign tags:
+ if state == 'present':
+ for entity_name in [e for e in current_entities if e not in self._module.params[name]]:
+ if not self._module.check_mode:
+ entity_id = get_id_by_name(entities_service, entity_name)
+ tags_service = entities_service.service(entity_id).tags_service()
+ tag_id = get_id_by_name(tags_service, self.param('name'))
+ tags_service.tag_service(tag_id).remove()
+ self.changed = True
+
+ def _get_parent(self, entity):
+ parent = None
+ if entity.parent:
+ parent = self._connection.follow_link(entity.parent).name
+ return parent
+
+ def update_check(self, entity):
+ self._update_tag_assignments(entity, 'vms')
+ self._update_tag_assignments(entity, 'hosts')
+ return (
+ equal(self._module.params.get('description'), entity.description) and
+ equal(self._module.params.get('name'), entity.name) and
+ equal(self._module.params.get('parent'), self._get_parent(entity))
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent', 'attached', 'detached'],
+ default='present',
+ ),
+ id=dict(default=None),
+ name=dict(required=True),
+ description=dict(default=None),
+ parent=dict(default=None),
+ vms=dict(default=None, type='list', elements='str'),
+ hosts=dict(default=None, type='list', elements='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ tags_service = connection.system_service().tags_service()
+ tags_module = TagsModule(
+ connection=connection,
+ module=module,
+ service=tags_service,
+ )
+
+ state = module.params['state']
+ if state in ['present', 'attached', 'detached']:
+ ret = tags_module.create()
+ elif state == 'absent':
+ ret = tags_module.remove()
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_tag_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_tag_info.py
new file mode 100644
index 00000000..6b78e7d8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_tag_info.py
@@ -0,0 +1,158 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_tag_info
+short_description: Retrieve information about one or more oVirt/RHV tags
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV tags."
+ - This module was called C(ovirt_tag_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_tag_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_tags), which
+ contains a list of tags. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ name:
+ description:
+ - "Name of the tag which should be listed."
+ type: str
+ vm:
+ description:
+ - "Name of the VM, which tags should be listed."
+ type: str
+ host:
+ description:
+ - "Name of the host, which tags should be listed."
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all tags whose names start with C(tag):
+- ovirt.ovirt.ovirt_tag_info:
+ name: tag*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_tags }}"
+
+# Gather information about all tags, which are assigned to VM C(postgres):
+- ovirt.ovirt.ovirt_tag_info:
+ vm: postgres
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_tags }}"
+
+# Gather information about all tags, which are assigned to host C(west):
+- ovirt.ovirt.ovirt_tag_info:
+ host: west
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_tags }}"
+'''
+
+RETURN = '''
+ovirt_tags:
+ description: "List of dictionaries describing the tags. Tags attributes are mapped to dictionary keys,
+ all tags attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/tag."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ name=dict(default=None),
+ host=dict(default=None),
+ vm=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ tags_service = connection.system_service().tags_service()
+ tags = []
+ all_tags = tags_service.list()
+ if module.params['name']:
+ tags.extend([
+ t for t in all_tags
+ if fnmatch.fnmatch(t.name, module.params['name'])
+ ])
+ if module.params['host']:
+ hosts_service = connection.system_service().hosts_service()
+ host = search_by_name(hosts_service, module.params['host'])
+ if host is None:
+ raise Exception("Host '%s' was not found." % module.params['host'])
+ tags.extend(hosts_service.host_service(host.id).tags_service().list())
+ if module.params['vm']:
+ vms_service = connection.system_service().vms_service()
+ vm = search_by_name(vms_service, module.params['vm'])
+ if vm is None:
+ raise Exception("Vm '%s' was not found." % module.params['vm'])
+ tags.extend(vms_service.vm_service(vm.id).tags_service().list())
+
+ if not (module.params['vm'] or module.params['host'] or module.params['name']):
+ tags = all_tags
+
+ result = dict(
+ ovirt_tags=[
+ get_dict_of_struct(
+ struct=t,
+ connection=connection,
+ fetch_nested=module.params['fetch_nested'],
+ attributes=module.params['nested_attributes'],
+ ) for t in tags
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_template.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_template.py
new file mode 100644
index 00000000..c7884686
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_template.py
@@ -0,0 +1,1086 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_template
+short_description: Module to manage virtual machine templates in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Module to manage virtual machine templates in oVirt/RHV."
+options:
+ name:
+ description:
+ - "Name of the template to manage."
+ type: str
+ id:
+ description:
+ - "ID of the template to be registered."
+ type: str
+ state:
+ description:
+ - "Should the template be present/absent/exported/imported/registered.
+ When C(state) is I(registered) and the unregistered template's name
+ belongs to an already registered in engine template in the same DC
+ then we fail to register the unregistered template."
+ choices: ['present', 'absent', 'exported', 'imported', 'registered']
+ default: present
+ type: str
+ vm:
+ description:
+ - "Name of the VM, which will be used to create template."
+ type: str
+ description:
+ description:
+ - "Description of the template."
+ type: str
+ cpu_profile:
+ description:
+ - "CPU profile to be set to template."
+ type: str
+ cluster:
+ description:
+ - "Name of the cluster, where template should be created/imported."
+ type: str
+ allow_partial_import:
+ description:
+ - "Boolean indication whether to allow partial registration of a template when C(state) is registered."
+ type: bool
+ vnic_profile_mappings:
+ description:
+ - "Mapper which maps an external virtual NIC profile to one that exists in the engine when C(state) is registered.
+ vnic_profile is described by the following dictionary:"
+ suboptions:
+ source_network_name:
+ description:
+ - The network name of the source network.
+ source_profile_name:
+ description:
+ - The profile name related to the source network.
+ target_profile_id:
+ description:
+ - The ID of the target profile to be mapped to in the engine.
+ type: list
+ elements: dict
+ cluster_mappings:
+ description:
+ - "Mapper which maps cluster name between Template's OVF and the destination cluster this Template should be registered to,
+ relevant when C(state) is registered.
+ Cluster mapping is described by the following dictionary:"
+ suboptions:
+ source_name:
+ description:
+ - The name of the source cluster.
+ dest_name:
+ description:
+ - The name of the destination cluster.
+ type: list
+ elements: dict
+ role_mappings:
+ description:
+ - "Mapper which maps role name between Template's OVF and the destination role this Template should be registered to,
+ relevant when C(state) is registered.
+ Role mapping is described by the following dictionary:"
+ suboptions:
+ source_name:
+ description:
+ - The name of the source role.
+ dest_name:
+ description:
+ - The name of the destination role.
+ type: list
+ elements: dict
+ domain_mappings:
+ description:
+ - "Mapper which maps aaa domain name between Template's OVF and the destination aaa domain this Template should be registered to,
+ relevant when C(state) is registered.
+ The aaa domain mapping is described by the following dictionary:"
+ suboptions:
+ source_name:
+ description:
+ - The name of the source aaa domain.
+ dest_name:
+ description:
+ - The name of the destination aaa domain.
+ type: list
+ elements: dict
+ exclusive:
+ description:
+ - "When C(state) is I(exported) this parameter indicates if the existing templates with the
+ same name should be overwritten."
+ type: bool
+ export_domain:
+ description:
+ - "When C(state) is I(exported) or I(imported) this parameter specifies the name of the
+ export storage domain."
+ type: str
+ image_provider:
+ description:
+ - "When C(state) is I(imported) this parameter specifies the name of the image provider to be used."
+ type: str
+ image_disk:
+ description:
+ - "When C(state) is I(imported) and C(image_provider) is used this parameter specifies the name of disk
+ to be imported as template."
+ aliases: ['glance_image_disk_name']
+ type: str
+ io_threads:
+ description:
+ - "Number of IO threads used by virtual machine. I(0) means IO threading disabled."
+ type: int
+ template_image_disk_name:
+ description:
+ - "When C(state) is I(imported) and C(image_provider) is used this parameter specifies the new name for imported disk,
+ if omitted then I(image_disk) name is used by default.
+ This parameter is used only in case of importing disk image from Glance domain."
+ type: str
+ storage_domain:
+ description:
+ - "When C(state) is I(imported) this parameter specifies the name of the destination data storage domain.
+ When C(state) is I(registered) this parameter specifies the name of the data storage domain of the unregistered template."
+ type: str
+ clone_permissions:
+ description:
+ - "If I(True) then the permissions of the VM (only the direct ones, not the inherited ones)
+ will be copied to the created template."
+ - "This parameter is used only when C(state) I(present)."
+ type: bool
+ default: False
+ seal:
+ description:
+ - "'Sealing' is an operation that erases all machine-specific configurations from a filesystem:
+ This includes SSH keys, UDEV rules, MAC addresses, system ID, hostname, etc.
+ If I(true) subsequent virtual machines made from this template will avoid configuration inheritance."
+ - "This parameter is used only when C(state) I(present)."
+ default: False
+ type: bool
+ operating_system:
+ description:
+ - Operating system of the template, for example 'rhel_8x64'.
+ - Default value is set by oVirt/RHV engine.
+ - Use the M(ovirt.ovirt.ovirt_vm_os_info) module to obtain the current list.
+ type: str
+ memory:
+ description:
+ - Amount of memory of the template. Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
+ type: str
+ memory_guaranteed:
+ description:
+ - Amount of minimal guaranteed memory of the template.
+ Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
+ - C(memory_guaranteed) parameter can't be lower than C(memory) parameter.
+ type: str
+ memory_max:
+ description:
+ - Upper bound of template memory up to which memory hot-plug can be performed.
+ Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
+ type: str
+ version:
+ description:
+ - "C(name) - The name of this version."
+ - "C(number) - The index of this version in the versions hierarchy of the template. Used for editing of sub template."
+ type: dict
+ clone_name:
+ description:
+ - Name for importing Template from storage domain.
+ - If not defined, C(name) will be used.
+ type: str
+ usb_support:
+ description:
+ - "I(True) enable USB support, I(False) to disable it. By default is chosen by oVirt/RHV engine."
+ type: bool
+ timezone:
+ description:
+ - Sets time zone offset of the guest hardware clock.
+ - For example C(Etc/GMT)
+ type: str
+ sso:
+ description:
+ - "I(True) enable Single Sign On by Guest Agent, I(False) to disable it. By default is chosen by oVirt/RHV engine."
+ type: bool
+ soundcard_enabled:
+ description:
+ - "If I(true), the sound card is added to the virtual machine."
+ type: bool
+ smartcard_enabled:
+ description:
+ - "If I(true), use smart card authentication."
+ type: bool
+ cloud_init:
+ description:
+ - Dictionary with values for Unix-like Virtual Machine initialization using cloud init.
+ type: dict
+ suboptions:
+ host_name:
+ description:
+ - Hostname to be set to Virtual Machine when deployed.
+ timezone:
+ description:
+ - Timezone to be set to Virtual Machine when deployed.
+ user_name:
+ description:
+ - Username to be used to set the password on the Virtual Machine when deployed.
+ root_password:
+ description:
+ - Password to be set for user specified by C(user_name) parameter.
+ authorized_ssh_keys:
+ description:
+ - Use these SSH keys to log in to the Virtual Machine.
+ regenerate_ssh_keys:
+ description:
+ - If I(True) SSH keys will be regenerated on Virtual Machine.
+ type: bool
+ custom_script:
+ description:
+ - Cloud-init script which will be executed on Virtual Machine when deployed.
+ - This is appended to the end of the cloud-init script generated by any other options.
+ - For further information, refer to cloud-init User-Data documentation.
+ dns_servers:
+ description:
+ - DNS servers to be configured on Virtual Machine, maximum of two, space-separated.
+ dns_search:
+ description:
+ - DNS search domains to be configured on Virtual Machine.
+ nic_boot_protocol:
+ description:
+ - Set boot protocol of the network interface of Virtual Machine.
+ choices: ['none', 'dhcp', 'static']
+ nic_ip_address:
+ description:
+ - If boot protocol is static, set this IP address to network interface of Virtual Machine.
+ nic_netmask:
+ description:
+ - If boot protocol is static, set this netmask to network interface of Virtual Machine.
+ nic_gateway:
+ description:
+ - If boot protocol is static, set this gateway to network interface of Virtual Machine.
+ nic_name:
+ description:
+ - Set name to network interface of Virtual Machine.
+ cloud_init_nics:
+ description:
+ - List of dictionaries representing network interfaces to be set up by cloud-init.
+ - This option is used when the user needs to set up multiple network interfaces via cloud-init.
+ - If one network interface is enough, the user should use the C(cloud_init) I(nic_*) parameters. The C(cloud_init) I(nic_*) parameters
+ are merged with the C(cloud_init_nics) parameters.
+ type: list
+ elements: dict
+ suboptions:
+ nic_boot_protocol:
+ description:
+ - Set boot protocol of the network interface of Virtual Machine. Can be one of C(none), C(dhcp) or C(static).
+ nic_ip_address:
+ description:
+ - If boot protocol is static, set this IP address to network interface of Virtual Machine.
+ nic_netmask:
+ description:
+ - If boot protocol is static, set this netmask to network interface of Virtual Machine.
+ nic_gateway:
+ description:
+ - If boot protocol is static, set this gateway to network interface of Virtual Machine.
+ nic_name:
+ description:
+ - Set name to network interface of Virtual Machine.
+ ballooning_enabled:
+ description:
+ - "If I(true), use memory ballooning."
+ - "Memory balloon is a guest device, which may be used to re-distribute / reclaim the host memory
+ based on VM needs in a dynamic way. In this way it's possible to create memory over commitment states."
+ type: bool
+ nics:
+ description:
+ - List of NICs which should be attached to the Virtual Machine. A NIC is described by the following dictionary.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Name of the NIC.
+ profile_name:
+ description:
+ - Profile name where NIC should be attached.
+ interface:
+ description:
+ - Type of the network interface.
+ choices: ['virtio', 'e1000', 'rtl8139']
+ default: 'virtio'
+ mac_address:
+ description:
+ - Custom MAC address of the network interface; by default it is obtained from the MAC pool.
+ sysprep:
+ description:
+ - Dictionary with values for Windows Virtual Machine initialization using sysprep.
+ type: dict
+ suboptions:
+ host_name:
+ description:
+ - Hostname to be set to Virtual Machine when deployed.
+ active_directory_ou:
+ description:
+ - Active Directory Organizational Unit, to be used for login of user.
+ org_name:
+ description:
+ - Organization name to be set to Windows Virtual Machine.
+ domain:
+ description:
+ - Domain to be set to Windows Virtual Machine.
+ timezone:
+ description:
+ - Timezone to be set to Windows Virtual Machine.
+ ui_language:
+ description:
+ - UI language of the Windows Virtual Machine.
+ system_locale:
+ description:
+ - System localization of the Windows Virtual Machine.
+ input_locale:
+ description:
+ - Input localization of the Windows Virtual Machine.
+ windows_license_key:
+ description:
+ - License key to be set to Windows Virtual Machine.
+ user_name:
+ description:
+ - Username to be used to set the password on the Windows Virtual Machine.
+ root_password:
+ description:
+ - Password to be set for the username on the Windows Virtual Machine.
+ custom_script:
+ description:
+ - A custom Sysprep definition in the format of a complete unattended installation answer file.
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create template from vm
+- ovirt.ovirt.ovirt_template:
+ cluster: Default
+ name: mytemplate
+ vm: rhel7
+ cpu_profile: Default
+ description: Test
+
+# Import template
+- ovirt.ovirt.ovirt_template:
+ state: imported
+ name: mytemplate
+ export_domain: myexport
+ storage_domain: mystorage
+ cluster: mycluster
+
+# Remove template
+- ovirt.ovirt.ovirt_template:
+ state: absent
+ name: mytemplate
+
+# Change Template Name
+- ovirt.ovirt.ovirt_template:
+ id: 00000000-0000-0000-0000-000000000000
+ name: "new_template_name"
+
+# Register template
+- ovirt.ovirt.ovirt_template:
+ state: registered
+ storage_domain: mystorage
+ cluster: mycluster
+ name: mytemplate
+
+# Register template using id
+- ovirt.ovirt.ovirt_template:
+ state: registered
+ storage_domain: mystorage
+ cluster: mycluster
+ id: 1111-1111-1111-1111
+
+# Register template, allowing partial import
+- ovirt.ovirt.ovirt_template:
+ state: registered
+ storage_domain: mystorage
+ allow_partial_import: "True"
+ cluster: mycluster
+ id: 1111-1111-1111-1111
+
+# Register template with vnic profile mappings
+- ovirt.ovirt.ovirt_template:
+ state: registered
+ storage_domain: mystorage
+ cluster: mycluster
+ id: 1111-1111-1111-1111
+ vnic_profile_mappings:
+ - source_network_name: mynetwork
+ source_profile_name: mynetwork
+ target_profile_id: 3333-3333-3333-3333
+ - source_network_name: mynetwork2
+ source_profile_name: mynetwork2
+ target_profile_id: 4444-4444-4444-4444
+
+# Register template with mapping
+- ovirt.ovirt.ovirt_template:
+ state: registered
+ storage_domain: mystorage
+ cluster: mycluster
+ id: 1111-1111-1111-1111
+ role_mappings:
+ - source_name: Role_A
+ dest_name: Role_B
+ domain_mappings:
+ - source_name: Domain_A
+ dest_name: Domain_B
+ cluster_mappings:
+ - source_name: cluster_A
+ dest_name: cluster_B
+
+# Import image from Glance as a template
+- ovirt.ovirt.ovirt_template:
+ state: imported
+ name: mytemplate
+ image_disk: "centos7"
+ template_image_disk_name: centos7_from_glance
+ image_provider: "glance_domain"
+ storage_domain: mystorage
+ cluster: mycluster
+
+# Edit template subversion
+- ovirt.ovirt.ovirt_template:
+ cluster: mycluster
+ name: mytemplate
+ vm: rhel7
+ version:
+ number: 2
+ name: subversion
+
+# Create new template subversion
+- ovirt.ovirt.ovirt_template:
+ cluster: mycluster
+ name: mytemplate
+ vm: rhel7
+ version:
+ name: subversion
+
+- name: Template with cloud init
+ ovirt.ovirt.ovirt_template:
+ name: mytemplate
+ cluster: Default
+ vm: rhel8
+ memory: 1GiB
+ cloud_init:
+ dns_servers: '8.8.8.8 8.8.4.4'
+ nic_boot_protocol: static
+ nic_ip_address: 10.34.60.86
+ nic_netmask: 255.255.252.0
+ nic_gateway: 10.34.63.254
+ nic_name: eth1
+ host_name: example.com
+ custom_script: |
+ write_files:
+ - content: |
+ Hello, world!
+ path: /tmp/greeting.txt
+ permissions: '0644'
+ user_name: root
+ root_password: super_password
+
+- name: Template with cloud init, with multiple network interfaces
+ ovirt.ovirt.ovirt_template:
+ name: mytemplate
+ cluster: mycluster
+ vm: rhel8
+ cloud_init_nics:
+ - nic_name: eth0
+ nic_boot_protocol: dhcp
+ - nic_name: eth1
+ nic_boot_protocol: static
+ nic_ip_address: 10.34.60.86
+ nic_netmask: 255.255.252.0
+ nic_gateway: 10.34.63.254
+
+- name: Template with timezone and nic
+ ovirt.ovirt.ovirt_template:
+ cluster: MyCluster
+ name: mytemplate
+ vm: rhel8
+ timezone: America/Godthab
+ memory_max: 2GiB
+ nics:
+ - name: nic1
+
+- name: Template with sysprep
+ ovirt.ovirt.ovirt_template:
+ name: windows2012R2_AD
+ cluster: Default
+ vm: windows2012
+ memory: 3GiB
+ sysprep:
+ host_name: windowsad.example.com
+ user_name: Administrator
+ root_password: SuperPassword123
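+
+# Export a template to an export storage domain, overwriting a template of
+# the same name if one exists (a minimal sketch; names are illustrative):
+- ovirt.ovirt.ovirt_template:
+ state: exported
+ name: mytemplate
+ export_domain: myexport
+ exclusive: true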
+'''
+
+RETURN = '''
+id:
+ description: ID of the template which is managed
+ returned: On success if template is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+template:
+ description: "Dictionary of all the template attributes. Template attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template."
+ returned: On success if template is found.
+ type: dict
+'''
+
+import time
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ convert_to_bytes,
+ create_connection,
+ equal,
+ get_dict_of_struct,
+ get_link_name,
+ get_id_by_name,
+ ovirt_full_argument_spec,
+ search_by_attributes,
+ search_by_name,
+)
+
+
+class TemplatesModule(BaseModule):
+
+ def __init__(self, *args, **kwargs):
+ super(TemplatesModule, self).__init__(*args, **kwargs)
+ self._initialization = None
+
+ def build_entity(self):
+ return otypes.Template(
+ id=self._module.params['id'],
+ name=self._module.params['name'],
+ cluster=otypes.Cluster(
+ name=self._module.params['cluster']
+ ) if self._module.params['cluster'] else None,
+ vm=otypes.Vm(
+ name=self._module.params['vm']
+ ) if self._module.params['vm'] else None,
+ description=self._module.params['description'],
+ cpu_profile=otypes.CpuProfile(
+ id=search_by_name(
+ self._connection.system_service().cpu_profiles_service(),
+ self._module.params['cpu_profile'],
+ ).id
+ ) if self._module.params['cpu_profile'] else None,
+ display=otypes.Display(
+ smartcard_enabled=self.param('smartcard_enabled')
+ ) if self.param('smartcard_enabled') is not None else None,
+ os=otypes.OperatingSystem(
+ type=self.param('operating_system'),
+ ) if self.param('operating_system') else None,
+ memory=convert_to_bytes(
+ self.param('memory')
+ ) if self.param('memory') else None,
+ soundcard_enabled=self.param('soundcard_enabled'),
+ usb=(
+ otypes.Usb(enabled=self.param('usb_support'))
+ ) if self.param('usb_support') is not None else None,
+ sso=(
+ otypes.Sso(
+ methods=[otypes.Method(id=otypes.SsoMethod.GUEST_AGENT)] if self.param('sso') else []
+ )
+ ) if self.param('sso') is not None else None,
+ time_zone=otypes.TimeZone(
+ name=self.param('timezone'),
+ ) if self.param('timezone') else None,
+ version=otypes.TemplateVersion(
+ base_template=self._get_base_template(),
+ version_name=self.param('version').get('name'),
+ ) if self.param('version') else None,
+ memory_policy=otypes.MemoryPolicy(
+ guaranteed=convert_to_bytes(self.param('memory_guaranteed')),
+ ballooning=self.param('ballooning_enabled'),
+ max=convert_to_bytes(self.param('memory_max')),
+ ) if any((
+ self.param('memory_guaranteed'),
+ self.param('ballooning_enabled'),
+ self.param('memory_max')
+ )) else None,
+ io=otypes.Io(
+ threads=self.param('io_threads'),
+ ) if self.param('io_threads') is not None else None,
+ initialization=self.get_initialization(),
+ )
+
+ def _get_base_template(self):
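+ # The base template of a version chain is the template whose
+ # version_number is 1; new sub-versions reference it via TemplateVersion.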
+ templates = self._connection.system_service().templates_service().list()
+ for template in templates:
+ if template.version.version_number == 1 and template.name == self.param('name'):
+ return otypes.Template(
+ id=template.id
+ )
+
+ def post_update(self, entity):
+ self.post_present(entity.id)
+
+ def post_present(self, entity_id):
+ # After creation of the template, attach NICs:
+ entity = self._service.service(entity_id).get()
+ self.__attach_nics(entity)
+
+ def __get_vnic_profile_id(self, nic):
+ """
+ Return VNIC profile ID looked up by it's name, because there can be
+ more VNIC profiles with same name, other criteria of filter is cluster.
+ """
+ vnics_service = self._connection.system_service().vnic_profiles_service()
+ clusters_service = self._connection.system_service().clusters_service()
+ cluster = search_by_name(clusters_service, self.param('cluster'))
+ profiles = [
+ profile for profile in vnics_service.list()
+ if profile.name == nic.get('profile_name')
+ ]
+ cluster_networks = [
+ net.id for net in self._connection.follow_link(cluster.networks)
+ ]
+ try:
+ return next(
+ profile.id for profile in profiles
+ if profile.network.id in cluster_networks
+ )
+ except StopIteration:
+ raise Exception(
+ "Profile '%s' was not found in cluster '%s'" % (
+ nic.get('profile_name'),
+ self.param('cluster')
+ )
+ )
+
+ def __attach_nics(self, entity):
+ # Attach NICs to the template, if specified:
+ nics_service = self._service.service(entity.id).nics_service()
+ for nic in self.param('nics'):
+ if search_by_name(nics_service, nic.get('name')) is None:
+ if not self._module.check_mode:
+ nics_service.add(
+ otypes.Nic(
+ name=nic.get('name'),
+ interface=otypes.NicInterface(
+ nic.get('interface', 'virtio')
+ ),
+ vnic_profile=otypes.VnicProfile(
+ id=self.__get_vnic_profile_id(nic),
+ ) if nic.get('profile_name') else None,
+ mac=otypes.Mac(
+ address=nic.get('mac_address')
+ ) if nic.get('mac_address') else None,
+ )
+ )
+ self.changed = True
+
+ def get_initialization(self):
+ if self._initialization is not None:
+ return self._initialization
+
+ sysprep = self.param('sysprep')
+ cloud_init = self.param('cloud_init')
+ cloud_init_nics = self.param('cloud_init_nics') or []
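+ # The C(cloud_init) I(nic_*) parameters are merged with C(cloud_init_nics)
+ # by appending the cloud_init dict as one more NIC configuration entry: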
+ if cloud_init is not None:
+ cloud_init_nics.append(cloud_init)
+
+ if cloud_init or cloud_init_nics:
+ self._initialization = otypes.Initialization(
+ nic_configurations=[
+ otypes.NicConfiguration(
+ boot_protocol=otypes.BootProtocol(
+ nic.pop('nic_boot_protocol').lower()
+ ) if nic.get('nic_boot_protocol') else None,
+ name=nic.pop('nic_name', None),
+ on_boot=True,
+ ip=otypes.Ip(
+ address=nic.pop('nic_ip_address', None),
+ netmask=nic.pop('nic_netmask', None),
+ gateway=nic.pop('nic_gateway', None),
+ ) if (
+ nic.get('nic_gateway') is not None or
+ nic.get('nic_netmask') is not None or
+ nic.get('nic_ip_address') is not None
+ ) else None,
+ )
+ for nic in cloud_init_nics
+ if (
+ nic.get('nic_gateway') is not None or
+ nic.get('nic_netmask') is not None or
+ nic.get('nic_ip_address') is not None or
+ nic.get('nic_boot_protocol') is not None
+ )
+ ] if cloud_init_nics else None,
+ **cloud_init
+ )
+ elif sysprep:
+ self._initialization = otypes.Initialization(
+ **sysprep
+ )
+ return self._initialization
+
+ def update_check(self, entity):
+ template_display = entity.display
+ return (
+ equal(self._module.params.get('cluster'), get_link_name(self._connection, entity.cluster)) and
+ equal(self._module.params.get('description'), entity.description) and
+ equal(self.param('operating_system'), str(entity.os.type)) and
+ equal(self.param('name'), str(entity.name)) and
+ equal(self.param('smartcard_enabled'), getattr(template_display, 'smartcard_enabled', False)) and
+ equal(self.param('soundcard_enabled'), entity.soundcard_enabled) and
+ equal(self.param('ballooning_enabled'), entity.memory_policy.ballooning) and
+ equal(self.param('sso'), True if entity.sso.methods else False) and
+ equal(self.param('timezone'), getattr(entity.time_zone, 'name', None)) and
+ equal(self.param('usb_support'), entity.usb.enabled) and
+ equal(convert_to_bytes(self.param('memory_guaranteed')), entity.memory_policy.guaranteed) and
+ equal(convert_to_bytes(self.param('memory_max')), entity.memory_policy.max) and
+ equal(convert_to_bytes(self.param('memory')), entity.memory) and
+ equal(self._module.params.get('cpu_profile'), get_link_name(self._connection, entity.cpu_profile)) and
+ equal(self.param('io_threads'), entity.io.threads)
+ )
+
+ def _get_export_domain_service(self):
+ provider_name = self._module.params['export_domain'] or self._module.params['image_provider']
+ export_sds_service = self._connection.system_service().storage_domains_service()
+ export_sd = search_by_name(export_sds_service, provider_name)
+ if export_sd is None:
+ raise ValueError(
+ "Export storage domain/Image Provider '%s' wasn't found." % provider_name
+ )
+
+ return export_sds_service.service(export_sd.id)
+
+ def post_export_action(self, entity):
+ self._service = self._get_export_domain_service().templates_service()
+
+ def post_import_action(self, entity):
+ self._service = self._connection.system_service().templates_service()
+
+
+def _get_role_mappings(module):
+ roleMappings = list()
+
+ for roleMapping in module.params['role_mappings']:
+ roleMappings.append(
+ otypes.RegistrationRoleMapping(
+ from_=otypes.Role(
+ name=roleMapping['source_name'],
+ ) if roleMapping['source_name'] else None,
+ to=otypes.Role(
+ name=roleMapping['dest_name'],
+ ) if roleMapping['dest_name'] else None,
+ )
+ )
+ return roleMappings
+
+
+def _get_domain_mappings(module):
+ domainMappings = list()
+
+ for domainMapping in module.params['domain_mappings']:
+ domainMappings.append(
+ otypes.RegistrationDomainMapping(
+ from_=otypes.Domain(
+ name=domainMapping['source_name'],
+ ) if domainMapping['source_name'] else None,
+ to=otypes.Domain(
+ name=domainMapping['dest_name'],
+ ) if domainMapping['dest_name'] else None,
+ )
+ )
+ return domainMappings
+
+
+def _get_cluster_mappings(module):
+ clusterMappings = list()
+
+ for clusterMapping in module.params['cluster_mappings']:
+ clusterMappings.append(
+ otypes.RegistrationClusterMapping(
+ from_=otypes.Cluster(
+ name=clusterMapping['source_name'],
+ ),
+ to=otypes.Cluster(
+ name=clusterMapping['dest_name'],
+ ),
+ )
+ )
+ return clusterMappings
+
+
+def _get_vnic_profile_mappings(module):
+ vnicProfileMappings = list()
+
+ for vnicProfileMapping in module.params['vnic_profile_mappings']:
+ vnicProfileMappings.append(
+ otypes.VnicProfileMapping(
+ source_network_name=vnicProfileMapping['source_network_name'],
+ source_network_profile_name=vnicProfileMapping['source_profile_name'],
+ target_vnic_profile=otypes.VnicProfile(
+ id=vnicProfileMapping['target_profile_id'],
+ ) if vnicProfileMapping['target_profile_id'] else None,
+ )
+ )
+
+ return vnicProfileMappings
+
+
+def find_subversion_template(module, templates_service):
+ version = module.params.get('version')
+ templates = templates_service.list()
+ for template in templates:
+ if version.get('number') == template.version.version_number and module.params.get('name') == template.name:
+ return template
+
+ # The user specified a version number that does not exist:
+ raise ValueError(
+ "Template with name '%s' and version '%s' in cluster '%s' was not found" % (
+ module.params['name'],
+ module.params['version']['number'],
+ module.params['cluster'],
+ )
+ )
+
+
+def searchable_attributes(module):
+ """
+ Return all searchable template attributes passed to module.
+ """
+ attributes = {
+ 'name': module.params.get('name'),
+ 'cluster': module.params.get('cluster'),
+ }
+ return dict((k, v) for k, v in attributes.items() if v is not None)
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent', 'exported', 'imported', 'registered'],
+ default='present',
+ ),
+ id=dict(default=None),
+ name=dict(default=None),
+ vm=dict(default=None),
+ timezone=dict(type='str'),
+ description=dict(default=None),
+ sso=dict(type='bool'),
+ ballooning_enabled=dict(type='bool', default=None),
+ cluster=dict(default=None),
+ usb_support=dict(type='bool'),
+ allow_partial_import=dict(default=None, type='bool'),
+ cpu_profile=dict(default=None),
+ clone_permissions=dict(type='bool'),
+ export_domain=dict(default=None),
+ storage_domain=dict(default=None),
+ exclusive=dict(type='bool'),
+ clone_name=dict(default=None),
+ image_provider=dict(default=None),
+ soundcard_enabled=dict(type='bool', default=None),
+ smartcard_enabled=dict(type='bool', default=None),
+ image_disk=dict(default=None, aliases=['glance_image_disk_name']),
+ io_threads=dict(type='int', default=None),
+ template_image_disk_name=dict(default=None),
+ version=dict(default=None, type='dict'),
+ seal=dict(type='bool'),
+ vnic_profile_mappings=dict(default=[], type='list', elements='dict'),
+ cluster_mappings=dict(default=[], type='list', elements='dict'),
+ role_mappings=dict(default=[], type='list', elements='dict'),
+ domain_mappings=dict(default=[], type='list', elements='dict'),
+ operating_system=dict(type='str'),
+ memory=dict(type='str'),
+ memory_guaranteed=dict(type='str'),
+ memory_max=dict(type='str'),
+ nics=dict(type='list', default=[], elements='dict'),
+ cloud_init=dict(type='dict'),
+ cloud_init_nics=dict(type='list', default=[], elements='dict'),
+ sysprep=dict(type='dict'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[['id', 'name']],
+ )
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ templates_service = connection.system_service().templates_service()
+ templates_module = TemplatesModule(
+ connection=connection,
+ module=module,
+ service=templates_service,
+ )
+
+ entity = None
+ if module.params['version'] is not None and module.params['version'].get('number') is not None:
+ entity = find_subversion_template(module, templates_service)
+
+ state = module.params['state']
+ if state == 'present':
+ force_create = False
+ if entity is None and module.params['version'] is not None:
+ force_create = True
+
+ ret = templates_module.create(
+ entity=entity,
+                # When the user wants to create a new template subversion, we must
+                # force-create the template: it already exists, but a new version of it should be created.
+ force_create=force_create,
+ result_state=otypes.TemplateStatus.OK,
+ search_params=searchable_attributes(module),
+ clone_permissions=module.params['clone_permissions'],
+ seal=module.params['seal'],
+ )
+ elif state == 'absent':
+ ret = templates_module.remove(entity=entity)
+ elif state == 'exported':
+ template = templates_module.search_entity()
+ if entity is not None:
+ template = entity
+ export_service = templates_module._get_export_domain_service()
+ export_template = search_by_attributes(export_service.templates_service(), id=template.id)
+
+ ret = templates_module.action(
+ entity=template,
+ action='export',
+ action_condition=lambda t: export_template is None or module.params['exclusive'],
+ wait_condition=lambda t: t is not None,
+ post_action=templates_module.post_export_action,
+ storage_domain=otypes.StorageDomain(id=export_service.get().id),
+ exclusive=module.params['exclusive'],
+ )
+ elif state == 'imported':
+ template = templates_module.search_entity()
+ if entity is not None:
+ template = entity
+ if template and module.params['clone_name'] is None:
+ ret = templates_module.create(
+ result_state=otypes.TemplateStatus.OK,
+ )
+ else:
+ kwargs = {}
+ if module.params['image_provider']:
+ kwargs.update(
+ disk=otypes.Disk(
+ name=module.params['template_image_disk_name'] or module.params['image_disk']
+ ),
+ template=otypes.Template(
+ name=module.params['name'] if module.params['clone_name'] is None else module.params['clone_name'],
+ ),
+ clone=True if module.params['clone_name'] is not None else False,
+ import_as_template=True,
+ )
+
+ if module.params['image_disk']:
+ # We need to refresh storage domain to get list of images:
+ templates_module._get_export_domain_service().images_service().list()
+
+ glance_service = connection.system_service().openstack_image_providers_service()
+ image_provider = search_by_name(glance_service, module.params['image_provider'])
+ images_service = glance_service.service(image_provider.id).images_service()
+ else:
+ images_service = templates_module._get_export_domain_service().templates_service()
+ template_name = module.params['image_disk'] or module.params['name']
+ entity = search_by_name(images_service, template_name)
+ if entity is None:
+ raise Exception("Image/template '%s' was not found." % template_name)
+
+ images_service.service(entity.id).import_(
+ storage_domain=otypes.StorageDomain(
+ name=module.params['storage_domain']
+ ) if module.params['storage_domain'] else None,
+ cluster=otypes.Cluster(
+ name=module.params['cluster']
+ ) if module.params['cluster'] else None,
+ **kwargs
+ )
+ # Wait for template to appear in system:
+ template = templates_module.wait_for_import(
+ condition=lambda t: t.status == otypes.TemplateStatus.OK
+ )
+ if template is None:
+                raise TimeoutError("Image/template '%s' could not be imported. Try again with a larger timeout." % template_name)
+ ret = templates_module.create(result_state=otypes.TemplateStatus.OK)
+ elif state == 'registered':
+ storage_domains_service = connection.system_service().storage_domains_service()
+ # Find the storage domain with unregistered template:
+ sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
+ storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
+ templates_service = storage_domain_service.templates_service()
+
+ # Find the unregistered Template we want to register:
+ templates = templates_service.list(unregistered=True)
+ template = next(
+ (t for t in templates if (t.id == module.params['id'] or t.name == module.params['name'])),
+ None
+ )
+ changed = False
+ if template is None:
+ template = templates_module.search_entity()
+ if template is None:
+ raise ValueError(
+ "Template '%s(%s)' wasn't found." % (module.params['name'], module.params['id'])
+ )
+ else:
+ # Register the template into the system:
+ changed = True
+ template_service = templates_service.template_service(template.id)
+ template_service.register(
+ allow_partial_import=module.params['allow_partial_import'],
+ cluster=otypes.Cluster(
+ name=module.params['cluster']
+ ) if module.params['cluster'] else None,
+ vnic_profile_mappings=_get_vnic_profile_mappings(module)
+ if module.params['vnic_profile_mappings'] else None,
+ registration_configuration=otypes.RegistrationConfiguration(
+ cluster_mappings=_get_cluster_mappings(module),
+ role_mappings=_get_role_mappings(module),
+ domain_mappings=_get_domain_mappings(module),
+ ) if (module.params['cluster_mappings']
+ or module.params['role_mappings']
+ or module.params['domain_mappings']) else None
+ )
+
+ if module.params['wait']:
+ template = templates_module.wait_for_import()
+ else:
+ # Fetch template to initialize return.
+ template = template_service.get()
+ ret = templates_module.create(result_state=otypes.TemplateStatus.OK)
+ ret = {
+ 'changed': changed,
+ 'id': template.id,
+ 'template': get_dict_of_struct(template)
+ }
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_template_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_template_info.py
new file mode 100644
index 00000000..8da88be1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_template_info.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_template_info
+short_description: Retrieve information about one or more oVirt/RHV templates
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV templates."
+ - This module was called C(ovirt_template_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_template_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_templates), which
+ contains a list of templates. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search template X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all templates whose names start with C(centos) and
+# belong to data center C(west):
+- ovirt.ovirt.ovirt_template_info:
+ pattern: name=centos* and datacenter=west
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_templates }}"
+'''
+
+RETURN = '''
+ovirt_templates:
+ description: "List of dictionaries describing the templates. Template attributes are mapped to dictionary keys,
+ all templates attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ templates_service = connection.system_service().templates_service()
+ templates = templates_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_templates=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in templates
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_user.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_user.py
new file mode 100644
index 00000000..341e1331
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_user.py
@@ -0,0 +1,178 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_user
+short_description: Module to manage users in oVirt/RHV
+version_added: "1.0.0"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage users in oVirt/RHV."
+options:
+ name:
+ description:
+ - "Name of the user to manage. In most LDAPs it's I(uid) of the user, but in Active Directory you must specify I(UPN) of the user."
+ required: true
+ type: str
+ state:
+ description:
+ - "Should the user be present/absent."
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ authz_name:
+ description:
+ - "Authorization provider of the user. In previous versions of oVirt/RHV known as domain."
+ required: true
+ aliases: ['domain']
+ type: str
+ namespace:
+ description:
+ - "Namespace where the user resides. When using the authorization provider that stores users in the LDAP server,
+ this attribute equals the naming context of the LDAP server."
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add user user1 from authorization provider example.com-authz
+- ovirt.ovirt.ovirt_user:
+ name: user1
+ domain: example.com-authz
+
+# Add user user1 from authorization provider example.com-authz
+# In case of Active Directory specify UPN:
+- ovirt.ovirt.ovirt_user:
+ name: user1@ad2.example.com
+ domain: example.com-authz
+
+# Remove user user1 with authorization provider example.com-authz
+- ovirt.ovirt.ovirt_user:
+ state: absent
+ name: user1
+ authz_name: example.com-authz
+'''
+
+RETURN = '''
+id:
+ description: ID of the user which is managed
+ returned: On success if user is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+user:
+ description: "Dictionary of all the user attributes. User attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/user."
+ returned: On success if user is found.
+ type: dict
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ check_params,
+ create_connection,
+ ovirt_full_argument_spec,
+)
+
+
+def username(module):
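+    # Compose the engine-side user name from 'name' and 'authz_name',
+    # e.g. 'user1' + 'example.com-authz' -> 'user1@example.com-authz'
+    # (example values taken from the EXAMPLES section above).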
+ return '{0}@{1}'.format(module.params['name'], module.params['authz_name'])
+
+
+class UsersModule(BaseModule):
+
+ def build_entity(self):
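+        """
+        Build the otypes.User entity sent to the engine, combining the plain
+        user name with its authorization provider (authz_name).
+        """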
+ return otypes.User(
+ domain=otypes.Domain(
+ name=self._module.params['authz_name']
+ ),
+ user_name=username(self._module),
+ principal=self._module.params['name'],
+ namespace=self._module.params['namespace'],
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ name=dict(required=True),
+ authz_name=dict(required=True, aliases=['domain']),
+ namespace=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+ check_params(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ users_service = connection.system_service().users_service()
+ users_module = UsersModule(
+ connection=connection,
+ module=module,
+ service=users_service,
+ )
+
+ state = module.params['state']
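+        # Note: 'usrname' (not 'username') below is the field name the oVirt
+        # search backend uses for users; it is not a typo.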
+ if state == 'present':
+ ret = users_module.create(
+ search_params={
+ 'usrname': username(module),
+ }
+ )
+ elif state == 'absent':
+ ret = users_module.remove(
+ search_params={
+ 'usrname': username(module),
+ }
+ )
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_user_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_user_info.py
new file mode 100644
index 00000000..251814e6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_user_info.py
@@ -0,0 +1,109 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_user_info
+short_description: Retrieve information about one or more oVirt/RHV users
+version_added: "1.0.0"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Retrieve information about one or more oVirt/RHV users."
+ - This module was called C(ovirt_user_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_user_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_users), which
+ contains a list of users. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search user X use following pattern: name=X"
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all users whose first names start with C(john):
+- ovirt.ovirt.ovirt_user_info:
+ pattern: name=john*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_users }}"
+'''
+
+RETURN = '''
+ovirt_users:
+ description: "List of dictionaries describing the users. User attributes are mapped to dictionary keys,
+ all users attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/user."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ users_service = connection.system_service().users_service()
+ users = users_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_users=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in users
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vm.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vm.py
new file mode 100644
index 00000000..6a7a0d9c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vm.py
@@ -0,0 +1,2784 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_vm
+short_description: Module to manage Virtual Machines in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+  - This module manages the whole lifecycle of the Virtual Machine (VM) in oVirt/RHV.
+  - Since a VM can hold many states in oVirt/RHV, see the notes section for how the states of the VM are handled.
+options:
+ name:
+ description:
+ - Name of the Virtual Machine to manage.
+      - If the VM doesn't exist, C(name) is required. Otherwise C(id) or C(name) can be used.
+ type: str
+ id:
+ description:
+ - ID of the Virtual Machine to manage.
+ type: str
+ state:
+ description:
+      - Should the Virtual Machine be running/stopped/present/absent/suspended/next_run/registered/exported/reboot.
+        When C(state) is I(registered) and the unregistered VM's name
+        belongs to a VM already registered in the engine in the same data center,
+        registration of the unregistered VM fails.
+      - I(present) state will create/update the VM and won't change its state if it already exists.
+      - I(running) state will create/update the VM and start it.
+      - I(next_run) state updates the VM and, if the VM has a next-run configuration, reboots it.
+      - Please check I(notes) for a more detailed description of states.
+ - I(exported) state will export the VM to export domain or as OVA.
+ - I(registered) is supported since 2.4.
+ - I(reboot) is supported since 2.10, virtual machine is rebooted only if it's in up state.
+ choices: [ absent, next_run, present, registered, running, stopped, suspended, exported, reboot ]
+ default: present
+ type: str
+ cluster:
+ description:
+ - Name of the cluster, where Virtual Machine should be created.
+ - Required if creating VM.
+ type: str
+ allow_partial_import:
+ description:
+      - Boolean indicating whether to allow partial registration of the Virtual Machine when C(state) is I(registered).
+ type: bool
+ vnic_profile_mappings:
+ description:
+ - "Mapper which maps an external virtual NIC profile to one that exists in the engine when C(state) is registered.
+ vnic_profile is described by the following dictionary:"
+ type: list
+ elements: dict
+ suboptions:
+ source_network_name:
+ description:
+ - The network name of the source network.
+ source_profile_name:
+ description:
+ - The profile name related to the source network.
+ target_profile_id:
+ description:
+          - The ID of the target profile to be mapped to in the engine.
+ cluster_mappings:
+ description:
+ - "Mapper which maps cluster name between VM's OVF and the destination cluster this VM should be registered to,
+ relevant when C(state) is registered.
+ Cluster mapping is described by the following dictionary:"
+ type: list
+ elements: dict
+ suboptions:
+ source_name:
+ description:
+ - The name of the source cluster.
+ dest_name:
+ description:
+ - The name of the destination cluster.
+ role_mappings:
+ description:
+ - "Mapper which maps role name between VM's OVF and the destination role this VM should be registered to,
+ relevant when C(state) is registered.
+ Role mapping is described by the following dictionary:"
+ type: list
+ elements: dict
+ suboptions:
+ source_name:
+ description:
+ - The name of the source role.
+ dest_name:
+ description:
+ - The name of the destination role.
+ domain_mappings:
+ description:
+ - "Mapper which maps aaa domain name between VM's OVF and the destination aaa domain this VM should be registered to,
+ relevant when C(state) is registered.
+ The aaa domain mapping is described by the following dictionary:"
+ type: list
+ elements: dict
+ suboptions:
+ source_name:
+ description:
+ - The name of the source aaa domain.
+ dest_name:
+ description:
+ - The name of the destination aaa domain.
+ affinity_group_mappings:
+ type: list
+ description:
+ - "Mapper which maps affinity name between VM's OVF and the destination affinity this VM should be registered to,
+ relevant when C(state) is registered."
+ elements: dict
+ affinity_label_mappings:
+ type: list
+ description:
+ - "Mapper which maps affinity label name between VM's OVF and the destination label this VM should be registered to,
+ relevant when C(state) is registered."
+ elements: dict
+ lun_mappings:
+ description:
+ - "Mapper which maps lun between VM's OVF and the destination lun this VM should contain, relevant when C(state) is registered.
+ lun_mappings is described by the following dictionary:"
+ type: list
+ elements: dict
+ suboptions:
+ logical_unit_id:
+ description:
+          - The logical unit number to identify a logical unit.
+ logical_unit_port:
+ description:
+ - The port being used to connect with the LUN disk.
+ logical_unit_portal:
+ description:
+ - The portal being used to connect with the LUN disk.
+ logical_unit_address:
+ description:
+ - The address of the block storage host.
+ logical_unit_target:
+ description:
+          - The iSCSI specification located on an iSCSI server.
+ logical_unit_username:
+ description:
+ - Username to be used to connect to the block storage host.
+        logical_unit_password:
+ description:
+ - Password to be used to connect to the block storage host.
+ storage_type:
+ description:
+          - The storage type which the LUN resides on (iscsi or fcp).
+ reassign_bad_macs:
+ description:
+ - "Boolean indication whether to reassign bad macs when C(state) is registered."
+ type: bool
+ template:
+ description:
+ - Name of the template, which should be used to create Virtual Machine.
+ - Required if creating VM.
+ - If template is not specified and VM doesn't exist, VM will be created from I(Blank) template.
+ type: str
+ template_version:
+ description:
+ - Version number of the template to be used for VM.
+ - By default the latest available version of the template is used.
+ type: int
+ use_latest_template_version:
+ description:
+      - Specify if the latest template version should be used when running a stateless VM.
+      - If this parameter is set to I(yes) a stateless VM is created.
+ type: bool
+ storage_domain:
+ description:
+ - Name of the storage domain where all template disks should be created.
+ - This parameter is considered only when C(template) is provided.
+      - IMPORTANT - This parameter is not idempotent; if the VM exists and you specify a different storage domain,
+        the disk won't move.
+ type: str
+ disk_format:
+ description:
+ - Specify format of the disk.
+      - If C(cow) format is used, the disk will be created as sparse, so space will be allocated for the volume as needed, also known as I(thin provisioning).
+ - If C(raw) format is used, disk storage will be allocated right away, also known as I(preallocated).
+ - Note that this option isn't idempotent as it's not currently possible to change format of the disk via API.
+ - This parameter is considered only when C(template) and C(storage domain) is provided.
+ choices: [ cow, raw ]
+ default: cow
+ type: str
+ memory:
+ description:
+ - Amount of memory of the Virtual Machine. Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
+ - Default value is set by engine.
+ type: str
+ memory_guaranteed:
+ description:
+ - Amount of minimal guaranteed memory of the Virtual Machine.
+ Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
+ - C(memory_guaranteed) parameter can't be lower than C(memory) parameter.
+ - Default value is set by engine.
+ type: str
+ memory_max:
+ description:
+ - Upper bound of virtual machine memory up to which memory hot-plug can be performed.
+ Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
+ - Default value is set by engine.
+ type: str
+ cpu_shares:
+ description:
+ - Set a CPU shares for this Virtual Machine.
+ - Default value is set by oVirt/RHV engine.
+ type: int
+ cpu_cores:
+ description:
+      - Number of virtual CPU cores of the Virtual Machine.
+ - Default value is set by oVirt/RHV engine.
+ type: int
+ cpu_sockets:
+ description:
+      - Number of virtual CPU sockets of the Virtual Machine.
+ - Default value is set by oVirt/RHV engine.
+ type: int
+ cpu_threads:
+ description:
+ - Number of threads per core of the Virtual Machine.
+ - Default value is set by oVirt/RHV engine.
+ type: int
+ type:
+ description:
+ - Type of the Virtual Machine.
+ - Default value is set by oVirt/RHV engine.
+ - I(high_performance) is supported since Ansible 2.5 and oVirt/RHV 4.2.
+ choices: [ desktop, server, high_performance ]
+ type: str
+ quota_id:
+ description:
+ - "Virtual Machine quota ID to be used for disk. By default quota is chosen by oVirt/RHV engine."
+ type: str
+ operating_system:
+ description:
+ - Operating system of the Virtual Machine, for example 'rhel_8x64'.
+ - Default value is set by oVirt/RHV engine.
+ - Use the M(ovirt.ovirt.ovirt_vm_os_info) module to obtain the current list.
+ type: str
+ boot_devices:
+ description:
+ - List of boot devices which should be used to boot. For example C([ cdrom, hd ]).
+ - Default value is set by oVirt/RHV engine.
+ choices: [ cdrom, hd, network ]
+ elements: str
+ type: list
+ boot_menu:
+ description:
+ - "I(True) enable menu to select boot device, I(False) to disable it. By default is chosen by oVirt/RHV engine."
+ type: bool
+ bios_type:
+ description:
+ - "Set bios type, necessary for some operating systems and secure boot."
+ - "If no value is passed, default value is set from cluster."
+ - "NOTE - Supported since oVirt 4.3."
+ choices: [ i440fx_sea_bios, q35_ovmf, q35_sea_bios, q35_secure_boot ]
+ type: str
+ usb_support:
+ description:
+ - "I(True) enable USB support, I(False) to disable it. By default is chosen by oVirt/RHV engine."
+ type: bool
+ serial_console:
+ description:
+ - "I(True) enable VirtIO serial console, I(False) to disable it. By default is chosen by oVirt/RHV engine."
+ type: bool
+ sso:
+ description:
+ - "I(True) enable Single Sign On by Guest Agent, I(False) to disable it. By default is chosen by oVirt/RHV engine."
+ type: bool
+ host:
+ description:
+ - Specify host where Virtual Machine should be running. By default the host is chosen by engine scheduler.
+ - This parameter is used only when C(state) is I(running) or I(present).
+ type: str
+ high_availability:
+ description:
+ - If I(yes) Virtual Machine will be set as highly available.
+ - If I(no) Virtual Machine won't be set as highly available.
+ - If no value is passed, default value is set by oVirt/RHV engine.
+ type: bool
+ high_availability_priority:
+ description:
+ - Indicates the priority of the virtual machine inside the run and migration queues.
+ Virtual machines with higher priorities will be started and migrated before virtual machines with lower
+ priorities. The value is an integer between 0 and 100. The higher the value, the higher the priority.
+ - If no value is passed, default value is set by oVirt/RHV engine.
+ type: int
+ lease:
+ description:
+      - Name of the storage domain this virtual machine lease resides on. Pass an empty string to remove the lease.
+ - NOTE - Supported since oVirt 4.1.
+ type: str
+ custom_compatibility_version:
+ description:
+ - "Enables a virtual machine to be customized to its own compatibility version. If
+ 'C(custom_compatibility_version)' is set, it overrides the cluster's compatibility version
+ for this particular virtual machine."
+ type: str
+ host_devices:
+ description:
+      - Single Root I/O Virtualization - technology that allows a single device to expose multiple endpoints that can be passed to VMs.
+      - host_devices is a list of dictionaries with the name and state of each device.
+ type: list
+ elements: dict
+ delete_protected:
+ description:
+ - If I(yes) Virtual Machine will be set as delete protected.
+ - If I(no) Virtual Machine won't be set as delete protected.
+ - If no value is passed, default value is set by oVirt/RHV engine.
+ type: bool
+ stateless:
+ description:
+ - If I(yes) Virtual Machine will be set as stateless.
+ - If I(no) Virtual Machine will be unset as stateless.
+ - If no value is passed, default value is set by oVirt/RHV engine.
+ type: bool
+ clone:
+ description:
+ - If I(yes) then the disks of the created virtual machine will be cloned and independent of the template.
+ - This parameter is used only when C(state) is I(running) or I(present) and VM didn't exist before.
+ type: bool
+ default: 'no'
+ clone_permissions:
+ description:
+ - If I(yes) then the permissions of the template (only the direct ones, not the inherited ones)
+ will be copied to the created virtual machine.
+ - This parameter is used only when C(state) is I(running) or I(present) and VM didn't exist before.
+ type: bool
+ default: 'no'
+ cd_iso:
+ description:
+ - ISO file from ISO storage domain which should be attached to Virtual Machine.
+ - If you have multiple ISO disks with the same name use disk ID to specify which should be used or use C(storage_domain) to filter disks.
+      - If you pass an empty string the CD will be ejected from the VM.
+      - If used with C(state) I(running) or I(present) and the VM is running, the CD will be attached to the VM.
+      - If used with C(state) I(running) or I(present) and the VM is down, the CD will be attached to the VM persistently.
+ type: str
+ force:
+ description:
+      - Please check the I(Synopsis) for a more detailed description of the force parameter; it can behave differently
+ in different situations.
+ type: bool
+ default: 'no'
+ nics:
+ description:
+      - List of NICs, which should be attached to the Virtual Machine. Each NIC is described by the following dictionary.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Name of the NIC.
+ profile_name:
+ description:
+ - Profile name where NIC should be attached.
+ interface:
+ description:
+ - Type of the network interface.
+ choices: ['virtio', 'e1000', 'rtl8139']
+ default: 'virtio'
+ mac_address:
+ description:
+ - Custom MAC address of the network interface, by default it's obtained from MAC pool.
+ - "NOTE - This parameter is used only when C(state) is I(running) or I(present) and is able to only create NICs.
+ To manage NICs of the VM in more depth please use M(ovirt.ovirt.ovirt_nic) module instead."
+ disks:
+ description:
+      - List of disks, which should be attached to the Virtual Machine. Each disk is described by the following dictionary.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Name of the disk. Either C(name) or C(id) is required.
+ id:
+ description:
+ - ID of the disk. Either C(name) or C(id) is required.
+ interface:
+ description:
+ - Interface of the disk.
+ choices: ['virtio', 'ide']
+ default: 'virtio'
+ bootable:
+ description:
+ - I(True) if the disk should be bootable, default is non bootable.
+ type: bool
+ activate:
+ description:
+ - I(True) if the disk should be activated, default is activated.
+ - "NOTE - This parameter is used only when C(state) is I(running) or I(present) and is able to only attach disks.
+ To manage disks of the VM in more depth please use M(ovirt.ovirt.ovirt_disk) module instead."
+ type: bool
+ sysprep:
+ description:
+ - Dictionary with values for Windows Virtual Machine initialization using sysprep.
+ type: dict
+ suboptions:
+ host_name:
+ description:
+ - Hostname to be set to Virtual Machine when deployed.
+ active_directory_ou:
+ description:
+ - Active Directory Organizational Unit, to be used for login of user.
+ org_name:
+ description:
+ - Organization name to be set to Windows Virtual Machine.
+ domain:
+ description:
+ - Domain to be set to Windows Virtual Machine.
+ timezone:
+ description:
+ - Timezone to be set to Windows Virtual Machine.
+ ui_language:
+ description:
+ - UI language of the Windows Virtual Machine.
+ system_locale:
+ description:
+ - System localization of the Windows Virtual Machine.
+ input_locale:
+ description:
+ - Input localization of the Windows Virtual Machine.
+ windows_license_key:
+ description:
+ - License key to be set to Windows Virtual Machine.
+ user_name:
+ description:
+          - Username to be used to set the password for the Windows Virtual Machine.
+      root_password:
+        description:
+          - Password to be set for the user specified by C(user_name) on the Windows Virtual Machine.
+ custom_script:
+ description:
+ - A custom Sysprep definition in the format of a complete unattended installation answer file.
+ cloud_init:
+ description:
+ - Dictionary with values for Unix-like Virtual Machine initialization using cloud init.
+ type: dict
+ suboptions:
+ host_name:
+ description:
+ - Hostname to be set to Virtual Machine when deployed.
+ timezone:
+ description:
+ - Timezone to be set to Virtual Machine when deployed.
+ user_name:
+ description:
+          - Username to be used to set the password on the Virtual Machine when deployed.
+ root_password:
+ description:
+ - Password to be set for user specified by C(user_name) parameter.
+ authorized_ssh_keys:
+ description:
+          - Use these SSH keys to log in to the Virtual Machine.
+ regenerate_ssh_keys:
+ description:
+ - If I(True) SSH keys will be regenerated on Virtual Machine.
+ type: bool
+ custom_script:
+ description:
+ - Cloud-init script which will be executed on Virtual Machine when deployed.
+ - This is appended to the end of the cloud-init script generated by any other options.
+ - For further information, refer to cloud-init User-Data documentation.
+ dns_servers:
+ description:
+ - DNS servers to be configured on Virtual Machine, maximum of two, space-separated.
+ dns_search:
+ description:
+ - DNS search domains to be configured on Virtual Machine.
+ nic_boot_protocol:
+ description:
+ - Set boot protocol of the network interface of Virtual Machine.
+ choices: ['none', 'dhcp', 'static']
+ nic_ip_address:
+ description:
+ - If boot protocol is static, set this IP address to network interface of Virtual Machine.
+ nic_netmask:
+ description:
+ - If boot protocol is static, set this netmask to network interface of Virtual Machine.
+ nic_gateway:
+ description:
+ - If boot protocol is static, set this gateway to network interface of Virtual Machine.
+ nic_boot_protocol_v6:
+ description:
+ - Set boot protocol of the network interface of Virtual Machine.
+ choices: ['none', 'dhcp', 'static']
+ nic_ip_address_v6:
+ description:
+ - If boot protocol is static, set this IP address to network interface of Virtual Machine.
+ nic_netmask_v6:
+ description:
+ - If boot protocol is static, set this netmask to network interface of Virtual Machine.
+ nic_gateway_v6:
+ description:
+ - If boot protocol is static, set this gateway to network interface of Virtual Machine.
+ - For IPv6 addresses the value is an integer in the range of 0-128, which represents the subnet prefix.
+ nic_name:
+ description:
+ - Set name to network interface of Virtual Machine.
+ cloud_init_nics:
+ description:
+      - List of dictionaries representing network interfaces to be set up by cloud-init.
+      - This option is used when the user needs to set up multiple network interfaces via cloud-init.
+ - If one network interface is enough, user should use C(cloud_init) I(nic_*) parameters. C(cloud_init) I(nic_*) parameters
+ are merged with C(cloud_init_nics) parameters.
+ type: list
+ elements: dict
+ suboptions:
+ nic_boot_protocol:
+ description:
+ - Set boot protocol of the network interface of Virtual Machine. Can be one of C(none), C(dhcp) or C(static).
+ nic_ip_address:
+ description:
+ - If boot protocol is static, set this IP address to network interface of Virtual Machine.
+ nic_netmask:
+ description:
+ - If boot protocol is static, set this netmask to network interface of Virtual Machine.
+ nic_gateway:
+ description:
+ - If boot protocol is static, set this gateway to network interface of Virtual Machine.
+ nic_boot_protocol_v6:
+ description:
+ - Set boot protocol of the network interface of Virtual Machine. Can be one of C(none), C(dhcp) or C(static).
+ nic_ip_address_v6:
+ description:
+ - If boot protocol is static, set this IP address to network interface of Virtual Machine.
+ nic_netmask_v6:
+ description:
+ - If boot protocol is static, set this netmask to network interface of Virtual Machine.
+ nic_gateway_v6:
+ description:
+ - If boot protocol is static, set this gateway to network interface of Virtual Machine.
+ - For IPv6 addresses the value is an integer in the range of 0-128, which represents the subnet prefix.
+ nic_name:
+ description:
+ - Set name to network interface of Virtual Machine.
+ cloud_init_persist:
+ description:
+ - "If I(yes) the C(cloud_init) or C(sysprep) parameters will be saved for the virtual machine
+ and the virtual machine won't be started as run-once."
+ type: bool
+ aliases: [ 'sysprep_persist' ]
+ default: 'no'
+ kernel_params_persist:
+ description:
+ - "If I(true) C(kernel_params), C(initrd_path) and C(kernel_path) will persist in virtual machine configuration,
+ if I(False) it will be used for run once."
+ type: bool
+ kernel_path:
+ description:
+ - Path to a kernel image used to boot the virtual machine.
+ - Kernel image must be stored on either the ISO domain or on the host's storage.
+ type: str
+ initrd_path:
+ description:
+ - Path to an initial ramdisk to be used with the kernel specified by C(kernel_path) option.
+ - Ramdisk image must be stored on either the ISO domain or on the host's storage.
+ type: str
+ kernel_params:
+ description:
+ - Kernel command line parameters (formatted as string) to be used with the kernel specified by C(kernel_path) option.
+ type: str
+ instance_type:
+ description:
+ - Name of virtual machine's hardware configuration.
+ - By default no instance type is used.
+ type: str
+ description:
+ description:
+ - Description of the Virtual Machine.
+ type: str
+ comment:
+ description:
+ - Comment of the Virtual Machine.
+ type: str
+ timezone:
+ description:
+ - Sets time zone offset of the guest hardware clock.
+ - For example C(Etc/GMT)
+ type: str
+ serial_policy:
+ description:
+ - Specify a serial number policy for the Virtual Machine.
+ - Following options are supported.
+ - C(vm) - Sets the Virtual Machine's UUID as its serial number.
+ - C(host) - Sets the host's UUID as the Virtual Machine's serial number.
+ - C(custom) - Allows you to specify a custom serial number in C(serial_policy_value).
+ choices: ['vm', 'host', 'custom']
+ type: str
+ serial_policy_value:
+ description:
+ - Allows you to specify a custom serial number.
+ - This parameter is used only when C(serial_policy) is I(custom).
+ type: str
+ vmware:
+ description:
+ - Dictionary of values to be used to connect to VMware and import
+ a virtual machine to oVirt.
+ type: dict
+ suboptions:
+ username:
+ description:
+ - The username to authenticate against the VMware.
+ password:
+ description:
+ - The password to authenticate against the VMware.
+ url:
+ description:
+ - The URL to be passed to the I(virt-v2v) tool for conversion.
+          - For example I(vpx://vmware_user@vcenter-host/DataCenter/Cluster/esxi-host?no_verify=1)
+ drivers_iso:
+ description:
+ - The name of the ISO containing drivers that can be used during the I(virt-v2v) conversion process.
+ sparse:
+ description:
+ - Specifies the disk allocation policy of the resulting virtual machine. I(true) for sparse, I(false) for preallocated.
+ type: bool
+ default: true
+ storage_domain:
+ description:
+          - Specifies the target storage domain for converted disks. This is a required parameter.
+ xen:
+ description:
+ - Dictionary of values to be used to connect to XEN and import
+ a virtual machine to oVirt.
+ type: dict
+ suboptions:
+ url:
+ description:
+ - The URL to be passed to the I(virt-v2v) tool for conversion.
+          - For example I(xen+ssh://root@zen.server). This is a required parameter.
+ drivers_iso:
+ description:
+ - The name of the ISO containing drivers that can be used during the I(virt-v2v) conversion process.
+ sparse:
+ description:
+ - Specifies the disk allocation policy of the resulting virtual machine. I(true) for sparse, I(false) for preallocated.
+ type: bool
+ default: true
+ storage_domain:
+ description:
+          - Specifies the target storage domain for converted disks. This is a required parameter.
+ kvm:
+ description:
+ - Dictionary of values to be used to connect to kvm and import
+ a virtual machine to oVirt.
+ type: dict
+ suboptions:
+ name:
+ description:
+ - The name of the KVM virtual machine.
+ username:
+ description:
+ - The username to authenticate against the KVM.
+ password:
+ description:
+ - The password to authenticate against the KVM.
+ url:
+ description:
+ - The URL to be passed to the I(virt-v2v) tool for conversion.
+          - For example I(qemu:///system). This is a required parameter.
+ drivers_iso:
+ description:
+ - The name of the ISO containing drivers that can be used during the I(virt-v2v) conversion process.
+ sparse:
+ description:
+ - Specifies the disk allocation policy of the resulting virtual machine. I(true) for sparse, I(false) for preallocated.
+ type: bool
+ default: true
+ storage_domain:
+ description:
+          - Specifies the target storage domain for converted disks. This is a required parameter.
+ cpu_mode:
+ description:
+ - "CPU mode of the virtual machine. It can be some of the following: I(host_passthrough), I(host_model) or I(custom)."
+ - "For I(host_passthrough) CPU type you need to set C(placement_policy) to I(pinned)."
+ - "If no value is passed, default value is set by oVirt/RHV engine."
+ type: str
+ placement_policy:
+ description:
+ - "The configuration of the virtual machine's placement policy."
+ - "If no value is passed, default value is set by oVirt/RHV engine."
+ - "Placement policy can be one of the following values:"
+ type: str
+ suboptions:
+ migratable:
+ description:
+ - "Allow manual and automatic migration."
+ pinned:
+ description:
+ - "Do not allow migration."
+ user_migratable:
+ description:
+ - "Allow manual migration only."
+ ticket:
+ description:
+ - "If I(true), in addition return I(remote_vv_file) inside I(vm) dictionary, which contains compatible
+ content for remote-viewer application. Works only C(state) is I(running)."
+ type: bool
+ cpu_pinning:
+ description:
+ - "CPU Pinning topology to map virtual machine CPU to host CPU."
+ - "CPU Pinning topology is a list of dictionary which can have following values:"
+ type: list
+ elements: dict
+ suboptions:
+ cpu:
+ description:
+ - "Number of the host CPU."
+ vcpu:
+ description:
+ - "Number of the virtual machine CPU."
+ soundcard_enabled:
+ description:
+ - "If I(true), the sound card is added to the virtual machine."
+ type: bool
+ smartcard_enabled:
+ description:
+ - "If I(true), use smart card authentication."
+ type: bool
+ io_threads:
+ description:
+ - "Number of IO threads used by virtual machine. I(0) means IO threading disabled."
+ type: int
+ ballooning_enabled:
+ description:
+ - "If I(true), use memory ballooning."
+ - "Memory balloon is a guest device, which may be used to re-distribute / reclaim the host memory
+ based on VM needs in a dynamic way. In this way it's possible to create memory over commitment states."
+ type: bool
+ numa_tune_mode:
+ description:
+ - "Set how the memory allocation for NUMA nodes of this VM is applied (relevant if NUMA nodes are set for this VM)."
+ - "It can be one of the following: I(interleave), I(preferred) or I(strict)."
+ - "If no value is passed, default value is set by oVirt/RHV engine."
+ choices: ['interleave', 'preferred', 'strict']
+ type: str
+ numa_nodes:
+ description:
+ - "List of vNUMA Nodes to set for this VM and pin them to assigned host's physical NUMA node."
+ - "Each vNUMA node is described by following dictionary:"
+ type: list
+ elements: dict
+ suboptions:
+ index:
+ description:
+ - "The index of this NUMA node."
+ required: True
+ memory:
+ description:
+ - "Memory size of the NUMA node in MiB."
+ required: True
+ cores:
+ description:
+ - "List of VM CPU cores indexes to be included in this NUMA node."
+ type: list
+ required: True
+ numa_node_pins:
+ description:
+ - "List of physical NUMA node indexes to pin this virtual NUMA node to."
+ type: list
+ rng_device:
+ description:
+ - "Random number generator (RNG). You can choose of one the following devices I(urandom), I(random) or I(hwrng)."
+ - "In order to select I(hwrng), you must have it enabled on cluster first."
+ - "/dev/urandom is used for cluster version >= 4.1, and /dev/random for cluster version <= 4.0"
+ type: str
+ custom_properties:
+ description:
+ - "Properties sent to VDSM to configure various hooks."
+ - "Custom properties is a list of dictionary which can have following values:"
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - "Name of the custom property. For example: I(hugepages), I(vhost), I(sap_agent), etc."
+ regexp:
+ description:
+ - "Regular expression to set for custom property."
+ value:
+ description:
+ - "Value to set for custom property."
+ watchdog:
+ description:
+ - "Assign watchdog device for the virtual machine."
+ - "Watchdogs is a dictionary which can have following values:"
+ type: dict
+ suboptions:
+ model:
+ description:
+ - "Model of the watchdog device. For example: I(i6300esb), I(diag288) or I(null)."
+ action:
+ description:
+ - "Watchdog action to be performed when watchdog is triggered. For example: I(none), I(reset), I(poweroff), I(pause) or I(dump)."
+ graphical_console:
+ description:
+ - "Assign graphical console to the virtual machine."
+ type: dict
+ suboptions:
+ headless_mode:
+ description:
+ - If I(true) disable the graphics console for this virtual machine.
+ type: bool
+ protocol:
+ description:
+ - Graphical protocol, a list of I(spice), I(vnc), or both.
+ type: list
+ elements: str
+ disconnect_action:
+ description:
+ - "Returns the action that will take place when the graphic console(SPICE only) is disconnected. The options are:"
+ - I(none) No action is taken.
+ - I(lock_screen) Locks the currently active user session.
+ - I(logout) Logs out the currently active user session.
+ - I(reboot) Initiates a graceful virtual machine reboot.
+ - I(shutdown) Initiates a graceful virtual machine shutdown.
+ type: str
+ keyboard_layout:
+ description:
+ - The keyboard layout to use with this graphic console.
+ - This option is only available for the VNC console type.
+ - If no keyboard is enabled then it won't be reported.
+ type: str
+ monitors:
+ description:
+ - The number of monitors opened for this graphic console.
+ - This option is only available for the SPICE protocol.
+ - Possible values are 1, 2 or 4.
+ type: int
+ exclusive:
+ description:
+ - "When C(state) is I(exported) this parameter indicates if the existing VM with the
+ same name should be overwritten."
+ type: bool
+ export_domain:
+ description:
+ - "When C(state) is I(exported)this parameter specifies the name of the export storage domain."
+ type: str
+ export_ova:
+ description:
+ - Dictionary of values to be used to export VM as OVA.
+ type: dict
+ suboptions:
+ host:
+ description:
+ - The name of the destination host where the OVA has to be exported.
+ directory:
+ description:
+ - The name of the directory where the OVA has to be exported.
+ filename:
+ description:
+ - The name of the exported OVA file.
+ force_migrate:
+ description:
+ - If I(true), the VM will migrate when I(placement_policy=user-migratable) but not when I(placement_policy=pinned).
+ type: bool
+ migrate:
+ description:
+ - "If I(true), the VM will migrate to any available host."
+ type: bool
+ next_run:
+ description:
+ - "If I(true), the update will not be applied to the VM immediately and will be only applied when virtual machine is restarted."
+ - NOTE - If there are multiple next run configuration changes on the VM, the first change may get reverted if this option is not passed.
+ type: bool
+ snapshot_name:
+ description:
+ - "Snapshot to clone VM from."
+ - "Snapshot with description specified should exist."
+ - "You have to specify C(snapshot_vm) parameter with virtual machine name of this snapshot."
+ type: str
+ snapshot_vm:
+ description:
+ - "Source VM to clone VM from."
+ - "VM should have snapshot specified by C(snapshot)."
+ - "If C(snapshot_name) specified C(snapshot_vm) is required."
+ type: str
+ custom_emulated_machine:
+ description:
+ - "Sets the value of the custom_emulated_machine attribute."
+ type: str
+
+notes:
+  - If the VM is in I(UNASSIGNED) or I(UNKNOWN) state before any operation, the module will fail.
+    If the VM is in I(IMAGE_LOCKED) state before any operation, we try to wait for the VM to be I(DOWN).
+    If the VM is in I(SAVING_STATE) state before any operation, we try to wait for the VM to be I(SUSPENDED).
+    If the VM is in I(POWERING_DOWN) state before any operation, we try to wait for the VM to be I(UP) or I(DOWN). A VM can
+    get into I(UP) state from I(POWERING_DOWN) state when there is no ACPI or guest agent running inside the VM, or
+    if the shutdown operation fails.
+    When the user specifies I(UP) C(state), we always wait for the VM to be in I(UP) state in case the VM is I(MIGRATING),
+    I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE) or I(WAIT_FOR_LAUNCH). In other states we run the start operation on the VM.
+    When the user specifies I(stopped) C(state) and passes the C(force) parameter set to I(true), we forcibly stop the VM in
+    any state. If the user doesn't pass the C(force) parameter, we always wait for the VM to be in I(UP) state in case the VM is
+    I(MIGRATING), I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE) or I(WAIT_FOR_LAUNCH). If the VM is in I(PAUSED) or
+    I(SUSPENDED) state, we start the VM. Then we gracefully shut down the VM.
+    When the user specifies I(suspended) C(state), we always wait for the VM to be in I(UP) state in case the VM is I(MIGRATING),
+    I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE) or I(WAIT_FOR_LAUNCH). If the VM is in I(PAUSED) or I(DOWN) state,
+    we start the VM. Then we suspend the VM.
+    When the user specifies I(absent) C(state), we forcibly stop the VM in any state and remove it.
+ - "If you update a VM parameter that requires a reboot, the oVirt engine always creates a new snapshot for the VM,
+ and an Ansible playbook will report this as changed."
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Creates a new Virtual Machine from template named 'rhel7_template'
+ ovirt.ovirt.ovirt_vm:
+ state: present
+ name: myvm
+ template: rhel7_template
+ cluster: mycluster
+
+- name: Register VM
+ ovirt.ovirt.ovirt_vm:
+ state: registered
+ storage_domain: mystorage
+ cluster: mycluster
+ name: myvm
+
+- name: Register VM using id
+ ovirt.ovirt.ovirt_vm:
+ state: registered
+ storage_domain: mystorage
+ cluster: mycluster
+ id: 1111-1111-1111-1111
+
+- name: Register VM, allowing partial import
+ ovirt.ovirt.ovirt_vm:
+ state: registered
+ storage_domain: mystorage
+ allow_partial_import: "True"
+ cluster: mycluster
+ id: 1111-1111-1111-1111
+
+- name: Register VM with vnic profile mappings and reassign bad macs
+ ovirt.ovirt.ovirt_vm:
+ state: registered
+ storage_domain: mystorage
+ cluster: mycluster
+ id: 1111-1111-1111-1111
+ vnic_profile_mappings:
+ - source_network_name: mynetwork
+ source_profile_name: mynetwork
+ target_profile_id: 3333-3333-3333-3333
+ - source_network_name: mynetwork2
+ source_profile_name: mynetwork2
+ target_profile_id: 4444-4444-4444-4444
+ reassign_bad_macs: "True"
+
+- name: Register VM with mappings
+ ovirt.ovirt.ovirt_vm:
+ state: registered
+ storage_domain: mystorage
+ cluster: mycluster
+ id: 1111-1111-1111-1111
+ role_mappings:
+ - source_name: Role_A
+ dest_name: Role_B
+ domain_mappings:
+ - source_name: Domain_A
+ dest_name: Domain_B
+ lun_mappings:
+ - source_storage_type: iscsi
+ source_logical_unit_id: 1IET_000d0001
+ source_logical_unit_port: 3260
+ source_logical_unit_portal: 1
+ source_logical_unit_address: 10.34.63.203
+ source_logical_unit_target: iqn.2016-08-09.brq.str-01:omachace
+ dest_storage_type: iscsi
+ dest_logical_unit_id: 1IET_000d0002
+ dest_logical_unit_port: 3260
+ dest_logical_unit_portal: 1
+ dest_logical_unit_address: 10.34.63.204
+ dest_logical_unit_target: iqn.2016-08-09.brq.str-02:omachace
+ affinity_group_mappings:
+ - source_name: Affinity_A
+ dest_name: Affinity_B
+ affinity_label_mappings:
+ - source_name: Label_A
+ dest_name: Label_B
+ cluster_mappings:
+ - source_name: cluster_A
+ dest_name: cluster_B
+
+- name: Creates a stateless VM which will always use latest template version
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ template: rhel7
+ cluster: mycluster
+ use_latest_template_version: true
+
+# Creates a new server rhel7 Virtual Machine from Blank template
+# on brq01 cluster with 2GiB memory and 2 vcpu cores/sockets
+# and attach bootable disk with name rhel7_disk and attach virtio NIC
+- ovirt.ovirt.ovirt_vm:
+ state: present
+ cluster: brq01
+ name: myvm
+ memory: 2GiB
+ cpu_cores: 2
+ cpu_sockets: 2
+ cpu_shares: 1024
+ type: server
+ operating_system: rhel_7x64
+ disks:
+ - name: rhel7_disk
+ bootable: True
+ nics:
+ - name: nic1
+
+# Change VM Name
+- ovirt.ovirt.ovirt_vm:
+ id: 00000000-0000-0000-0000-000000000000
+ name: "new_vm_name"
+
+- name: Run VM with cloud init
+ ovirt.ovirt.ovirt_vm:
+ name: rhel7
+ template: rhel7
+ cluster: Default
+ memory: 1GiB
+ high_availability: true
+ high_availability_priority: 50 # Available from Ansible 2.5
+ cloud_init:
+ dns_servers: '8.8.8.8 8.8.4.4'
+ nic_boot_protocol: static
+ nic_ip_address: 10.34.60.86
+ nic_netmask: 255.255.252.0
+ nic_gateway: 10.34.63.254
+ nic_name: eth1
+ host_name: example.com
+ custom_script: |
+ write_files:
+ - content: |
+ Hello, world!
+ path: /tmp/greeting.txt
+ permissions: '0644'
+ user_name: root
+ root_password: super_password
+
+- name: Run VM with cloud init, with multiple network interfaces
+ ovirt.ovirt.ovirt_vm:
+ name: rhel7_4
+ template: rhel7
+ cluster: mycluster
+ cloud_init_nics:
+ - nic_name: eth0
+ nic_boot_protocol: dhcp
+ - nic_name: eth1
+ nic_boot_protocol: static
+ nic_ip_address: 10.34.60.86
+ nic_netmask: 255.255.252.0
+ nic_gateway: 10.34.63.254
+ # IP version 6 parameters are supported since Ansible 2.9
+ - nic_name: eth2
+ nic_boot_protocol_v6: static
+ nic_ip_address_v6: '2620:52:0:2282:b898:1f69:6512:36c5'
+ nic_gateway_v6: '2620:52:0:2282:b898:1f69:6512:36c9'
+ nic_netmask_v6: '120'
+ - nic_name: eth3
+ nic_boot_protocol_v6: dhcp
+
+- name: Run VM with sysprep
+ ovirt.ovirt.ovirt_vm:
+ name: windows2012R2_AD
+ template: windows2012R2
+ cluster: Default
+ memory: 3GiB
+ high_availability: true
+ sysprep:
+ host_name: windowsad.example.com
+ user_name: Administrator
+ root_password: SuperPassword123
+
+- name: Migrate/Run VM to/on host named 'host1'
+ ovirt.ovirt.ovirt_vm:
+ state: running
+ name: myvm
+ host: host1
+
+- name: Migrate VM to any available host
+ ovirt.ovirt.ovirt_vm:
+ state: running
+ name: myvm
+ migrate: true
+
+- name: Change VM's CD
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ cd_iso: drivers.iso
+
+- name: Eject VM's CD
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ cd_iso: ''
+
+- name: Boot VM from CD
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ cd_iso: centos7_x64.iso
+ boot_devices:
+ - cdrom
+
+- name: Stop VM
+ ovirt.ovirt.ovirt_vm:
+ state: stopped
+ name: myvm
+
+- name: Upgrade memory of an already created VM
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ memory: 4GiB
+
+- name: Hot plug memory into an already created and running VM (the VM won't be restarted)
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ memory: 4GiB
+
+# Create/update a VM to run with two vNUMA nodes and pin them to physical NUMA nodes as follows:
+# vnuma index 0-> numa index 0, vnuma index 1-> numa index 1
+- name: Create a VM to run with two vNUMA nodes
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ cluster: mycluster
+ numa_tune_mode: "interleave"
+ numa_nodes:
+ - index: 0
+ cores: [0]
+ memory: 20
+ numa_node_pins: [0]
+ - index: 1
+ cores: [1]
+ memory: 30
+ numa_node_pins: [1]
+
+- name: Update an existing VM to run without previously created vNUMA nodes (i.e. remove all vNUMA nodes and the NUMA pinning settings)
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ cluster: mycluster
+ state: "present"
+ numa_tune_mode: "interleave"
+ numa_nodes:
+ - index: -1
+
+# When a change to the VM requires a restart, use the next_run state:
+# the VM will be updated and rebooted if there are any changes.
+# If the present state were used, the VM wouldn't be restarted.
+- ovirt.ovirt.ovirt_vm:
+ state: next_run
+ name: myvm
+ boot_devices:
+ - network
+
+- name: Import virtual machine from VMware
+ ovirt.ovirt.ovirt_vm:
+ state: stopped
+ cluster: mycluster
+ name: vmware_win10
+ timeout: 1800
+ poll_interval: 30
+ vmware:
+ url: vpx://user@1.2.3.4/Folder1/Cluster1/2.3.4.5?no_verify=1
+ name: windows10
+ storage_domain: mynfs
+ username: user
+ password: password
+
+- name: Create vm from template and create all disks on specific storage domain
+ ovirt.ovirt.ovirt_vm:
+ name: vm_test
+ cluster: mycluster
+ template: mytemplate
+ storage_domain: mynfs
+ nics:
+ - name: nic1
+
+- name: Remove VM (if the VM is running it will be stopped first)
+ ovirt.ovirt.ovirt_vm:
+ state: absent
+ name: myvm
+
+# Defining a specific quota for a VM:
+# Since Ansible 2.5
+- ovirt.ovirt.ovirt_quotas_info:
+ data_center: Default
+ name: myquota
+ register: ovirt_quotas
+- ovirt.ovirt.ovirt_vm:
+ name: myvm
+ sso: False
+ boot_menu: True
+ bios_type: q35_ovmf
+ usb_support: True
+ serial_console: True
+ quota_id: "{{ ovirt_quotas.ovirt_quotas[0]['id'] }}"
+
+- name: Create a VM that has the console configured for both Spice and VNC
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ template: mytemplate
+ cluster: mycluster
+ graphical_console:
+ protocol:
+ - spice
+ - vnc
+
+# Execute remote viewer to VM
+- block:
+ - name: Create a ticket for console for a running VM
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ ticket: true
+ state: running
+ register: myvm
+
+ - name: Save ticket to file
+ ansible.builtin.copy:
+ content: "{{ myvm.vm.remote_vv_file }}"
+ dest: ~/vvfile.vv
+
+ - name: Run remote viewer with file
+ ansible.builtin.command: remote-viewer ~/vvfile.vv
+
+# Default value of host_device state is present
+- name: Attach host devices to virtual machine
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ host: myhost
+ placement_policy: pinned
+ host_devices:
+ - name: pci_0000_00_06_0
+ - name: pci_0000_00_07_0
+ state: absent
+ - name: pci_0000_00_08_0
+ state: present
+
+- name: Export the VM as OVA
+ ovirt.ovirt.ovirt_vm:
+ name: myvm
+ state: exported
+ cluster: mycluster
+ export_ova:
+ host: myhost
+ filename: myvm.ova
+ directory: /tmp/
+
+- name: Clone VM from snapshot
+ ovirt.ovirt.ovirt_vm:
+ snapshot_vm: myvm
+ snapshot_name: myvm_snap
+ name: myvm_clone
+ state: present
+
+- name: Import external ova VM
+ ovirt.ovirt.ovirt_vm:
+ cluster: mycluster
+ name: myvm
+ host: myhost
+ timeout: 1800
+ poll_interval: 30
+ kvm:
+ name: myvm
+ url: ova:///path/myvm.ova
+ storage_domain: mystorage
+
+- name: CPU pinning of vCPUs 0,1,2,3 to physical CPUs 12,13,14,15
+ ovirt.ovirt.ovirt_vm:
+ state: present
+ cluster: mycluster
+ name: myvm
+ cpu_pinning:
+ - cpu: 12
+ vcpu: 0
+ - cpu: 13
+ vcpu: 1
+ - cpu: 14
+ vcpu: 2
+ - cpu: 15
+ vcpu: 3
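+
+# Minimal example sketches for the 'suspended' and 'reboot' states, which are
+# also accepted by this module's state parameter; 'myvm' is a placeholder name.
+- name: Suspend VM
+ ovirt.ovirt.ovirt_vm:
+ state: suspended
+ name: myvm
+
+- name: Reboot VM
+ ovirt.ovirt.ovirt_vm:
+ state: reboot
+ name: myvm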
+'''
+
+
+RETURN = '''
+id:
+ description: ID of the VM which is managed
+ returned: On success if VM is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+vm:
+ description: "Dictionary of all the VM attributes. VM attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm.
+ Additionally when user sent ticket=true, this module will return also remote_vv_file
+ parameter in vm dictionary, which contains remote-viewer compatible file to open virtual
+ machine console. Please note that this file contains sensible information."
+ returned: On success if VM is found.
+ type: dict
+'''
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_params,
+ check_sdk,
+ convert_to_bytes,
+ create_connection,
+ equal,
+ get_dict_of_struct,
+ get_entity,
+ get_link_name,
+ get_id_by_name,
+ ovirt_full_argument_spec,
+ search_by_attributes,
+ search_by_name,
+ wait,
+)
+
+
+class VmsModule(BaseModule):
+
+ def __init__(self, *args, **kwargs):
+ super(VmsModule, self).__init__(*args, **kwargs)
+ self._initialization = None
+ self._is_new = False
+
+ def __get_template_with_version(self):
+ """
+ oVirt/RHV version 4.1 doesn't support searching by template+version_number,
+ so we need to list all templates with the given name and then iterate
+ through their versions until we find the one we are looking for.
+ """
+ template = None
+ templates_service = self._connection.system_service().templates_service()
+ if self._is_new:
+ if self.param('template'):
+ clusters_service = self._connection.system_service().clusters_service()
+ cluster = search_by_name(clusters_service, self.param('cluster'))
+ data_center = self._connection.follow_link(cluster.data_center)
+ templates = templates_service.list(
+ search='name=%s and datacenter=%s' % (self.param('template'), data_center.name)
+ )
+ if self.param('template_version'):
+ templates = [
+ t for t in templates
+ if t.version.version_number == self.param('template_version')
+ ]
+ if not templates:
+ raise ValueError(
+ "Template with name '%s' and version '%s' in data center '%s' was not found" % (
+ self.param('template'),
+ self.param('template_version'),
+ data_center.name
+ )
+ )
+ template = sorted(templates, key=lambda t: t.version.version_number, reverse=True)[0]
+ else:
+ # If a template isn't specified and the VM is about to be created, use the default template:
+ template = templates_service.template_service('00000000-0000-0000-0000-000000000000').get()
+ else:
+ templates = templates_service.list(
+ search='vm.name=%s' % self.param('name')
+ )
+ if templates:
+ template = templates[0]
+ if self.param('template') is not None and self.param('template') != template.name:
+ raise ValueError("You can not change template of the Virtual Machine.")
+
+ return template
+
+ def __get_storage_domain_and_all_template_disks(self, template):
+
+ if self.param('template') is None:
+ return None
+
+ if self.param('storage_domain') is None:
+ return None
+
+ disks = list()
+
+ for att in self._connection.follow_link(template.disk_attachments):
+ disks.append(
+ otypes.DiskAttachment(
+ disk=otypes.Disk(
+ id=att.disk.id,
+ format=otypes.DiskFormat(self.param('disk_format')),
+ storage_domains=[
+ otypes.StorageDomain(
+ id=get_id_by_name(
+ self._connection.system_service().storage_domains_service(),
+ self.param('storage_domain')
+ )
+ )
+ ]
+ )
+ )
+ )
+
+ return disks
+
+ def __get_snapshot(self):
+
+ if self.param('snapshot_vm') is None:
+ return None
+
+ if self.param('snapshot_name') is None:
+ return None
+
+ vms_service = self._connection.system_service().vms_service()
+ vm_id = get_id_by_name(vms_service, self.param('snapshot_vm'))
+ vm_service = vms_service.vm_service(vm_id)
+
+ snaps_service = vm_service.snapshots_service()
+ snaps = snaps_service.list()
+ snap = next(
+ (s for s in snaps if s.description == self.param('snapshot_name')),
+ None
+ )
+ return snap
+
+ def __get_cluster(self):
+ if self.param('cluster') is not None:
+ return self.param('cluster')
+ elif self.param('snapshot_name') is not None and self.param('snapshot_vm') is not None:
+ vms_service = self._connection.system_service().vms_service()
+ vm = search_by_name(vms_service, self.param('snapshot_vm'))
+ return self._connection.system_service().clusters_service().cluster_service(vm.cluster.id).get().name
+
+ def build_entity(self):
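+ # Builds the otypes.Vm object from the module parameters. Attributes are
+ # only set when the corresponding parameter was provided; everything else
+ # stays None so the engine keeps its current or default values.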
+ template = self.__get_template_with_version()
+ cluster = self.__get_cluster()
+ snapshot = self.__get_snapshot()
+ display = self.param('graphical_console') or dict()
+
+ disk_attachments = self.__get_storage_domain_and_all_template_disks(template)
+
+ return otypes.Vm(
+ id=self.param('id'),
+ name=self.param('name'),
+ cluster=otypes.Cluster(
+ name=cluster
+ ) if cluster else None,
+ disk_attachments=disk_attachments,
+ template=otypes.Template(
+ id=template.id,
+ ) if template else None,
+ use_latest_template_version=self.param('use_latest_template_version'),
+ stateless=self.param('stateless') or self.param('use_latest_template_version'),
+ delete_protected=self.param('delete_protected'),
+ custom_emulated_machine=self.param('custom_emulated_machine'),
+ bios=(
+ otypes.Bios(
+ boot_menu=otypes.BootMenu(enabled=self.param('boot_menu')) if self.param('boot_menu') is not None else None,
+ type=otypes.BiosType[self.param('bios_type').upper()] if self.param('bios_type') is not None else None
+ )
+ ) if self.param('boot_menu') is not None or self.param('bios_type') is not None else None,
+ console=(
+ otypes.Console(enabled=self.param('serial_console'))
+ ) if self.param('serial_console') is not None else None,
+ usb=(
+ otypes.Usb(enabled=self.param('usb_support'))
+ ) if self.param('usb_support') is not None else None,
+ sso=(
+ otypes.Sso(
+ methods=[otypes.Method(id=otypes.SsoMethod.GUEST_AGENT)] if self.param('sso') else []
+ )
+ ) if self.param('sso') is not None else None,
+ quota=otypes.Quota(id=self._module.params.get('quota_id')) if self.param('quota_id') is not None else None,
+ high_availability=otypes.HighAvailability(
+ enabled=self.param('high_availability'),
+ priority=self.param('high_availability_priority'),
+ ) if self.param('high_availability') is not None or self.param('high_availability_priority') else None,
+ lease=otypes.StorageDomainLease(
+ storage_domain=otypes.StorageDomain(
+ id=get_id_by_name(
+ service=self._connection.system_service().storage_domains_service(),
+ name=self.param('lease')
+ ) if self.param('lease') else None
+ )
+ ) if self.param('lease') is not None else None,
+ cpu=otypes.Cpu(
+ topology=otypes.CpuTopology(
+ cores=self.param('cpu_cores'),
+ sockets=self.param('cpu_sockets'),
+ threads=self.param('cpu_threads'),
+ ) if any((
+ self.param('cpu_cores'),
+ self.param('cpu_sockets'),
+ self.param('cpu_threads')
+ )) else None,
+ cpu_tune=otypes.CpuTune(
+ vcpu_pins=[
+ otypes.VcpuPin(vcpu=int(pin['vcpu']), cpu_set=str(pin['cpu'])) for pin in self.param('cpu_pinning')
+ ],
+ ) if self.param('cpu_pinning') else None,
+ mode=otypes.CpuMode(self.param('cpu_mode')) if self.param('cpu_mode') else None,
+ ) if any((
+ self.param('cpu_cores'),
+ self.param('cpu_sockets'),
+ self.param('cpu_threads'),
+ self.param('cpu_mode'),
+ self.param('cpu_pinning')
+ )) else None,
+ cpu_shares=self.param('cpu_shares'),
+ os=otypes.OperatingSystem(
+ type=self.param('operating_system'),
+ boot=otypes.Boot(
+ devices=[
+ otypes.BootDevice(dev) for dev in self.param('boot_devices')
+ ],
+ ) if self.param('boot_devices') else None,
+ cmdline=self.param('kernel_params') if self.param('kernel_params_persist') else None,
+ initrd=self.param('initrd_path') if self.param('kernel_params_persist') else None,
+ kernel=self.param('kernel_path') if self.param('kernel_params_persist') else None,
+ ) if (
+ self.param('operating_system') or self.param('boot_devices') or self.param('kernel_params_persist')
+ ) else None,
+ type=otypes.VmType(
+ self.param('type')
+ ) if self.param('type') else None,
+ memory=convert_to_bytes(
+ self.param('memory')
+ ) if self.param('memory') else None,
+ memory_policy=otypes.MemoryPolicy(
+ guaranteed=convert_to_bytes(self.param('memory_guaranteed')),
+ ballooning=self.param('ballooning_enabled'),
+ max=convert_to_bytes(self.param('memory_max')),
+ ) if any((
+ self.param('memory_guaranteed'),
+ self.param('ballooning_enabled') is not None,
+ self.param('memory_max')
+ )) else None,
+ instance_type=otypes.InstanceType(
+ id=get_id_by_name(
+ self._connection.system_service().instance_types_service(),
+ self.param('instance_type'),
+ ),
+ ) if self.param('instance_type') else None,
+ custom_compatibility_version=otypes.Version(
+ major=self._get_major(self.param('custom_compatibility_version')),
+ minor=self._get_minor(self.param('custom_compatibility_version')),
+ ) if self.param('custom_compatibility_version') is not None else None,
+ description=self.param('description'),
+ comment=self.param('comment'),
+ time_zone=otypes.TimeZone(
+ name=self.param('timezone'),
+ ) if self.param('timezone') else None,
+ serial_number=otypes.SerialNumber(
+ policy=otypes.SerialNumberPolicy(self.param('serial_policy')),
+ value=self.param('serial_policy_value'),
+ ) if (
+ self.param('serial_policy') is not None or
+ self.param('serial_policy_value') is not None
+ ) else None,
+ placement_policy=otypes.VmPlacementPolicy(
+ affinity=otypes.VmAffinity(self.param('placement_policy')),
+ hosts=[
+ otypes.Host(name=self.param('host')),
+ ] if self.param('host') else None,
+ ) if self.param('placement_policy') else None,
+ soundcard_enabled=self.param('soundcard_enabled'),
+ display=otypes.Display(
+ smartcard_enabled=self.param('smartcard_enabled'),
+ disconnect_action=display.get('disconnect_action'),
+ keyboard_layout=display.get('keyboard_layout'),
+ monitors=display.get('monitors'),
+ ) if (
+ self.param('smartcard_enabled') is not None or
+ display.get('disconnect_action') is not None or
+ display.get('keyboard_layout') is not None or
+ display.get('monitors') is not None
+ ) else None,
+ io=otypes.Io(
+ threads=self.param('io_threads'),
+ ) if self.param('io_threads') is not None else None,
+ numa_tune_mode=otypes.NumaTuneMode(
+ self.param('numa_tune_mode')
+ ) if self.param('numa_tune_mode') else None,
+ rng_device=otypes.RngDevice(
+ source=otypes.RngSource(self.param('rng_device')),
+ ) if self.param('rng_device') else None,
+ custom_properties=[
+ otypes.CustomProperty(
+ name=cp.get('name'),
+ regexp=cp.get('regexp'),
+ value=str(cp.get('value')),
+ ) for cp in self.param('custom_properties') if cp
+ ] if self.param('custom_properties') is not None else None,
+ initialization=self.get_initialization() if self.param('cloud_init_persist') else None,
+ snapshots=[otypes.Snapshot(id=snapshot.id)] if snapshot is not None else None,
+ )
+
+ def _get_export_domain_service(self):
+ provider_name = self._module.params['export_domain']
+ export_sds_service = self._connection.system_service().storage_domains_service()
+ export_sd_id = get_id_by_name(export_sds_service, provider_name)
+ return export_sds_service.service(export_sd_id)
+
+ def post_export_action(self, entity):
+ self._service = self._get_export_domain_service().vms_service()
+
+ def update_check(self, entity):
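+ # Returns True when the VM (including any pending next-run configuration)
+ # already matches the requested parameters, i.e. no update is needed.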
+ res = self._update_check(entity)
+ if entity.next_run_configuration_exists:
+ res = res and self._update_check(self._service.service(entity.id).get(next_run=True))
+
+ return res
+
+ def _update_check(self, entity):
+ def check_cpu_pinning():
+ if self.param('cpu_pinning'):
+ current = []
+ if entity.cpu.cpu_tune:
+ current = [(str(pin.cpu_set), int(pin.vcpu)) for pin in entity.cpu.cpu_tune.vcpu_pins]
+ passed = [(str(pin['cpu']), int(pin['vcpu'])) for pin in self.param('cpu_pinning')]
+ return sorted(current) == sorted(passed)
+ return True
+
+ def check_custom_properties():
+ if self.param('custom_properties'):
+ current = []
+ if entity.custom_properties:
+ current = [(cp.name, cp.regexp, str(cp.value)) for cp in entity.custom_properties]
+ passed = [(cp.get('name'), cp.get('regexp'), str(cp.get('value'))) for cp in self.param('custom_properties') if cp]
+ return sorted(current) == sorted(passed)
+ return True
+
+ def check_host():
+ if self.param('host') is not None:
+ return self.param('host') in [self._connection.follow_link(host).name for host in getattr(entity.placement_policy, 'hosts', None) or []]
+ return True
+
+ def check_custom_compatibility_version():
+ if self.param('custom_compatibility_version') is not None:
+ return (self._get_minor(self.param('custom_compatibility_version')) == self._get_minor(entity.custom_compatibility_version) and
+ self._get_major(self.param('custom_compatibility_version')) == self._get_major(entity.custom_compatibility_version))
+ return True
+
+ cpu_mode = getattr(entity.cpu, 'mode')
+ vm_display = entity.display
+ provided_vm_display = self.param('graphical_console') or dict()
+ return (
+ check_cpu_pinning() and
+ check_custom_properties() and
+ check_host() and
+ check_custom_compatibility_version() and
+ not self.param('cloud_init_persist') and
+ not self.param('kernel_params_persist') and
+ equal(self.param('cluster'), get_link_name(self._connection, entity.cluster)) and equal(convert_to_bytes(self.param('memory')), entity.memory) and
+ equal(convert_to_bytes(self.param('memory_guaranteed')), entity.memory_policy.guaranteed) and
+ equal(convert_to_bytes(self.param('memory_max')), entity.memory_policy.max) and
+ equal(self.param('cpu_cores'), entity.cpu.topology.cores) and
+ equal(self.param('cpu_sockets'), entity.cpu.topology.sockets) and
+ equal(self.param('cpu_threads'), entity.cpu.topology.threads) and
+ equal(self.param('cpu_mode'), str(cpu_mode) if cpu_mode else None) and
+ equal(self.param('type'), str(entity.type)) and
+ equal(self.param('name'), str(entity.name)) and
+ equal(self.param('operating_system'), str(entity.os.type)) and
+ equal(self.param('boot_menu'), entity.bios.boot_menu.enabled) and
+ equal(self.param('bios_type'), entity.bios.type.value) and
+ equal(self.param('soundcard_enabled'), entity.soundcard_enabled) and
+ equal(self.param('smartcard_enabled'), getattr(vm_display, 'smartcard_enabled', False)) and
+ equal(self.param('io_threads'), entity.io.threads) and
+ equal(self.param('ballooning_enabled'), entity.memory_policy.ballooning) and
+ equal(self.param('serial_console'), getattr(entity.console, 'enabled', None)) and
+ equal(self.param('usb_support'), entity.usb.enabled) and
+ equal(self.param('sso'), True if entity.sso.methods else False) and
+ equal(self.param('quota_id'), getattr(entity.quota, 'id', None)) and
+ equal(self.param('high_availability'), entity.high_availability.enabled) and
+ equal(self.param('high_availability_priority'), entity.high_availability.priority) and
+ equal(self.param('lease'), get_link_name(self._connection, getattr(entity.lease, 'storage_domain', None))) and
+ equal(self.param('stateless'), entity.stateless) and
+ equal(self.param('cpu_shares'), entity.cpu_shares) and
+ equal(self.param('delete_protected'), entity.delete_protected) and
+ equal(self.param('custom_emulated_machine'), entity.custom_emulated_machine) and
+ equal(self.param('use_latest_template_version'), entity.use_latest_template_version) and
+ equal(self.param('boot_devices'), [str(dev) for dev in getattr(entity.os.boot, 'devices', [])]) and
+ equal(self.param('instance_type'), get_link_name(self._connection, entity.instance_type), ignore_case=True) and
+ equal(self.param('description'), entity.description) and
+ equal(self.param('comment'), entity.comment) and
+ equal(self.param('timezone'), getattr(entity.time_zone, 'name', None)) and
+ equal(self.param('serial_policy'), str(getattr(entity.serial_number, 'policy', None))) and
+ equal(self.param('serial_policy_value'), getattr(entity.serial_number, 'value', None)) and
+ equal(self.param('placement_policy'), str(entity.placement_policy.affinity) if entity.placement_policy else None) and
+ equal(self.param('numa_tune_mode'), str(entity.numa_tune_mode)) and
+ equal(self.param('rng_device'), str(entity.rng_device.source) if entity.rng_device else None) and
+ equal(provided_vm_display.get('monitors'), getattr(vm_display, 'monitors', None)) and
+ equal(provided_vm_display.get('keyboard_layout'), getattr(vm_display, 'keyboard_layout', None)) and
+ equal(provided_vm_display.get('disconnect_action'), getattr(vm_display, 'disconnect_action', None), ignore_case=True)
+ )
+
+ def pre_create(self, entity):
+ # Mark if entity exists before touching it:
+ if entity is None:
+ self._is_new = True
+
+ def post_update(self, entity):
+ self.post_present(entity.id)
+
+ def post_present(self, entity_id):
+ # After creation of the VM, attach disks and NICs:
+ entity = self._service.service(entity_id).get()
+ self.__attach_disks(entity)
+ self.__attach_nics(entity)
+ self._attach_cd(entity)
+ self.changed = self.__attach_numa_nodes(entity)
+ self.changed = self.__attach_watchdog(entity)
+ self.changed = self.__attach_graphical_console(entity)
+ self.changed = self.__attach_host_devices(entity)
+
+ def pre_remove(self, entity):
+ # Forcibly stop the VM, if it's not in DOWN state:
+ if entity.status != otypes.VmStatus.DOWN:
+ if not self._module.check_mode:
+ self.changed = self.action(
+ action='stop',
+ action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
+ wait_condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
+ )['changed']
+
+ def __suspend_shutdown_common(self, vm_service):
+ if vm_service.get().status in [
+ otypes.VmStatus.MIGRATING,
+ otypes.VmStatus.POWERING_UP,
+ otypes.VmStatus.REBOOT_IN_PROGRESS,
+ otypes.VmStatus.WAIT_FOR_LAUNCH,
+ otypes.VmStatus.UP,
+ otypes.VmStatus.RESTORING_STATE,
+ ]:
+ self._wait_for_UP(vm_service)
+
+ def _pre_shutdown_action(self, entity):
+ vm_service = self._service.vm_service(entity.id)
+ self.__suspend_shutdown_common(vm_service)
+ if entity.status in [otypes.VmStatus.SUSPENDED, otypes.VmStatus.PAUSED]:
+ vm_service.start()
+ self._wait_for_UP(vm_service)
+ return vm_service.get()
+
+ def _pre_suspend_action(self, entity):
+ vm_service = self._service.vm_service(entity.id)
+ self.__suspend_shutdown_common(vm_service)
+ if entity.status in [otypes.VmStatus.PAUSED, otypes.VmStatus.DOWN]:
+ vm_service.start()
+ self._wait_for_UP(vm_service)
+ return vm_service.get()
+
+ def _post_start_action(self, entity):
+ vm_service = self._service.service(entity.id)
+ self._wait_for_UP(vm_service)
+ self._attach_cd(vm_service.get())
+
+ def __get_cds_from_sds(self, sds):
+ for sd in sds:
+ if sd.type == otypes.StorageDomainType.ISO:
+ disks = sd.files
+ elif sd.type == otypes.StorageDomainType.DATA:
+ disks = sd.disks
+ else:
+ continue
+ disks = list(filter(lambda x: (x.name == self.param('cd_iso') or x.id == self.param('cd_iso')) and
+ (sd.type == otypes.StorageDomainType.ISO or x.content_type == otypes.DiskContentType.ISO),
+ self._connection.follow_link(disks)))
+ if disks:
+ return disks
+
+ def __get_cd_id(self):
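+ # Resolve the 'cd_iso' parameter (a name or an ID) to a disk ID, searching
+ # the given 'storage_domain' if set, otherwise all storage domains.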
+ sds_service = self._connection.system_service().storage_domains_service()
+ sds = sds_service.list(search='name="{0}"'.format(self.param('storage_domain') if self.param('storage_domain') else "*"))
+ disks = self.__get_cds_from_sds(sds)
+ if not disks:
+ raise ValueError('Was not able to find a disk with name or ID "{0}".'.format(self.param('cd_iso')))
+ if len(disks) > 1:
+ raise ValueError('Found multiple disks with the same name "{0}", please use '
+ 'the disk ID in "cd_iso" to specify which disk should be used.'.format(self.param('cd_iso')))
+ return disks[0].id
+
+ def _attach_cd(self, entity):
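+ # Attach or eject the CD of the VM; an empty 'cd_iso' string ejects the CD.
+ # For a running VM with state=running the change is applied to the current
+ # (non-persistent) configuration, otherwise to the persistent one.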
+ cd_iso_id = self.param('cd_iso')
+ if cd_iso_id is not None:
+ if cd_iso_id:
+ cd_iso_id = self.__get_cd_id()
+ vm_service = self._service.service(entity.id)
+ current = vm_service.get().status == otypes.VmStatus.UP and self.param('state') == 'running'
+ cdroms_service = vm_service.cdroms_service()
+ cdrom_device = cdroms_service.list()[0]
+ cdrom_service = cdroms_service.cdrom_service(cdrom_device.id)
+ cdrom = cdrom_service.get(current=current)
+ if getattr(cdrom.file, 'id', '') != cd_iso_id:
+ if not self._module.check_mode:
+ cdrom_service.update(
+ cdrom=otypes.Cdrom(
+ file=otypes.File(id=cd_iso_id)
+ ),
+ current=current,
+ )
+ self.changed = True
+
+ return entity
+
+ def _migrate_vm(self, entity):
+ vm_host = self.param('host')
+ vm_service = self._service.vm_service(entity.id)
+ # If the VM is preparing to be UP, wait until it is UP before migrating it:
+ if entity.status == otypes.VmStatus.UP:
+ if vm_host is not None:
+ hosts_service = self._connection.system_service().hosts_service()
+ current_vm_host = hosts_service.host_service(entity.host.id).get().name
+ if vm_host != current_vm_host:
+ if not self._module.check_mode:
+ vm_service.migrate(host=otypes.Host(name=vm_host), force=self.param('force_migrate'))
+ self._wait_for_UP(vm_service)
+ self.changed = True
+ elif self.param('migrate'):
+ if not self._module.check_mode:
+ vm_service.migrate(force=self.param('force_migrate'))
+ self._wait_for_UP(vm_service)
+ self.changed = True
+ return entity
+
+ def _wait_for_UP(self, vm_service):
+ wait(
+ service=vm_service,
+ condition=lambda vm: vm.status == otypes.VmStatus.UP,
+ wait=self.param('wait'),
+ timeout=self.param('timeout'),
+ )
+
+ def _wait_for_vm_disks(self, vm_service):
+ disks_service = self._connection.system_service().disks_service()
+ for da in vm_service.disk_attachments_service().list():
+ disk_service = disks_service.disk_service(da.disk.id)
+ wait(
+ service=disk_service,
+ condition=lambda disk: disk.status == otypes.DiskStatus.OK if disk.storage_type == otypes.DiskStorageType.IMAGE else True,
+ wait=self.param('wait'),
+ timeout=self.param('timeout'),
+ )
+
+ def wait_for_down(self, vm):
+ """
+ First wait for the VM to reach the DOWN status. Then, for stateless VMs,
+ find the active snapshot and wait until its state is OK and the stateless
+ snapshot has been removed.
+ """
+ vm_service = self._service.vm_service(vm.id)
+ wait(
+ service=vm_service,
+ condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
+ wait=self.param('wait'),
+ timeout=self.param('timeout'),
+ )
+ if vm.stateless:
+ snapshots_service = vm_service.snapshots_service()
+ snapshots = snapshots_service.list()
+ snap_active = [
+ snap for snap in snapshots
+ if snap.snapshot_type == otypes.SnapshotType.ACTIVE
+ ][0]
+ snap_stateless = [
+ snap for snap in snapshots
+ if snap.snapshot_type == otypes.SnapshotType.STATELESS
+ ]
+ # Stateless snapshot may be already removed:
+ if snap_stateless:
+ """
+ We need to wait for the active snapshot to be removed, as it is the
+ current stateless snapshot. Then we need to wait for the stateless
+ snapshot to be ready for use, because it will become the active snapshot.
+ """
+ wait(
+ service=snapshots_service.snapshot_service(snap_active.id),
+ condition=lambda snap: snap is None,
+ wait=self.param('wait'),
+ timeout=self.param('timeout'),
+ )
+ wait(
+ service=snapshots_service.snapshot_service(snap_stateless[0].id),
+ condition=lambda snap: snap.snapshot_status == otypes.SnapshotStatus.OK,
+ wait=self.param('wait'),
+ timeout=self.param('timeout'),
+ )
+ return True
+
+ def __attach_graphical_console(self, entity):
+ graphical_console = self.param('graphical_console')
+ if not graphical_console:
+ return False
+
+ vm_service = self._service.service(entity.id)
+ gcs_service = vm_service.graphics_consoles_service()
+ graphical_consoles = gcs_service.list()
+
+ # Remove all graphical consoles if there are any:
+ if bool(graphical_console.get('headless_mode')):
+ if not self._module.check_mode:
+ for gc in graphical_consoles:
+ gcs_service.console_service(gc.id).remove()
+ return len(graphical_consoles) > 0
+
+ # If there are no graphical consoles yet, add the requested ones:
+ protocol = graphical_console.get('protocol')
+ current_protocols = [str(gc.protocol) for gc in graphical_consoles]
+ if not current_protocols:
+ if not self._module.check_mode:
+ for p in protocol:
+ gcs_service.add(
+ otypes.GraphicsConsole(
+ protocol=otypes.GraphicsType(p),
+ )
+ )
+ return True
+
+ # Update consoles:
+ if sorted(protocol) != sorted(current_protocols):
+ if not self._module.check_mode:
+ for gc in graphical_consoles:
+ gcs_service.console_service(gc.id).remove()
+ for p in protocol:
+ gcs_service.add(
+ otypes.GraphicsConsole(
+ protocol=otypes.GraphicsType(p),
+ )
+ )
+ return True
+
+ def __attach_disks(self, entity):
+ if not self.param('disks'):
+ return
+
+ vm_service = self._service.service(entity.id)
+ disks_service = self._connection.system_service().disks_service()
+ disk_attachments_service = vm_service.disk_attachments_service()
+
+ self._wait_for_vm_disks(vm_service)
+ for disk in self.param('disks'):
+ # If disk ID is not specified, find disk by name:
+ disk_id = disk.get('id')
+ if disk_id is None:
+ disk_id = getattr(
+ search_by_name(
+ service=disks_service,
+ name=disk.get('name')
+ ),
+ 'id',
+ None
+ )
+
+ # Attach disk to VM:
+ disk_attachment = disk_attachments_service.attachment_service(disk_id)
+ if get_entity(disk_attachment) is None:
+ if not self._module.check_mode:
+ disk_attachments_service.add(
+ otypes.DiskAttachment(
+ disk=otypes.Disk(
+ id=disk_id,
+ ),
+ active=disk.get('activate', True),
+ interface=otypes.DiskInterface(
+ disk.get('interface', 'virtio')
+ ),
+ bootable=disk.get('bootable', False),
+ )
+ )
+ self.changed = True
+
+ def __get_vnic_profile_id(self, nic):
+ """
+ Return the VNIC profile ID looked up by its name. Because there can be
+ multiple VNIC profiles with the same name, the cluster is used as an additional filter.
+ """
+ vnics_service = self._connection.system_service().vnic_profiles_service()
+ clusters_service = self._connection.system_service().clusters_service()
+ cluster = search_by_name(clusters_service, self.param('cluster'))
+ profiles = [
+ profile for profile in vnics_service.list()
+ if profile.name == nic.get('profile_name')
+ ]
+ cluster_networks = [
+ net.id for net in self._connection.follow_link(cluster.networks)
+ ]
+ try:
+ return next(
+ profile.id for profile in profiles
+ if profile.network.id in cluster_networks
+ )
+ except StopIteration:
+ raise Exception(
+ "Profile '%s' was not found in cluster '%s'" % (
+ nic.get('profile_name'),
+ self.param('cluster')
+ )
+ )
+
+ def __get_numa_serialized(self, numa):
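+ # Serialize NUMA nodes into comparable (index, core indices, memory,
+ # pinned host node indices) tuples so configurations can be compared.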
+ return sorted([(x.index,
+ [y.index for y in x.cpu.cores] if x.cpu else [],
+ x.memory,
+ [y.index for y in x.numa_node_pins] if x.numa_node_pins else []
+ ) for x in numa], key=lambda x: x[0])
+
+ def __attach_numa_nodes(self, entity):
+ numa_nodes_service = self._service.service(entity.id).numa_nodes_service()
+ existed_numa_nodes = numa_nodes_service.list()
+ if len(self.param('numa_nodes')) > 0:
+ # Remove all existing virtual numa nodes before adding new ones
+ for current_numa_node in sorted(existed_numa_nodes, reverse=True, key=lambda x: x.index):
+ numa_nodes_service.node_service(current_numa_node.id).remove()
+
+ for numa_node in self.param('numa_nodes'):
+ if numa_node is None or numa_node.get('index') is None or numa_node.get('cores') is None or numa_node.get('memory') is None:
+ continue
+
+ numa_nodes_service.add(
+ otypes.VirtualNumaNode(
+ index=numa_node.get('index'),
+ memory=numa_node.get('memory'),
+ cpu=otypes.Cpu(
+ cores=[
+ otypes.Core(
+ index=core
+ ) for core in numa_node.get('cores')
+ ],
+ ),
+ numa_node_pins=[
+ otypes.NumaNodePin(
+ index=pin
+ ) for pin in numa_node.get('numa_node_pins')
+ ] if numa_node.get('numa_node_pins') is not None else None,
+ )
+ )
+ return self.__get_numa_serialized(numa_nodes_service.list()) != self.__get_numa_serialized(existed_numa_nodes)
+
+ def __attach_watchdog(self, entity):
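+ # Create, update, or remove the VM watchdog so it matches the 'watchdog'
+ # parameter; a mapping without 'model' removes the existing watchdog.
+ # Returns True when a change was made.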
+ watchdogs_service = self._service.service(entity.id).watchdogs_service()
+ watchdog = self.param('watchdog')
+ if watchdog is not None:
+ current_watchdog = next(iter(watchdogs_service.list()), None)
+ if watchdog.get('model') is None and current_watchdog:
+ watchdogs_service.watchdog_service(current_watchdog.id).remove()
+ return True
+ elif watchdog.get('model') is not None and current_watchdog is None:
+ watchdogs_service.add(
+ otypes.Watchdog(
+ model=otypes.WatchdogModel(watchdog.get('model').lower()),
+ action=otypes.WatchdogAction(watchdog.get('action')),
+ )
+ )
+ return True
+ elif current_watchdog is not None:
+ if (
+ str(current_watchdog.model).lower() != watchdog.get('model').lower() or
+ str(current_watchdog.action).lower() != watchdog.get('action').lower()
+ ):
+ watchdogs_service.watchdog_service(current_watchdog.id).update(
+ otypes.Watchdog(
+ model=otypes.WatchdogModel(watchdog.get('model')),
+ action=otypes.WatchdogAction(watchdog.get('action')),
+ )
+ )
+ return True
+ return False
+
+ def __attach_nics(self, entity):
+ # Attach NICs to VM, if specified:
+ nics_service = self._service.service(entity.id).nics_service()
+ for nic in self.param('nics'):
+ if search_by_name(nics_service, nic.get('name')) is None:
+ if not self._module.check_mode:
+ nics_service.add(
+ otypes.Nic(
+ name=nic.get('name'),
+ interface=otypes.NicInterface(
+ nic.get('interface', 'virtio')
+ ),
+ vnic_profile=otypes.VnicProfile(
+ id=self.__get_vnic_profile_id(nic),
+ ) if nic.get('profile_name') else None,
+ mac=otypes.Mac(
+ address=nic.get('mac_address')
+ ) if nic.get('mac_address') else None,
+ )
+ )
+ self.changed = True
+
+ def get_initialization(self):
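+ # Build the otypes.Initialization from sysprep/cloud_init/cloud_init_nics.
+ # The plain cloud_init dict is appended to cloud_init_nics so its nic_*
+ # keys are handled uniformly; its remaining keys are passed through as
+ # Initialization attributes.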
+ if self._initialization is not None:
+ return self._initialization
+
+ sysprep = self.param('sysprep')
+ cloud_init = self.param('cloud_init')
+ cloud_init_nics = self.param('cloud_init_nics') or []
+ if cloud_init is not None:
+ cloud_init_nics.append(cloud_init)
+
+ if cloud_init or cloud_init_nics:
+ self._initialization = otypes.Initialization(
+ nic_configurations=[
+ otypes.NicConfiguration(
+ boot_protocol=otypes.BootProtocol(
+ nic.pop('nic_boot_protocol').lower()
+ ) if nic.get('nic_boot_protocol') else None,
+ ipv6_boot_protocol=otypes.BootProtocol(
+ nic.pop('nic_boot_protocol_v6').lower()
+ ) if nic.get('nic_boot_protocol_v6') else None,
+ name=nic.pop('nic_name', None),
+ on_boot=True,
+ ip=otypes.Ip(
+ address=nic.pop('nic_ip_address', None),
+ netmask=nic.pop('nic_netmask', None),
+ gateway=nic.pop('nic_gateway', None),
+ version=otypes.IpVersion('v4')
+ ) if (
+ nic.get('nic_gateway') is not None or
+ nic.get('nic_netmask') is not None or
+ nic.get('nic_ip_address') is not None
+ ) else None,
+ ipv6=otypes.Ip(
+ address=nic.pop('nic_ip_address_v6', None),
+ netmask=nic.pop('nic_netmask_v6', None),
+ gateway=nic.pop('nic_gateway_v6', None),
+ version=otypes.IpVersion('v6')
+ ) if (
+ nic.get('nic_gateway_v6') is not None or
+ nic.get('nic_netmask_v6') is not None or
+ nic.get('nic_ip_address_v6') is not None
+ ) else None,
+ )
+ for nic in cloud_init_nics
+ if (
+ nic.get('nic_boot_protocol_v6') is not None or
+ nic.get('nic_ip_address_v6') is not None or
+ nic.get('nic_gateway_v6') is not None or
+ nic.get('nic_netmask_v6') is not None or
+ nic.get('nic_gateway') is not None or
+ nic.get('nic_netmask') is not None or
+ nic.get('nic_ip_address') is not None or
+ nic.get('nic_boot_protocol') is not None
+ )
+ ] if cloud_init_nics else None,
+ **cloud_init
+ )
+ elif sysprep:
+ self._initialization = otypes.Initialization(
+ **sysprep
+ )
+ return self._initialization
+
+ def __attach_host_devices(self, entity):
+ vm_service = self._service.service(entity.id)
+ host_devices_service = vm_service.host_devices_service()
+ host_devices = self.param('host_devices')
+ updated = False
+ if host_devices:
+ device_names = [dev.name for dev in host_devices_service.list()]
+ for device in host_devices:
+ device_name = device.get('name')
+ state = device.get('state', 'present')
+ if state == 'absent' and device_name in device_names:
+ updated = True
+ if not self._module.check_mode:
+ device_id = get_id_by_name(host_devices_service, device.get('name'))
+ host_devices_service.device_service(device_id).remove()
+
+ elif state == 'present' and device_name not in device_names:
+ updated = True
+ if not self._module.check_mode:
+ host_devices_service.add(
+ otypes.HostDevice(
+ name=device.get('name'),
+ )
+ )
+
+ return updated
+
+
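+# The following helpers translate the *_mappings module parameters into the
+# SDK mapping objects (Registration*Mapping, VnicProfileMapping) used when
+# registering an unregistered VM.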
+def _get_role_mappings(module):
+ roleMappings = list()
+ for roleMapping in module.params['role_mappings']:
+ roleMappings.append(
+ otypes.RegistrationRoleMapping(
+ from_=otypes.Role(
+ name=roleMapping['source_name'],
+ ) if roleMapping['source_name'] else None,
+ to=otypes.Role(
+ name=roleMapping['dest_name'],
+ ) if roleMapping['dest_name'] else None,
+ )
+ )
+ return roleMappings
+
+
+def _get_affinity_group_mappings(module):
+ affinityGroupMappings = list()
+
+ for affinityGroupMapping in module.params['affinity_group_mappings']:
+ affinityGroupMappings.append(
+ otypes.RegistrationAffinityGroupMapping(
+ from_=otypes.AffinityGroup(
+ name=affinityGroupMapping['source_name'],
+ ) if affinityGroupMapping['source_name'] else None,
+ to=otypes.AffinityGroup(
+ name=affinityGroupMapping['dest_name'],
+ ) if affinityGroupMapping['dest_name'] else None,
+ )
+ )
+ return affinityGroupMappings
+
+
+def _get_affinity_label_mappings(module):
+ affinityLabelMappings = list()
+
+ for affinityLabelMapping in module.params['affinity_label_mappings']:
+ affinityLabelMappings.append(
+ otypes.RegistrationAffinityLabelMapping(
+ from_=otypes.AffinityLabel(
+ name=affinityLabelMapping['source_name'],
+ ) if affinityLabelMapping['source_name'] else None,
+ to=otypes.AffinityLabel(
+ name=affinityLabelMapping['dest_name'],
+ ) if affinityLabelMapping['dest_name'] else None,
+ )
+ )
+ return affinityLabelMappings
+
+
+def _get_domain_mappings(module):
+ domainMappings = list()
+
+ for domainMapping in module.params['domain_mappings']:
+ domainMappings.append(
+ otypes.RegistrationDomainMapping(
+ from_=otypes.Domain(
+ name=domainMapping['source_name'],
+ ) if domainMapping['source_name'] else None,
+ to=otypes.Domain(
+ name=domainMapping['dest_name'],
+ ) if domainMapping['dest_name'] else None,
+ )
+ )
+ return domainMappings
+
+
+def _get_lun_mappings(module):
+ lunMappings = list()
+ for lunMapping in module.params['lun_mappings']:
+ lunMappings.append(
+ otypes.RegistrationLunMapping(
+ from_=otypes.Disk(
+ lun_storage=otypes.HostStorage(
+ type=otypes.StorageType(lunMapping['source_storage_type'])
+ if (lunMapping['source_storage_type'] in
+ ['iscsi', 'fcp']) else None,
+ logical_units=[
+ otypes.LogicalUnit(
+ id=lunMapping['source_logical_unit_id'],
+ )
+ ],
+ ),
+ ) if lunMapping['source_logical_unit_id'] else None,
+ to=otypes.Disk(
+ lun_storage=otypes.HostStorage(
+ type=otypes.StorageType(lunMapping['dest_storage_type'])
+ if (lunMapping['dest_storage_type'] in
+ ['iscsi', 'fcp']) else None,
+ logical_units=[
+ otypes.LogicalUnit(
+ id=lunMapping.get('dest_logical_unit_id'),
+ port=lunMapping.get('dest_logical_unit_port'),
+ portal=lunMapping.get('dest_logical_unit_portal'),
+ address=lunMapping.get('dest_logical_unit_address'),
+ target=lunMapping.get('dest_logical_unit_target'),
+ password=lunMapping.get('dest_logical_unit_password'),
+ username=lunMapping.get('dest_logical_unit_username'),
+ )
+ ],
+ ),
+ ) if lunMapping['dest_logical_unit_id'] else None,
+ ),
+ )
+ return lunMappings
+
+
+def _get_cluster_mappings(module):
+ clusterMappings = list()
+
+ for clusterMapping in module.params['cluster_mappings']:
+ clusterMappings.append(
+ otypes.RegistrationClusterMapping(
+ from_=otypes.Cluster(
+ name=clusterMapping['source_name'],
+ ),
+ to=otypes.Cluster(
+ name=clusterMapping['dest_name'],
+ ) if clusterMapping['dest_name'] else None,
+ )
+ )
+ return clusterMappings
+
+
+def _get_vnic_profile_mappings(module):
+ vnicProfileMappings = list()
+
+ for vnicProfileMapping in module.params['vnic_profile_mappings']:
+ vnicProfileMappings.append(
+ otypes.VnicProfileMapping(
+ source_network_name=vnicProfileMapping['source_network_name'],
+ source_network_profile_name=vnicProfileMapping['source_profile_name'],
+ target_vnic_profile=otypes.VnicProfile(
+ id=vnicProfileMapping['target_profile_id'],
+ ) if vnicProfileMapping['target_profile_id'] else None,
+ )
+ )
+
+ return vnicProfileMappings
+
+
+def import_vm(module, connection):
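+ # Trigger an import of an external VM (kvm/xen/vmware) and wait for the
+ # engine event with code 1152 for that VM; returns False when a VM with
+ # the given name already exists.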
+ vms_service = connection.system_service().vms_service()
+ if search_by_name(vms_service, module.params['name']) is not None:
+ return False
+
+ events_service = connection.system_service().events_service()
+ last_event = events_service.list(max=1)[0]
+
+ external_type = [
+ tmp for tmp in ['kvm', 'xen', 'vmware']
+ if module.params[tmp] is not None
+ ][0]
+
+ external_vm = module.params[external_type]
+ imports_service = connection.system_service().external_vm_imports_service()
+ imported_vm = imports_service.add(
+ otypes.ExternalVmImport(
+ vm=otypes.Vm(
+ name=module.params['name']
+ ),
+ name=external_vm.get('name'),
+ username=external_vm.get('username', 'test'),
+ password=external_vm.get('password', 'test'),
+ provider=otypes.ExternalVmProviderType(external_type),
+ url=external_vm.get('url'),
+ cluster=otypes.Cluster(
+ name=module.params['cluster'],
+ ) if module.params['cluster'] else None,
+ storage_domain=otypes.StorageDomain(
+ name=external_vm.get('storage_domain'),
+ ) if external_vm.get('storage_domain') else None,
+ sparse=external_vm.get('sparse', True),
+ host=otypes.Host(
+ name=module.params['host'],
+ ) if module.params['host'] else None,
+ )
+ )
+
+ # Wait until an event with code 1152 appears for our VM:
+ vms_service = connection.system_service().vms_service()
+ wait(
+ service=vms_service.vm_service(imported_vm.vm.id),
+ condition=lambda vm: len(events_service.list(
+ from_=int(last_event.id),
+ search='type=1152 and vm.id=%s' % vm.id,
+ )
+ ) > 0 if vm is not None else False,
+ fail_condition=lambda vm: vm is None,
+ timeout=module.params['timeout'],
+ poll_interval=module.params['poll_interval'],
+ )
+ return True
+
+
+def control_state(vm, vms_service, module):
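+ # Bring the VM out of transient states (image locked, saving state,
+ # powering down) before the requested state is applied; fails for the
+ # UNASSIGNED/UNKNOWN states.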
+ if vm is None:
+ return
+
+ force = module.params['force']
+ state = module.params['state']
+
+ vm_service = vms_service.vm_service(vm.id)
+ if vm.status == otypes.VmStatus.IMAGE_LOCKED:
+ wait(
+ service=vm_service,
+ condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
+ )
+ elif vm.status == otypes.VmStatus.SAVING_STATE:
+ # The resulting state is SUSPENDED; we should wait for the VM to be suspended:
+ wait(
+ service=vm_service,
+ condition=lambda vm: vm.status == otypes.VmStatus.SUSPENDED,
+ )
+ elif (
+ vm.status == otypes.VmStatus.UNASSIGNED or
+ vm.status == otypes.VmStatus.UNKNOWN
+ ):
+ # Invalid states:
+ module.fail_json(msg="Not possible to control VM, if it's in '{0}' status".format(vm.status))
+ elif vm.status == otypes.VmStatus.POWERING_DOWN:
+ if (force and state == 'stopped') or state == 'absent':
+ vm_service.stop()
+ wait(
+ service=vm_service,
+ condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
+ )
+ else:
+ # If the VM is powering down, wait for it to be DOWN or UP.
+ # The VM can end up in the UP state if there is no guest agent
+ # or ACPI on the VM, or if the shutdown operation crashed:
+ wait(
+ service=vm_service,
+ condition=lambda vm: vm.status in [otypes.VmStatus.DOWN, otypes.VmStatus.UP],
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(type='str', default='present', choices=[
+ 'absent', 'next_run', 'present', 'registered', 'running', 'stopped', 'suspended', 'exported', 'reboot'
+ ]),
+ name=dict(type='str'),
+ id=dict(type='str'),
+ cluster=dict(type='str'),
+ allow_partial_import=dict(type='bool'),
+ template=dict(type='str'),
+ template_version=dict(type='int'),
+ use_latest_template_version=dict(type='bool'),
+ storage_domain=dict(type='str'),
+ disk_format=dict(type='str', default='cow', choices=['cow', 'raw']),
+ disks=dict(type='list', default=[], elements='dict'),
+ memory=dict(type='str'),
+ memory_guaranteed=dict(type='str'),
+ memory_max=dict(type='str'),
+ cpu_sockets=dict(type='int'),
+ cpu_cores=dict(type='int'),
+ cpu_shares=dict(type='int'),
+ cpu_threads=dict(type='int'),
+ type=dict(type='str', choices=['server', 'desktop', 'high_performance']),
+ operating_system=dict(type='str'),
+ cd_iso=dict(type='str'),
+ boot_devices=dict(type='list', choices=['cdrom', 'hd', 'network'], elements='str'),
+ vnic_profile_mappings=dict(default=[], type='list', elements='dict'),
+ cluster_mappings=dict(default=[], type='list', elements='dict'),
+ role_mappings=dict(default=[], type='list', elements='dict'),
+ affinity_group_mappings=dict(default=[], type='list', elements='dict'),
+ affinity_label_mappings=dict(default=[], type='list', elements='dict'),
+ lun_mappings=dict(default=[], type='list', elements='dict'),
+ domain_mappings=dict(default=[], type='list', elements='dict'),
+ reassign_bad_macs=dict(default=None, type='bool'),
+ boot_menu=dict(type='bool'),
+ bios_type=dict(type='str', choices=['i440fx_sea_bios', 'q35_ovmf', 'q35_sea_bios', 'q35_secure_boot']),
+ serial_console=dict(type='bool'),
+ usb_support=dict(type='bool'),
+ sso=dict(type='bool'),
+ quota_id=dict(type='str'),
+ high_availability=dict(type='bool'),
+ high_availability_priority=dict(type='int'),
+ lease=dict(type='str'),
+ stateless=dict(type='bool'),
+ delete_protected=dict(type='bool'),
+ custom_emulated_machine=dict(type='str'),
+ force=dict(type='bool', default=False),
+ nics=dict(type='list', default=[], elements='dict'),
+ cloud_init=dict(type='dict'),
+ cloud_init_nics=dict(type='list', default=[], elements='dict'),
+ cloud_init_persist=dict(type='bool', default=False, aliases=['sysprep_persist']),
+ kernel_params_persist=dict(type='bool', default=False),
+ sysprep=dict(type='dict'),
+ host=dict(type='str'),
+ clone=dict(type='bool', default=False),
+ clone_permissions=dict(type='bool', default=False),
+ kernel_path=dict(type='str'),
+ initrd_path=dict(type='str'),
+ kernel_params=dict(type='str'),
+ instance_type=dict(type='str'),
+ description=dict(type='str'),
+ comment=dict(type='str'),
+ timezone=dict(type='str'),
+ serial_policy=dict(type='str', choices=['vm', 'host', 'custom']),
+ serial_policy_value=dict(type='str'),
+ vmware=dict(type='dict'),
+ xen=dict(type='dict'),
+ kvm=dict(type='dict'),
+ cpu_mode=dict(type='str'),
+ placement_policy=dict(type='str'),
+ custom_compatibility_version=dict(type='str'),
+ ticket=dict(type='bool', default=None),
+ cpu_pinning=dict(type='list', elements='dict'),
+ soundcard_enabled=dict(type='bool', default=None),
+ smartcard_enabled=dict(type='bool', default=None),
+ io_threads=dict(type='int', default=None),
+ ballooning_enabled=dict(type='bool', default=None),
+ rng_device=dict(type='str'),
+ numa_tune_mode=dict(type='str', choices=['interleave', 'preferred', 'strict']),
+ numa_nodes=dict(type='list', default=[], elements='dict'),
+ custom_properties=dict(type='list', elements='dict'),
+ watchdog=dict(type='dict'),
+ host_devices=dict(type='list', elements='dict'),
+ graphical_console=dict(
+ type='dict',
+ options=dict(
+ headless_mode=dict(type='bool'),
+ protocol=dict(type='list', elements='str'),
+ disconnect_action=dict(type='str'),
+ keyboard_layout=dict(type='str'),
+ monitors=dict(type='int'),
+ )
+ ),
+ exclusive=dict(type='bool'),
+ export_domain=dict(default=None),
+ export_ova=dict(type='dict'),
+ force_migrate=dict(type='bool'),
+ migrate=dict(type='bool', default=None),
+ next_run=dict(type='bool'),
+ snapshot_name=dict(type='str'),
+ snapshot_vm=dict(type='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[['id', 'name']],
+ required_if=[
+ ('state', 'registered', ['storage_domain']),
+ ],
+ required_together=[['snapshot_name', 'snapshot_vm']]
+ )
+
+ check_sdk(module)
+ check_params(module)
+
+ try:
+ state = module.params['state']
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vms_service = connection.system_service().vms_service()
+ vms_module = VmsModule(
+ connection=connection,
+ module=module,
+ service=vms_service,
+ )
+ vm = vms_module.search_entity(list_params={'all_content': True})
+
+ # Boolean flag marking whether the VM existed before the module was executed:
+ vm_existed = vm is not None
+ control_state(vm, vms_service, module)
+ if state in ('present', 'running', 'next_run'):
+ if module.params['xen'] or module.params['kvm'] or module.params['vmware']:
+ vms_module.changed = import_vm(module, connection)
+
+ # In case of wait=false and state=running, wait for the VM to be created.
+ # If the VM doesn't exist, wait for it to reach the DOWN state;
+ # otherwise don't wait for any state, just update the VM:
+ ret = vms_module.create(
+ entity=vm,
+ result_state=otypes.VmStatus.DOWN if vm is None else None,
+ update_params={'next_run': module.params['next_run']} if module.params['next_run'] is not None else None,
+ clone=module.params['clone'],
+ clone_permissions=module.params['clone_permissions'],
+ _wait=True if not module.params['wait'] and state == 'running' else module.params['wait'],
+ )
+ # If VM is going to be created and check_mode is on, return now:
+ if module.check_mode and ret.get('id') is None:
+ module.exit_json(**ret)
+
+ vms_module.post_present(ret['id'])
+ # Start the VM when the requested state is 'running':
+ if state == 'running':
+ def kernel_persist_check():
+ return ((module.params.get('kernel_params') or
+ module.params.get('initrd_path') or
+ module.params.get('kernel_path'))
+ and not module.params.get('cloud_init_persist'))
+ initialization = vms_module.get_initialization()
+ ret = vms_module.action(
+ action='start',
+ post_action=vms_module._post_start_action,
+ action_condition=lambda vm: (
+ vm.status not in [
+ otypes.VmStatus.MIGRATING,
+ otypes.VmStatus.POWERING_UP,
+ otypes.VmStatus.REBOOT_IN_PROGRESS,
+ otypes.VmStatus.WAIT_FOR_LAUNCH,
+ otypes.VmStatus.UP,
+ otypes.VmStatus.RESTORING_STATE,
+ ]
+ ),
+ wait_condition=lambda vm: vm.status == otypes.VmStatus.UP,
+ # Start action kwargs:
+ use_cloud_init=True if not module.params.get('cloud_init_persist') and module.params.get('cloud_init') else None,
+ use_sysprep=True if not module.params.get('cloud_init_persist') and module.params.get('sysprep') else None,
+ vm=otypes.Vm(
+ placement_policy=otypes.VmPlacementPolicy(
+ hosts=[otypes.Host(name=module.params['host'])]
+ ) if module.params['host'] else None,
+ initialization=initialization,
+ os=otypes.OperatingSystem(
+ cmdline=module.params.get('kernel_params'),
+ initrd=module.params.get('initrd_path'),
+ kernel=module.params.get('kernel_path'),
+ ) if (kernel_persist_check()) else None,
+ ) if (
+ kernel_persist_check() or
+ module.params.get('host') or
+ initialization is not None
+ and not module.params.get('cloud_init_persist')
+ ) else None,
+ )
+
+ if module.params['ticket']:
+ vm_service = vms_service.vm_service(ret['id'])
+ graphics_consoles_service = vm_service.graphics_consoles_service()
+ graphics_console = graphics_consoles_service.list()[0]
+ console_service = graphics_consoles_service.console_service(graphics_console.id)
+ ticket = console_service.remote_viewer_connection_file()
+ if ticket:
+ ret['vm']['remote_vv_file'] = ticket
+
+ if state == 'next_run':
+ # Apply next run configuration, if needed:
+ vm = vms_service.vm_service(ret['id']).get()
+ if vm.next_run_configuration_exists:
+ ret = vms_module.action(
+ action='reboot',
+ entity=vm,
+ action_condition=lambda vm: vm.status == otypes.VmStatus.UP,
+ wait_condition=lambda vm: vm.status == otypes.VmStatus.UP,
+ )
+ # Allow migrating the VM when state is present and the VM already existed.
+ if vm_existed:
+ vms_module._migrate_vm(vm)
+ ret['changed'] = vms_module.changed
+ elif state == 'stopped':
+ if module.params['xen'] or module.params['kvm'] or module.params['vmware']:
+ vms_module.changed = import_vm(module, connection)
+
+ ret = vms_module.create(
+ entity=vm,
+ result_state=otypes.VmStatus.DOWN if vm is None else None,
+ clone=module.params['clone'],
+ clone_permissions=module.params['clone_permissions'],
+ )
+ if module.params['force']:
+ ret = vms_module.action(
+ action='stop',
+ action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
+ wait_condition=vms_module.wait_for_down,
+ )
+ else:
+ ret = vms_module.action(
+ action='shutdown',
+ pre_action=vms_module._pre_shutdown_action,
+ action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
+ wait_condition=vms_module.wait_for_down,
+ )
+ vms_module.post_present(ret['id'])
+ elif state == 'suspended':
+ ret = vms_module.create(
+ entity=vm,
+ result_state=otypes.VmStatus.DOWN if vm is None else None,
+ clone=module.params['clone'],
+ clone_permissions=module.params['clone_permissions'],
+ )
+ vms_module.post_present(ret['id'])
+ ret = vms_module.action(
+ action='suspend',
+ pre_action=vms_module._pre_suspend_action,
+ action_condition=lambda vm: vm.status != otypes.VmStatus.SUSPENDED,
+ wait_condition=lambda vm: vm.status == otypes.VmStatus.SUSPENDED,
+ )
+ elif state == 'absent':
+ ret = vms_module.remove()
+ elif state == 'registered':
+ storage_domains_service = connection.system_service().storage_domains_service()
+
+ # Find the storage domain with unregistered VM:
+ sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
+ storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
+ vms_service = storage_domain_service.vms_service()
+
+ # Find the unregistered VM we want to register:
+ vms = vms_service.list(unregistered=True)
+ vm = next(
+ (vm for vm in vms if (vm.id == module.params['id'] or vm.name == module.params['name'])),
+ None
+ )
+ changed = False
+ if vm is None:
+ vm = vms_module.search_entity()
+ if vm is None:
+ raise ValueError(
+ "VM '%s(%s)' wasn't found." % (module.params['name'], module.params['id'])
+ )
+ else:
+ # Register the vm into the system:
+ changed = True
+ vm_service = vms_service.vm_service(vm.id)
+ vm_service.register(
+ allow_partial_import=module.params['allow_partial_import'],
+ cluster=otypes.Cluster(
+ name=module.params['cluster']
+ ) if module.params['cluster'] else None,
+ vnic_profile_mappings=_get_vnic_profile_mappings(module)
+ if module.params['vnic_profile_mappings'] else None,
+ reassign_bad_macs=module.params['reassign_bad_macs']
+ if module.params['reassign_bad_macs'] is not None else None,
+ registration_configuration=otypes.RegistrationConfiguration(
+ cluster_mappings=_get_cluster_mappings(module),
+ role_mappings=_get_role_mappings(module),
+ domain_mappings=_get_domain_mappings(module),
+ lun_mappings=_get_lun_mappings(module),
+ affinity_group_mappings=_get_affinity_group_mappings(module),
+ affinity_label_mappings=_get_affinity_label_mappings(module),
+ ) if (module.params['cluster_mappings']
+ or module.params['role_mappings']
+ or module.params['domain_mappings']
+ or module.params['lun_mappings']
+ or module.params['affinity_group_mappings']
+ or module.params['affinity_label_mappings']) else None
+ )
+
+ if module.params['wait']:
+ vm = vms_module.wait_for_import()
+ else:
+ # Fetch the VM to initialize the return value.
+ vm = vm_service.get()
+ ret = {
+ 'changed': changed,
+ 'id': vm.id,
+ 'vm': get_dict_of_struct(vm)
+ }
+ elif state == 'exported':
+ if module.params['export_domain']:
+ export_service = vms_module._get_export_domain_service()
+ export_vm = search_by_attributes(export_service.vms_service(), id=vm.id)
+
+ ret = vms_module.action(
+ entity=vm,
+ action='export',
+ action_condition=lambda t: export_vm is None or module.params['exclusive'],
+ wait_condition=lambda t: t is not None,
+ post_action=vms_module.post_export_action,
+ storage_domain=otypes.StorageDomain(id=export_service.get().id),
+ exclusive=module.params['exclusive'],
+ )
+ elif module.params['export_ova']:
+ export_vm = module.params['export_ova']
+ ret = vms_module.action(
+ entity=vm,
+ action='export_to_path_on_host',
+ host=otypes.Host(name=export_vm.get('host')),
+ directory=export_vm.get('directory'),
+ filename=export_vm.get('filename'),
+ )
+ elif state == 'reboot':
+ ret = vms_module.action(
+ action='reboot',
+ entity=vm,
+ action_condition=lambda vm: vm.status == otypes.VmStatus.UP,
+ wait_condition=lambda vm: vm.status == otypes.VmStatus.UP,
+ )
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vm_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vm_info.py
new file mode 100644
index 00000000..894a1fba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vm_info.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_vm_info
+short_description: Retrieve information about one or more oVirt/RHV virtual machines
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV virtual machines."
+ - This module was called C(ovirt_vm_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_vm_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_vms), which
+ contains a list of virtual machines. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search VM X from cluster Y use following pattern:
+ name=X and cluster=Y"
+ type: str
+ all_content:
+ description:
+ - "If I(true) all the attributes of the virtual machines should be
+ included in the response."
+ type: bool
+ case_sensitive:
+ description:
+ - "If I(true) performed search will take case into account."
+ type: bool
+ default: true
+ max:
+ description:
+ - "The maximum number of results to return."
+ type: int
+ next_run:
+ description:
+ - "Indicates if the returned result describes the virtual machine as it is currently running or if describes
+ the virtual machine with the modifications that have already been performed but that will only come into
+ effect when the virtual machine is restarted. By default the value is set by engine."
+ type: bool
+ current_cd:
+ description:
+ - "If I(true) it will get from all virtual machines current attached cd."
+ type: bool
+ version_added: 1.2.0
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all VMs whose names start with C(centos) and
+# belong to cluster C(west):
+- ovirt.ovirt.ovirt_vm_info:
+ pattern: name=centos* and cluster=west
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_vms }}"
+
+# Gather info about next run configuration of virtual machine named myvm
+- ovirt.ovirt.ovirt_vm_info:
+ pattern: name=myvm
+ next_run: true
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_vms[0] }}"
+'''
+
+RETURN = '''
+ovirt_vms:
+ description: "List of dictionaries describing the VMs. VM attributes are mapped to dictionary keys,
+ all VMs attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ all_content=dict(default=False, type='bool'),
+ current_cd=dict(default=False, type='bool'),
+ next_run=dict(default=None, type='bool'),
+ case_sensitive=dict(default=True, type='bool'),
+ max=dict(default=None, type='int'),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vms_service = connection.system_service().vms_service()
+ vms = vms_service.list(
+ search=module.params['pattern'],
+ all_content=module.params['all_content'],
+ case_sensitive=module.params['case_sensitive'],
+ max=module.params['max'],
+ )
+ if module.params['next_run']:
+ vms = [vms_service.vm_service(vm.id).get(next_run=True) for vm in vms]
+
+ result = dict(
+ ovirt_vms=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in vms
+ ],
+ )
+ for i, vm in enumerate(result['ovirt_vms']):
+ if module.params['current_cd']:
+ vm_service = vms_service.vm_service(vm['id'])
+ cdroms_service = vm_service.cdroms_service()
+ cdrom_device = cdroms_service.list()[0]
+ cdrom_service = cdroms_service.cdrom_service(cdrom_device.id)
+ result['ovirt_vms'][i]['current_cd'] = get_dict_of_struct(
+ struct=cdrom_service.get(current=True),
+ connection=connection,
+ )
+ else:
+ result['ovirt_vms'][i]['current_cd'] = {}
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vm_os_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vm_os_info.py
new file mode 100644
index 00000000..73185678
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vm_os_info.py
@@ -0,0 +1,123 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_vm_os_info
+short_description: Retrieve information on all supported oVirt/RHV operating systems
+version_added: "1.1.0"
+author:
+- "Martin Necas (@mnecas)"
+- "Chris Brown (@snecklifter)"
+description:
+ - "Retrieve information on all supported oVirt/RHV operating systems."
+notes:
+ - "This module returns a variable C(ovirt_operating_systems), which
+ contains a list of operating systems. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ filter_keys:
+ description:
+ - "List of attributes which should be in returned."
+ type: list
+ elements: str
+ name:
+ description:
+ - "Name of the operating system which should be returned."
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Look at ovirt_auth module to see how to reuse authentication:
+
+- ovirt.ovirt.ovirt_vm_os_info:
+ auth: "{{ ovirt_auth }}"
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_operating_systems }}"
+
+- ovirt.ovirt.ovirt_vm_os_info:
+ auth: "{{ ovirt_auth }}"
+ filter_keys: name,architecture
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_operating_systems }}"
+'''
+
+RETURN = '''
+ovirt_operating_systems:
+ description: "List of dictionaries describing the operating systems. Operating system attributes are mapped to dictionary keys,
+ all operating systems attributes can be found at following url:
+ http://ovirt.github.io/ovirt-engine-api-model/master/#types/operating_system_info."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ filter_keys=dict(default=None, type='list', elements='str'),
+ name=dict(default=None, type='str'),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ operating_systems_service = connection.system_service().operating_systems_service()
+ operating_systems = operating_systems_service.list()
+ if module.params['name']:
+ operating_systems = filter(lambda x: x.name == module.params['name'], operating_systems)
+ result = dict(
+ ovirt_operating_systems=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ filter_keys=module.params['filter_keys'],
+ ) for c in operating_systems
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vmpool.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vmpool.py
new file mode 100644
index 00000000..08326058
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vmpool.py
@@ -0,0 +1,491 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_vmpool
+short_description: Module to manage VM pools in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Module to manage VM pools in oVirt/RHV."
+options:
+ id:
+ description:
+ - "ID of the vmpool to manage."
+ type: str
+ name:
+ description:
+ - "Name of the VM pool to manage."
+ type: str
+ required: true
+ comment:
+ description:
+ - Comment of the Virtual Machine pool.
+ type: str
+ state:
+ description:
+ - "Should the VM pool be present/absent."
+ - "Note that when C(state) is I(absent) all VMs in VM pool are stopped and removed."
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ template:
+ description:
+ - "Name of the template, which will be used to create VM pool."
+ type: str
+ description:
+ description:
+ - "Description of the VM pool."
+ type: str
+ cluster:
+ description:
+ - "Name of the cluster, where VM pool should be created."
+ type: str
+ type:
+ description:
+ - "Type of the VM pool. Either manual or automatic."
+ - "C(manual) - The administrator is responsible for explicitly returning the virtual machine to the pool.
+ The virtual machine reverts to the original base image after the administrator returns it to the pool."
+ - "C(Automatic) - When the virtual machine is shut down, it automatically reverts to its base image and
+ is returned to the virtual machine pool."
+ - "Default value is set by engine."
+ choices: ['manual', 'automatic']
+ type: str
+ vm_per_user:
+ description:
+ - "Maximum number of VMs a single user can attach to from this pool."
+ - "Default value is set by engine."
+ type: int
+ prestarted:
+ description:
+ - "Number of pre-started VMs defines the number of VMs in run state, that are waiting
+ to be attached to Users."
+ - "Default value is set by engine."
+ type: int
+ vm_count:
+ description:
+ - "Number of VMs in the pool."
+ - "Default value is set by engine."
+ type: int
+ vm:
+ description:
+ - "For creating vm pool without editing template."
+ - "Note: You can use C(vm) only for creating vm pool."
+ type: dict
+ suboptions:
+ comment:
+ description:
+ - Comment of the Virtual Machine.
+ timezone:
+ description:
+ - Sets time zone offset of the guest hardware clock.
+ - For example C(Etc/GMT)
+ memory:
+ description:
+ - Amount of memory of the Virtual Machine. Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
+ - Default value is set by engine.
+ memory_guaranteed:
+ description:
+ - Amount of minimal guaranteed memory of the Virtual Machine.
+ Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
+ - C(memory_guaranteed) parameter can't be lower than C(memory) parameter.
+ - Default value is set by engine.
+ memory_max:
+ description:
+ - Upper bound of virtual machine memory up to which memory hot-plug can be performed.
+ Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
+ - Default value is set by engine.
+ cloud_init:
+ description:
+ - Dictionary with values for Unix-like Virtual Machine initialization using cloud init.
+ - C(host_name) - Hostname to be set to Virtual Machine when deployed.
+ - C(timezone) - Timezone to be set to Virtual Machine when deployed.
+ - C(user_name) - Username to be used to set password to Virtual Machine when deployed.
+ - C(root_password) - Password to be set for user specified by C(user_name) parameter.
+          - C(authorized_ssh_keys) - Use these SSH keys to log in to the Virtual Machine.
+ - C(regenerate_ssh_keys) - If I(True) SSH keys will be regenerated on Virtual Machine.
+ - C(custom_script) - Cloud-init script which will be executed on Virtual Machine when deployed. This is appended to the end of the
+ cloud-init script generated by any other options. For further information, refer to cloud-init User-Data documentation.
+ - C(dns_servers) - DNS servers to be configured on Virtual Machine, maximum of two, space-separated.
+ - C(dns_search) - DNS search domains to be configured on Virtual Machine.
+ - C(nic_boot_protocol) - Set boot protocol of the network interface of Virtual Machine. Can be one of C(none), C(dhcp) or C(static).
+ - C(nic_ip_address) - If boot protocol is static, set this IP address to network interface of Virtual Machine.
+ - C(nic_netmask) - If boot protocol is static, set this netmask to network interface of Virtual Machine.
+ - C(nic_gateway) - If boot protocol is static, set this gateway to network interface of Virtual Machine.
+ - C(nic_name) - Set name to network interface of Virtual Machine.
+ sso:
+ description:
+ - "I(True) enable Single Sign On by Guest Agent, I(False) to disable it. By default is chosen by oVirt/RHV engine."
+ type: bool
+ smartcard_enabled:
+ description:
+ - "If I(true), use smart card authentication."
+ type: bool
+ nics:
+ description:
+          - List of NICs which should be attached to the Virtual Machine. Each NIC is described by the following dictionary.
+          - C(name) - Name of the NIC.
+          - C(profile_name) - Profile name where NIC should be attached.
+          - C(interface) - Type of the network interface. One of the following I(virtio), I(e1000), I(rtl8139); default is I(virtio).
+          - C(mac_address) - Custom MAC address of the network interface, by default it's obtained from MAC pool.
+          - NOTE - This parameter is used only when C(state) is I(present) and can only create NICs.
+          - To manage NICs of the VM in more depth please use the M(ovirt.ovirt.ovirt_nics) module instead.
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Create VM pool from template
+ ovirt.ovirt.ovirt_vmpool:
+ cluster: mycluster
+ name: myvmpool
+ template: rhel7
+ vm_count: 2
+ prestarted: 2
+ vm_per_user: 1
+
+- name: Remove vmpool, note that all VMs in pool will be stopped and removed
+ ovirt.ovirt.ovirt_vmpool:
+ state: absent
+ name: myvmpool
+
+- name: Change Pool Name
+ ovirt.ovirt.ovirt_vmpool:
+ id: 00000000-0000-0000-0000-000000000000
+ name: "new_pool_name"
+
+- name: Create vm pool and override the pool values
+ ovirt.ovirt.ovirt_vmpool:
+ cluster: mycluster
+ name: vmpool
+ template: blank
+ vm_count: 2
+ prestarted: 1
+ vm_per_user: 1
+ vm:
+ memory: 4GiB
+ memory_guaranteed: 4GiB
+ memory_max: 10GiB
+ comment: vncomment
+ cloud_init:
+ nic_boot_protocol: static
+ nic_ip_address: 10.34.60.86
+ nic_netmask: 255.255.252.0
+ nic_gateway: 10.34.63.254
+ nic_name: eth1
+ host_name: example.com
+ custom_script: |
+ write_files:
+ - content: |
+ Hello, world!
+ path: /tmp/greeting.txt
+ permissions: '0644'
+ user_name: root
+ root_password: super_password
+ nics:
+ - name: nicname
+ interface: virtio
+ profile_name: network
+
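+# A minimal sketch of an automatic pool (assumes cluster mycluster and
+# template rhel7 exist); VMs revert to the base image when shut down:
+- name: Create an automatic VM pool
+  ovirt.ovirt.ovirt_vmpool:
+    cluster: mycluster
+    name: myautopool
+    template: rhel7
+    type: automatic
+    vm_count: 3
+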
+'''
+
+RETURN = '''
+id:
+ description: ID of the VM pool which is managed
+ returned: On success if VM pool is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+vm_pool:
+ description: "Dictionary of all the VM pool attributes. VM pool attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm_pool."
+ returned: On success if VM pool is found.
+ type: dict
+'''
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_params,
+ check_sdk,
+ create_connection,
+ equal,
+ get_link_name,
+ ovirt_full_argument_spec,
+ wait,
+ convert_to_bytes,
+ search_by_name,
+)
+
+
+class VmPoolsModule(BaseModule):
+ def __init__(self, *args, **kwargs):
+ super(VmPoolsModule, self).__init__(*args, **kwargs)
+ self._initialization = None
+
+ def build_entity(self):
+ vm = self.param('vm')
+ return otypes.VmPool(
+ id=self._module.params['id'],
+ name=self._module.params['name'],
+ description=self._module.params['description'],
+ comment=self._module.params['comment'],
+ cluster=otypes.Cluster(
+ name=self._module.params['cluster']
+ ) if self._module.params['cluster'] else None,
+ template=otypes.Template(
+ name=self._module.params['template']
+ ) if self._module.params['template'] else None,
+ max_user_vms=self._module.params['vm_per_user'],
+ prestarted_vms=self._module.params['prestarted'],
+ size=self._module.params['vm_count'],
+ type=otypes.VmPoolType(
+ self._module.params['type']
+ ) if self._module.params['type'] else None,
+ vm=self.build_vm(vm) if self._module.params['vm'] else None,
+ )
+
+ def build_vm(self, vm):
+ return otypes.Vm(
+ comment=vm.get('comment'),
+ memory=convert_to_bytes(
+ vm.get('memory')
+ ) if vm.get('memory') else None,
+ memory_policy=otypes.MemoryPolicy(
+ guaranteed=convert_to_bytes(vm.get('memory_guaranteed')),
+ max=convert_to_bytes(vm.get('memory_max')),
+ ) if any((
+ vm.get('memory_guaranteed'),
+ vm.get('memory_max')
+ )) else None,
+ initialization=self.get_initialization(vm),
+ display=otypes.Display(
+ smartcard_enabled=vm.get('smartcard_enabled')
+ ) if vm.get('smartcard_enabled') is not None else None,
+ sso=(
+ otypes.Sso(
+ methods=[otypes.Method(id=otypes.SsoMethod.GUEST_AGENT)] if vm.get('sso') else []
+ )
+ ) if vm.get('sso') is not None else None,
+ time_zone=otypes.TimeZone(
+ name=vm.get('timezone'),
+ ) if vm.get('timezone') else None,
+ )
+
+ def get_initialization(self, vm):
+ if self._initialization is not None:
+ return self._initialization
+
+ sysprep = vm.get('sysprep')
+ cloud_init = vm.get('cloud_init')
+ cloud_init_nics = vm.get('cloud_init_nics') or []
+ if cloud_init is not None:
+ cloud_init_nics.append(cloud_init)
+
+ if cloud_init or cloud_init_nics:
+ self._initialization = otypes.Initialization(
+ nic_configurations=[
+ otypes.NicConfiguration(
+ boot_protocol=otypes.BootProtocol(
+ nic.pop('nic_boot_protocol').lower()
+ ) if nic.get('nic_boot_protocol') else None,
+ name=nic.pop('nic_name', None),
+ on_boot=True,
+ ip=otypes.Ip(
+ address=nic.pop('nic_ip_address', None),
+ netmask=nic.pop('nic_netmask', None),
+ gateway=nic.pop('nic_gateway', None),
+ ) if (
+ nic.get('nic_gateway') is not None or
+ nic.get('nic_netmask') is not None or
+ nic.get('nic_ip_address') is not None
+ ) else None,
+ )
+ for nic in cloud_init_nics
+ if (
+ nic.get('nic_gateway') is not None or
+ nic.get('nic_netmask') is not None or
+ nic.get('nic_ip_address') is not None or
+ nic.get('nic_boot_protocol') is not None
+ )
+ ] if cloud_init_nics else None,
+ **cloud_init
+ )
+ elif sysprep:
+ self._initialization = otypes.Initialization(
+ **sysprep
+ )
+ return self._initialization
+
+ def get_vms(self, entity):
+ vms = self._connection.system_service().vms_service().list()
+ resp = []
+ for vm in vms:
+ if vm.vm_pool is not None and vm.vm_pool.id == entity.id:
+ resp.append(vm)
+ return resp
+
+ def post_create(self, entity):
+ vm_param = self.param('vm')
+ if vm_param is not None and vm_param.get('nics') is not None:
+ vms = self.get_vms(entity)
+ for vm in vms:
+ self.__attach_nics(vm, vm_param)
+
+ def __attach_nics(self, entity, vm_param):
+ # Attach NICs to VM, if specified:
+ vms_service = self._connection.system_service().vms_service()
+ nics_service = vms_service.service(entity.id).nics_service()
+ for nic in vm_param.get('nics'):
+ if search_by_name(nics_service, nic.get('name')) is None:
+ if not self._module.check_mode:
+ nics_service.add(
+ otypes.Nic(
+ name=nic.get('name'),
+ interface=otypes.NicInterface(
+ nic.get('interface', 'virtio')
+ ),
+ vnic_profile=otypes.VnicProfile(
+ id=self.__get_vnic_profile_id(nic),
+ ) if nic.get('profile_name') else None,
+ mac=otypes.Mac(
+ address=nic.get('mac_address')
+ ) if nic.get('mac_address') else None,
+ )
+ )
+ self.changed = True
+
+ def __get_vnic_profile_id(self, nic):
+ """
+        Return the VNIC profile ID looked up by its name. Because there can be
+        multiple VNIC profiles with the same name, the cluster is used as an additional filter criterion.
+ """
+ vnics_service = self._connection.system_service().vnic_profiles_service()
+ clusters_service = self._connection.system_service().clusters_service()
+ cluster = search_by_name(clusters_service, self.param('cluster'))
+ profiles = [
+ profile for profile in vnics_service.list()
+ if profile.name == nic.get('profile_name')
+ ]
+ cluster_networks = [
+ net.id for net in self._connection.follow_link(cluster.networks)
+ ]
+ try:
+ return next(
+ profile.id for profile in profiles
+ if profile.network.id in cluster_networks
+ )
+ except StopIteration:
+ raise Exception(
+ "Profile '%s' was not found in cluster '%s'" % (
+ nic.get('profile_name'),
+ self.param('cluster')
+ )
+ )
+
+ def update_check(self, entity):
+ return (
+ equal(self._module.params.get('name'), entity.name) and
+ equal(self._module.params.get('cluster'), get_link_name(self._connection, entity.cluster)) and
+ equal(self._module.params.get('description'), entity.description) and
+ equal(self._module.params.get('comment'), entity.comment) and
+ equal(self._module.params.get('vm_per_user'), entity.max_user_vms) and
+ equal(self._module.params.get('prestarted'), entity.prestarted_vms) and
+ equal(self._module.params.get('vm_count'), entity.size)
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ id=dict(default=None),
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ name=dict(required=True),
+ template=dict(default=None),
+ cluster=dict(default=None),
+ description=dict(default=None),
+ vm=dict(default=None, type='dict'),
+ comment=dict(default=None),
+ vm_per_user=dict(default=None, type='int'),
+ prestarted=dict(default=None, type='int'),
+ vm_count=dict(default=None, type='int'),
+ type=dict(default=None, choices=['automatic', 'manual']),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ check_sdk(module)
+ check_params(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vm_pools_service = connection.system_service().vm_pools_service()
+ vm_pools_module = VmPoolsModule(
+ connection=connection,
+ module=module,
+ service=vm_pools_service,
+ )
+
+ state = module.params['state']
+ if state == 'present':
+ ret = vm_pools_module.create()
+
+ # Wait for all VM pool VMs to be created:
+ if module.params['wait']:
+ vms_service = connection.system_service().vms_service()
+ for vm in vms_service.list(search='pool=%s' % module.params['name']):
+ wait(
+ service=vms_service.service(vm.id),
+ condition=lambda vm: vm.status in [otypes.VmStatus.DOWN, otypes.VmStatus.UP],
+ timeout=module.params['timeout'],
+ )
+
+ elif state == 'absent':
+ ret = vm_pools_module.remove()
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vmpool_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vmpool_info.py
new file mode 100644
index 00000000..8b0a3bf0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vmpool_info.py
@@ -0,0 +1,111 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_vmpool_info
+short_description: Retrieve information about one or more oVirt/RHV vmpools
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV vmpools."
+ - This module was called C(ovirt_vmpool_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(ovirt.ovirt.ovirt_vmpool_info) module no longer returns C(ansible_facts)!
+notes:
+ - "This module returns a variable C(ovirt_vmpools), which
+ contains a list of vmpools. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search vmpool X: name=X"
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about all vm pools whose names start with C(centos):
+- ovirt.ovirt.ovirt_vmpool_info:
+ pattern: name=centos*
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_vm_pools }}"
+'''
+
+RETURN = '''
+ovirt_vm_pools:
+ description: "List of dictionaries describing the vmpools. Vm pool attributes are mapped to dictionary keys,
+ all vmpools attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm_pool."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vmpools_service = connection.system_service().vm_pools_service()
+ vmpools = vmpools_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_vm_pools=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in vmpools
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vnic_profile.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vnic_profile.py
new file mode 100644
index 00000000..b8a080e0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vnic_profile.py
@@ -0,0 +1,330 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_vnic_profile
+short_description: Module to manage vNIC profile of network in oVirt/RHV
+version_added: "1.0.0"
+author:
+- "Ondra Machacek (@machacekondra)"
+- "Martin Necas (@mnecas)"
+description:
+ - "Module to manage vNIC profile of network in oVirt/RHV"
+options:
+ name:
+ description:
+ - "A human-readable name in plain text."
+ required: true
+ type: str
+ state:
+ description:
+ - "Should the vNIC be absent/present."
+ choices: ['absent', 'present']
+ default: present
+ type: str
+ description:
+ description:
+ - "A human-readable description in plain text."
+ type: str
+ data_center:
+ description:
+ - "Datacenter name where network reside."
+ type: str
+ required: true
+ network:
+ description:
+ - "Name of network to which is vNIC attached."
+ type: str
+ required: true
+ network_filter:
+ description:
+ - "The network filter enables to filter packets send to/from the VM's nic according to defined rules."
+ type: str
+ custom_properties:
+ description:
+ - "Custom properties applied to the vNIC profile."
+ - "Custom properties is a list of dictionary which can have following values:"
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - "Name of the custom property. For example: I(hugepages), I(vhost), I(sap_agent), etc."
+ regexp:
+ description:
+ - Regular expression to set for custom property.
+ value:
+ description:
+ - Value to set for custom property.
+ qos:
+ description:
+ - "Quality of Service attributes regulate inbound and outbound network traffic of the NIC."
+ type: str
+ port_mirroring:
+ description:
+ - "Enables port mirroring."
+ type: bool
+ pass_through:
+ description:
+ - "Enables passthrough to an SR-IOV-enabled host NIC."
+ - "When enabled C(qos) and C(network_filter) are automatically set to None and C(port_mirroring) to False."
+ - "When enabled and C(migratable) not specified then C(migratable) is enabled."
+ - "Port mirroring, QoS and network filters are not supported on passthrough profiles."
+ choices: ['disabled', 'enabled']
+ type: str
+ migratable:
+ description:
+ - "Marks whether pass_through NIC is migratable or not."
+ type: bool
+extends_documentation_fragment: ovirt.ovirt.ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+- name: Add vNIC
+ ovirt.ovirt.ovirt_vnic_profile:
+ name: myvnic
+ network: mynetwork
+ state: present
+ data_center: datacenter
+
+- name: Editing vNICs network_filter, custom_properties, qos
+ ovirt.ovirt.ovirt_vnic_profile:
+ name: myvnic
+ network: mynetwork
+ data_center: datacenter
+ qos: myqos
+ custom_properties:
+ - name: SecurityGroups
+ value: 9bd9bde9-39da-44a8-9541-aa39e1a81c9d
+ network_filter: allow-dhcp
+
+- name: Remove vNICs network_filter, custom_properties, qos
+ ovirt.ovirt.ovirt_vnic_profile:
+ name: myvnic
+ network: mynetwork
+ data_center: datacenter
+ qos: ""
+ custom_properties: ""
+ network_filter: ""
+
+- name: Do not use migratable
+ ovirt.ovirt.ovirt_vnic_profile:
+ name: myvnic
+ network: mynetwork
+ data_center: datacenter
+ migratable: False
+ pass_through: enabled
+
+- name: Remove vNIC
+ ovirt.ovirt.ovirt_vnic_profile:
+ name: myvnic
+ network: mynetwork
+ state: absent
+ data_center: datacenter
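+
+# A sketch enabling port mirroring on the profile (port mirroring
+# cannot be combined with pass_through):
+- name: Enable port mirroring
+  ovirt.ovirt.ovirt_vnic_profile:
+    name: myvnic
+    network: mynetwork
+    data_center: datacenter
+    port_mirroring: true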
+'''
+
+RETURN = '''
+id:
+ description: ID of the vNIC profile which is managed
+ returned: On success if vNIC profile is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+vnic:
+ description: "Dictionary of all the vNIC profile attributes. Network interface attributes can be found on your oVirt/RHV instance
+ at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/nic."
+ returned: On success if vNIC profile is found.
+ type: dict
+'''
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ get_link_name,
+ ovirt_full_argument_spec,
+ search_by_name,
+ get_id_by_name
+)
+
+
+class EntityVnicProfileModule(BaseModule):
+
+ def __init__(self, *args, **kwargs):
+        super(EntityVnicProfileModule, self).__init__(*args, **kwargs)
+
+ def _get_dcs_service(self):
+ return self._connection.system_service().data_centers_service()
+
+ def _get_dcs_id(self):
+ return get_id_by_name(self._get_dcs_service(), self.param('data_center'))
+
+ def _get_network_id(self):
+ networks_service = self._get_dcs_service().service(self._get_dcs_id()).networks_service()
+ return get_id_by_name(networks_service, self.param('network'))
+
+ def _get_qos_id(self):
+ if self.param('qos'):
+ qoss_service = self._get_dcs_service().service(self._get_dcs_id()).qoss_service()
+            return get_id_by_name(qoss_service, self.param('qos'))
+ return None
+
+ def _get_network_filter_id(self):
+ nf_service = self._connection.system_service().network_filters_service()
+ return get_id_by_name(nf_service, self.param('network_filter')) if self.param('network_filter') else None
+
+ def _get_network_filter(self):
+ network_filter = None
+        # The order of these conditions matters: if both network_filter and pass_through
+        # were specified, the module would try to create the network filter and fail on the engine.
+ if self.param('network_filter') == '' or self.param('pass_through') == 'enabled':
+ network_filter = otypes.NetworkFilter()
+ elif self.param('network_filter'):
+ network_filter = otypes.NetworkFilter(id=self._get_network_filter_id())
+ return network_filter
+
+ def _get_qos(self):
+ qos = None
+        # The order of these conditions matters: if both qos and pass_through were specified, the module would try to create the QoS and fail on the engine.
+ if self.param('qos') == '' or self.param('pass_through') == 'enabled':
+ qos = otypes.Qos()
+ elif self.param('qos'):
+ qos = otypes.Qos(id=self._get_qos_id())
+ return qos
+
+ def _get_port_mirroring(self):
+ if self.param('pass_through') == 'enabled':
+ return False
+ return self.param('port_mirroring')
+
+ def _get_migratable(self):
+ if self.param('migratable') is not None:
+ return self.param('migratable')
+ if self.param('pass_through') == 'enabled':
+ return True
+
+ def build_entity(self):
+ return otypes.VnicProfile(
+ name=self.param('name'),
+ network=otypes.Network(id=self._get_network_id()),
+ description=self.param('description') if self.param('description') is not None else None,
+ pass_through=otypes.VnicPassThrough(mode=otypes.VnicPassThroughMode(self.param('pass_through'))) if self.param('pass_through') else None,
+ custom_properties=[
+ otypes.CustomProperty(
+ name=cp.get('name'),
+ regexp=cp.get('regexp'),
+ value=str(cp.get('value')),
+ ) for cp in self.param('custom_properties') if cp
+ ] if self.param('custom_properties') else None,
+ migratable=self._get_migratable(),
+ qos=self._get_qos(),
+ port_mirroring=self._get_port_mirroring(),
+ network_filter=self._get_network_filter()
+ )
+
+ def update_check(self, entity):
+ def check_custom_properties():
+ if self.param('custom_properties'):
+ current = []
+ if entity.custom_properties:
+ current = [(cp.name, cp.regexp, str(cp.value)) for cp in entity.custom_properties]
+ passed = [(cp.get('name'), cp.get('regexp'), str(cp.get('value'))) for cp in self.param('custom_properties') if cp]
+ return sorted(current) == sorted(passed)
+ return True
+
+ pass_through = getattr(entity.pass_through.mode, 'name', None)
+ return (
+ check_custom_properties() and
+            # We can't use the equal method here, because _get_network_filter_id and _get_qos_id
+            # return None when an empty string is passed, and equal returns true whenever its first parameter is None.
+ self._get_network_filter_id() == getattr(entity.network_filter, 'id', None) and
+ self._get_qos_id() == getattr(entity.qos, 'id', None) and
+ equal(self.param('migratable'), getattr(entity, 'migratable', None)) and
+ equal(self.param('pass_through'), pass_through.lower() if pass_through else None) and
+ equal(self.param('description'), entity.description) and
+ equal(self.param('port_mirroring'), getattr(entity, 'port_mirroring', None))
+ )
+
+
+def get_entity(vnic_services, entitynics_module):
+ vnic_profiles = vnic_services.list()
+ network_id = entitynics_module._get_network_id()
+ for vnic in vnic_profiles:
+        # When the vNIC profile already exists update it, otherwise create it:
+ if vnic.name == entitynics_module.param('name') and network_id == vnic.network.id:
+ return vnic
+
+
+def check_params(module):
+ if (module.params.get('port_mirroring') or module.params.get('network_filter') or module.params.get('qos'))\
+ and module.params.get('pass_through') == 'enabled':
+ module.fail_json(msg="Cannot edit VM network interface profile. 'Port Mirroring,'Qos' and 'Network Filter' are not supported on passthrough profiles.")
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ network=dict(type='str', required=True),
+ data_center=dict(type='str', required=True),
+ description=dict(type='str'),
+ name=dict(type='str', required=True),
+ network_filter=dict(type='str'),
+ custom_properties=dict(type='list', elements='dict'),
+ qos=dict(type='str'),
+ pass_through=dict(type='str', choices=['disabled', 'enabled']),
+ port_mirroring=dict(type='bool'),
+ migratable=dict(type='bool'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+
+ )
+ check_sdk(module)
+ check_params(module)
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+
+ vnic_services = connection.system_service().vnic_profiles_service()
+
+        entitynics_module = EntityVnicProfileModule(
+ connection=connection,
+ module=module,
+ service=vnic_services,
+ )
+ state = module.params['state']
+ entity = get_entity(vnic_services, entitynics_module)
+ if state == 'present':
+ ret = entitynics_module.create(entity=entity, force_create=entity is None)
+ elif state == 'absent':
+ if entity is not None:
+ ret = entitynics_module.remove(entity=entity)
+ else:
+ raise Exception("Vnic profile '%s' in network '%s' was not found." % (module.params['name'], module.params['network']))
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vnic_profile_info.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vnic_profile_info.py
new file mode 100644
index 00000000..ccfc3e69
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/modules/ovirt_vnic_profile_info.py
@@ -0,0 +1,114 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_vnic_profile_info
+short_description: Retrieve information about one or more oVirt/RHV vnic profiles
+version_added: "1.0.0"
+author: "Martin Necas (@mnecas)"
+description:
+ - "Retrieve information about one or more oVirt/RHV vnic profiles."
+notes:
+ - "This module returns a variable C(ovirt_vnic_profiles), which
+ contains a list of vnic profiles. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ max:
+ description:
+ - "The maximum number of results to return."
+ type: int
+ name:
+ description:
+ - "Name of vnic profile."
+ type: str
+extends_documentation_fragment: ovirt.ovirt.ovirt_info
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather information about 10 vnic profiles
+- ovirt.ovirt.ovirt_vnic_profile_info:
+ max: 10
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_vnic_profiles }}"
+'''
+
+RETURN = '''
+ovirt_vnic_profiles:
+ description: "List of dictionaries describing the vnic profiles. Vnic_profile attributes are mapped to dictionary keys,
+ all vnic profiles attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vnic_profile."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.ovirt.ovirt.plugins.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ max=dict(default=None, type='int'),
+ name=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vnic_profiles_service = connection.system_service().vnic_profiles_service()
+ vnic_profiles = vnic_profiles_service.list(max=module.params.get('max'))
+ if module.params.get('name') and vnic_profiles:
+ vnic_profiles = [vnic_profile for vnic_profile in vnic_profiles if vnic_profile.name == module.params.get("name")]
+
+ result = dict(
+ ovirt_vnic_profiles=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in vnic_profiles
+ ],
+ )
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/test/ovirt_proxied_check.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/test/ovirt_proxied_check.py
new file mode 100644
index 00000000..f65ea2b5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/plugins/test/ovirt_proxied_check.py
@@ -0,0 +1,46 @@
+# ovirt-hosted-engine-setup -- ovirt hosted engine setup
+# Copyright (C) 2018 Red Hat, Inc.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = ''' Check if a URL will be accessed through a proxy '''
+
+try:
+ from urllib import getproxies_environment
+ from urllib import proxy_bypass
+ from urlparse import urlparse
+except ImportError:
+ from urllib.request import getproxies_environment
+ from urllib.request import proxy_bypass
+ from urllib.parse import urlparse
+
+
+def proxied(value):
+ netloc = urlparse(value).netloc
+ proxied = bool(getproxies_environment()) and not proxy_bypass(netloc)
+    return proxied
+
+
+class TestModule(object):
+ ''' Ansible jinja2 tests '''
+
+ def tests(self):
+ return {
+ 'proxied': proxied,
+ }
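+
+
+# Example usage as a Jinja2 test in a playbook (a sketch; the engine_url
+# variable is hypothetical):
+#
+#   - ansible.builtin.debug:
+#       msg: "The engine URL is accessed through a proxy"
+#     when: engine_url is proxied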
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/requirements.txt b/collections-debian-merged/ansible_collections/ovirt/ovirt/requirements.txt
new file mode 100644
index 00000000..c9d3c4eb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/requirements.txt
@@ -0,0 +1 @@
+ovirt-engine-sdk-python>=4.4.0
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/README.md
new file mode 100644
index 00000000..ba0f4eec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/README.md
@@ -0,0 +1,52 @@
+oVirt Cluster Upgrade
+=========
+
+The `cluster_upgrade` role iterates through all the hosts in a cluster and upgrades them.
+
+Role Variables
+--------------
+
+| Name                    | Default value         | Description                                         |
+|-------------------------|-----------------------|-----------------------------------------------------|
+| cluster_name | Default | Name of the cluster to be upgraded. |
+| stopped_vms | UNDEF | List of virtual machines to stop before upgrading. |
+| stop_non_migratable_vms <br/> <i>alias: stop_pinned_to_host_vms</i> | false | Specify whether to stop virtual machines pinned to the host being upgraded. If true, the pinned non-migratable virtual machines will be stopped and the host will be upgraded; otherwise the host will be skipped. |
+| upgrade_timeout | 3600 | Timeout in seconds to wait for host to be upgraded. |
+| host_statuses | [UP] | List of host statuses. If a host is in any of the specified statuses then it will be upgraded. |
+| host_names | [\*] | List of host names to be upgraded. |
+| check_upgrade | false | If true, run check_for_upgrade action on all hosts before executing upgrade on them. If false, run upgrade only for hosts with available upgrades and ignore all other hosts. |
+| reboot_after_upgrade    | true                  | If true, reboot hosts after a successful upgrade.    |
+| use_maintenance_policy  | true                  | If true, the cluster policy will be switched to cluster_maintenance during the upgrade; otherwise the policy will be unchanged. |
+| healing_in_progress_checks | 6 | Maximum number of attempts to check if gluster healing is still in progress. |
+| healing_in_progress_check_delay | 300 | The delay in seconds between each attempt to check if gluster healing is still in progress. |
+| wait_to_finish_healing | 5 | Delay in minutes to wait for the gluster healing process to finish after a successful host upgrade. |
+
+Example Playbook
+----------------
+
+```yaml
+---
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ engine_fqdn: ovirt-engine.example.com
+ engine_user: admin@internal
+ engine_password: 123456
+ engine_cafile: /etc/pki/ovirt-engine/ca.pem
+
+ cluster_name: production
+ stopped_vms:
+ - openshift-master-0
+ - openshift-node-0
+ - openshift-node-image
+
+ roles:
+ - cluster_upgrade
+ collections:
+ - ovirt.ovirt
+```
+
+[![asciicast](https://asciinema.org/a/122760.png)](https://asciinema.org/a/122760)
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/defaults/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/defaults/main.yml
new file mode 100644
index 00000000..25c50b80
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/defaults/main.yml
@@ -0,0 +1,16 @@
+---
+# stop_pinned_to_host_vms is alias for stop_non_migratable_vms
+stop_non_migratable_vms: "{{ stop_pinned_to_host_vms | default(false) }}"
+upgrade_timeout: 3600
+cluster_name: Default
+check_upgrade: false
+reboot_after_upgrade: true
+use_maintenance_policy: true
+host_statuses:
+ - up
+host_names:
+ - '*'
+pinned_vms_names: []
+healing_in_progress_checks: 6
+healing_in_progress_check_delay: 300
+wait_to_finish_healing: 5
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/examples/cluster_upgrade.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/examples/cluster_upgrade.yml
new file mode 100644
index 00000000..37c57aea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/examples/cluster_upgrade.yml
@@ -0,0 +1,26 @@
+---
+- name: oVirt cluster upgrade
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars_files:
+    # Contains the `engine_password` variable encrypted using ansible-vault
+ - passwords.yml
+
+ vars:
+ engine_fqdn: ovirt.example.com
+ engine_user: admin@internal
+
+ cluster_name: mycluster
+ stop_non_migratable_vms: true
+ host_statuses:
+ - up
+ host_names:
+ - myhost1
+ - myhost2
+
+ roles:
+ - cluster_upgrade
+ collections:
+ - ovirt.ovirt
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/examples/passwords.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/examples/passwords.yml
new file mode 100644
index 00000000..92c7613c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/examples/passwords.yml
@@ -0,0 +1,12 @@
+---
+# As an example this file is kept in plaintext. If you want to
+# encrypt this file, please execute the following command:
+#
+# $ ansible-vault encrypt passwords.yml
+#
+# It will ask you for a password, which you must then pass to
+# ansible interactively when executing the playbook.
+#
+# $ ansible-playbook myplaybook.yml --ask-vault-pass
+#
+engine_password: 123456
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/cluster_policy.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/cluster_policy.yml
new file mode 100644
index 00000000..37bac22d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/cluster_policy.yml
@@ -0,0 +1,24 @@
+- name: Get name of the original scheduling policy
+ ovirt_scheduling_policy_info:
+ auth: "{{ ovirt_auth }}"
+ id: "{{ cluster_info.ovirt_clusters[0].scheduling_policy.id }}"
+ check_mode: "no"
+ register: sp_info
+
+- name: Remember the cluster scheduling policy
+ set_fact:
+ cluster_scheduling_policy: "{{ sp_info.ovirt_scheduling_policies[0].name }}"
+
+- name: Remember the cluster scheduling policy properties
+ set_fact:
+ cluster_scheduling_policy_properties: "{{ cluster_info.ovirt_clusters[0].custom_scheduling_policy_properties }}"
+
+- name: Set cluster upgrade scheduling policy
+ ovirt_cluster:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ cluster_name }}"
+ scheduling_policy: cluster_maintenance
+ register: cluster_policy
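+  # The cluster_maintenance scheduling policy is only available on engine
+  # versions 4.2 and newer, or 4.1 with revision >= 4: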
+ when:
+    - (api_info.ovirt_api.product_info.version.major >= 4 and api_info.ovirt_api.product_info.version.minor >= 2) or
+      (api_info.ovirt_api.product_info.version.major == 4 and api_info.ovirt_api.product_info.version.minor == 1 and api_info.ovirt_api.product_info.version.revision >= 4)
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/main.yml
new file mode 100644
index 00000000..7f6683fa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/main.yml
@@ -0,0 +1,208 @@
+---
+## https://github.com/ansible/ansible/issues/22397
+## Ansible 2.3 generates a WARNING when using {{ }} in a role's default variables;
+## this works around it until Ansible resolves the issue:
+- name: Initialize variables
+ set_fact:
+ stop_non_migratable_vms: "{{ stop_non_migratable_vms }}"
+ provided_token: "{{ engine_token | default(lookup('env','OVIRT_TOKEN')) | default('') }}"
+
+- block:
+ - name: Login to oVirt
+ ovirt_auth:
+ url: "{{ engine_url | default(lookup('env','OVIRT_URL')) | default(omit) }}"
+ username: "{{ engine_user | default(lookup('env','OVIRT_USERNAME')) | default(omit) }}"
+ hostname: "{{ engine_fqdn | default(lookup('env','OVIRT_HOSTNAME')) | default(omit) }}"
+ password: "{{ engine_password | default(lookup('env','OVIRT_PASSWORD')) | default(omit) }}"
+ ca_file: "{{ engine_cafile | default(lookup('env','OVIRT_CAFILE')) | default(omit) }}"
+ token: "{{ engine_token | default(lookup('env','OVIRT_TOKEN')) | default(omit) }}"
+ insecure: "{{ engine_insecure | default(true) }}"
+ when: ovirt_auth is undefined or not ovirt_auth
+ register: login_result
+ tags:
+ - always
+
+ - name: Get API info
+ ovirt_api_info:
+ auth: "{{ ovirt_auth }}"
+ register: api_info
+ check_mode: "no"
+
+ - name: Get cluster info
+ ovirt_cluster_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "name={{ cluster_name }}"
+ fetch_nested: True
+ nested_attributes: name
+ check_mode: "no"
+ register: cluster_info
+
+ - name: Set cluster upgrade status in progress
+ no_log: true
+ uri:
+ url: "{{ ovirt_auth.url }}/clusters/{{ cluster_info.ovirt_clusters[0].id }}/upgrade"
+ method: POST
+ body_format: json
+ validate_certs: false
+ headers:
+ Authorization: "Bearer {{ ovirt_auth.token }}"
+ body:
+ upgrade_action: start
+ register: upgrade_set
+ when: api_info.ovirt_api.product_info.version.major >= 4 and api_info.ovirt_api.product_info.version.minor >= 3
+
+ - name: Log event cluster upgrade has started
+ ovirt_event:
+ auth: "{{ ovirt_auth }}"
+ state: present
+ description: "Cluster upgrade started for {{ cluster_name }}."
+ origin: "cluster_upgrade"
+ custom_id: "{{ 2147483647 | random | int }}"
+ severity: normal
+ cluster: "{{ cluster_info.ovirt_clusters[0].id }}"
+
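+ # Build an engine search pattern: all hosts in the cluster, optionally
+ # restricted to hosts with updates available and to the given names/statuses.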
+ - name: Get hosts
+ ovirt_host_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "cluster={{ cluster_name | mandatory }} {{ check_upgrade | ternary('', 'update_available=true') }} {{ host_names | map('regex_replace', '^(.*)$', 'name=\\1') | list | join(' or ') }} {{ host_statuses | map('regex_replace', '^(.*)$', 'status=\\1') | list | join(' or ') }}"
+ check_mode: "no"
+ register: host_info
+
+ - block:
+ - name: Print - no hosts to be updated
+ debug:
+ msg: "No hosts to be updated"
+
+ - name: Log event - no hosts to be updated
+ ovirt_event:
+ auth: "{{ ovirt_auth }}"
+ state: present
+ description: "There are no hosts to be updated for cluster {{ cluster_name }}."
+ origin: "cluster_upgrade"
+ custom_id: "{{ 2147483647 | random | int }}"
+ severity: normal
+ cluster: "{{ cluster_info.ovirt_clusters[0].id }}"
+ when: host_info.ovirt_hosts | length == 0
+
+ - block:
+ - name: Log event about hosts that are marked to be updated
+ ovirt_event:
+ auth: "{{ ovirt_auth }}"
+ state: present
+ description: "Hosts {{ host_info.ovirt_hosts | map(attribute='name') | join(',') }} are marked to be updated in cluster {{ cluster_name }}."
+ origin: "cluster_upgrade"
+ custom_id: "{{ 2147483647 | random | int }}"
+ severity: normal
+ cluster: "{{ cluster_info.ovirt_clusters[0].id }}"
+
+ - include_tasks: cluster_policy.yml
+ when: use_maintenance_policy
+
+ - name: Get list of VMs in cluster
+ ovirt_vm_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "cluster={{ cluster_name }}"
+ check_mode: "no"
+ register: vms_in_cluster
+
+ - include_tasks: pinned_vms.yml
+
+ - name: Start ovirt job session
+ ovirt_job:
+ auth: "{{ ovirt_auth }}"
+ description: "Upgrading hosts"
+
+ # Update only those hosts that aren't in the list of hosts where VMs are pinned,
+ # or all hosts if stop_non_migratable_vms is enabled, which means we stop pinned VMs
+ - include_tasks: upgrade.yml
+ with_items:
+ - "{{ host_info.ovirt_hosts }}"
+ when: "item.id not in host_ids or stop_non_migratable_vms"
+
+ - name: Finish ovirt job session
+ ovirt_job:
+ auth: "{{ ovirt_auth }}"
+ description: "Upgrading hosts"
+ state: finished
+
+ - name: Log event about cluster upgrade finished successfully
+ ovirt_event:
+ auth: "{{ ovirt_auth }}"
+ state: present
+ description: "Upgrade of cluster {{ cluster_name }} finished successfully."
+ origin: "cluster_upgrade"
+ severity: normal
+ custom_id: "{{ 2147483647 | random | int }}"
+ cluster: "{{ cluster_info.ovirt_clusters[0].id }}"
+
+ when: host_info.ovirt_hosts | length > 0
+ rescue:
+ - name: Log event about cluster upgrade failed
+ ovirt_event:
+ auth: "{{ ovirt_auth }}"
+ state: present
+ description: "Upgrade of cluster {{ cluster_name }} failed."
+ origin: "cluster_upgrade"
+ custom_id: "{{ 2147483647 | random | int }}"
+ severity: error
+ cluster: "{{ cluster_info.ovirt_clusters[0].id }}"
+
+ - name: Update job failed
+ ovirt_job:
+ auth: "{{ ovirt_auth }}"
+ description: "Upgrading hosts"
+ state: failed
+
+ always:
+ - name: Set original cluster policy
+ ovirt_cluster:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ cluster_name }}"
+ scheduling_policy: "{{ cluster_scheduling_policy }}"
+ scheduling_policy_properties: "{{ cluster_scheduling_policy_properties }}"
+ when: use_maintenance_policy and cluster_policy.changed | default(false)
+
+ - name: Start the stopped VMs again
+ ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ item }}"
+ state: running
+ ignore_errors: "yes"
+ with_items:
+ - "{{ stopped_vms | default([]) }}"
+
+ - name: Start the pinned-to-host VMs again
+ ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ item }}"
+ state: running
+ ignore_errors: "yes"
+ with_items:
+ - "{{ pinned_vms_names | default([]) }}"
+ when: "stop_non_migratable_vms"
+
+ always:
+ - name: Set cluster upgrade status to finished
+ no_log: true
+ uri:
+ url: "{{ ovirt_auth.url }}/clusters/{{ cluster_info.ovirt_clusters[0].id }}/upgrade"
+ validate_certs: false
+ method: POST
+ body_format: json
+ headers:
+ Authorization: "Bearer {{ ovirt_auth.token }}"
+ body:
+ upgrade_action: finish
+ when:
+ - upgrade_set is defined and not upgrade_set.failed | default(false)
+ - api_info.ovirt_api.product_info.version.major >= 4 and api_info.ovirt_api.product_info.version.minor >= 3
+
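+ # Log out only when this role performed the login itself and the token was
+ # not supplied by the caller.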
+ - name: Logout from oVirt
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
+ when:
+ - login_result.skipped is defined and not login_result.skipped
+ - provided_token != ovirt_auth.token
+ tags:
+ - always
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/pinned_vms.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/pinned_vms.yml
new file mode 100644
index 00000000..1ca2f30c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/pinned_vms.yml
@@ -0,0 +1,16 @@
+---
+- name: Create list of IDs of hosts which run a non-migratable VM and are not down
+ set_fact:
+ host_ids_items: "{{ item.host.id }}"
+ with_items:
+ - "{{ vms_in_cluster.ovirt_vms | default([]) }}"
+ when:
+ - "item['placement_policy']['affinity'] != 'migratable'"
+ - "item.host is defined"
+ loop_control:
+ label: "{{ item.name }}"
+ register: host_ids_result
+
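+# Each iteration above registers its own ansible_facts; keep only the results
+# of iterations that actually set host_ids_items (hosts running pinned VMs).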
+- name: Create list of host IDs which have a pinned VM
+ set_fact:
+ host_ids: "{{ host_ids_result.results | rejectattr('ansible_facts', 'undefined') | map(attribute='ansible_facts.host_ids_items') | list }}"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/upgrade.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/upgrade.yml
new file mode 100644
index 00000000..80c6f4f8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/cluster_upgrade/tasks/upgrade.yml
@@ -0,0 +1,100 @@
+- name: Get list of VMs in host
+ ovirt_vm_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "cluster={{ cluster_name }} and host={{ item.name }} and status=up"
+ check_mode: "no"
+
+- name: Move user-migratable VMs
+ ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ force_migrate: true
+ migrate: true
+ state: running
+ name: "{{ item.name }}"
+ register: resp
+ when:
+ - "item['placement_policy']['affinity'] == 'user_migratable'"
+ with_items:
+ - "{{ vms_in_cluster.ovirt_vms }}"
+ loop_control:
+ label: "{{ item.name }}"
+
+- name: Shutdown non-migratable VMs
+ ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ state: stopped
+ force: true
+ name: "{{ item.name }}"
+ with_items:
+ - "{{ vms_in_cluster.ovirt_vms }}"
+ when:
+ - "item['placement_policy']['affinity'] == 'pinned'"
+ loop_control:
+ label: "{{ item.name }}"
+ register: pinned_to_host_vms
+
+- name: Create list of VM names which were shut down
+ set_fact:
+ pinned_vms_names: "{{ pinned_vms_names + pinned_to_host_vms.results | selectattr('changed') | map(attribute='item.name') | list }}"
+
+- name: Start ovirt job step
+ ovirt_job:
+ auth: "{{ ovirt_auth }}"
+ description: "Upgrading hosts"
+ steps:
+ - description: "Upgrading host: {{ item.name }}"
+
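+# Retry until no volume reports outstanding self-heal entries (the sum of
+# no_of_entries across all bricks must reach zero).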
+- name: Gather self-heal facts about all gluster hosts in the cluster
+ gluster_heal_info:
+ name: "{{ volume_item.name }}"
+ status_filter: self-heal
+ register: self_heal_status
+ retries: "{{ healing_in_progress_checks }}"
+ delay: "{{ healing_in_progress_check_delay }}"
+ until: >
+ self_heal_status is defined and
+ self_heal_status.glusterfs.heal_info | map(attribute='no_of_entries') | select('defined') | list | map('int') | sum == 0
+ delegate_to: "{{ host_info.ovirt_hosts[0].address }}"
+ connection: ssh
+ with_items:
+ - "{{ cluster_info.ovirt_clusters[0].gluster_volumes }}"
+ loop_control:
+ loop_var: volume_item
+ when: cluster_info.ovirt_clusters[0].gluster_service | bool
+
+- name: Refresh gluster heal info entries to database
+ uri:
+ url: "{{ ovirt_auth.url }}/clusters/{{ cluster_info.ovirt_clusters[0].id }}/refreshglusterhealstatus"
+ method: POST
+ body_format: json
+ validate_certs: false
+ headers:
+ Authorization: "Bearer {{ ovirt_auth.token }}"
+ body: "{}"
+ when:
+ - cluster_info.ovirt_clusters[0].gluster_service | bool
+ - api_info.ovirt_api.product_info.version.major >= 4 and api_info.ovirt_api.product_info.version.minor >= 4
+
+- name: Upgrade host
+ ovirt_host:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ item.name }}"
+ state: upgraded
+ check_upgrade: "{{ check_upgrade }}"
+ reboot_after_upgrade: "{{ reboot_after_upgrade }}"
+ timeout: "{{ upgrade_timeout }}"
+
+- name: Wait (in minutes) for the gluster healing process to finish after a successful host upgrade
+ pause:
+ minutes: "{{ wait_to_finish_healing }}"
+ when:
+ - cluster_info.ovirt_clusters[0].gluster_service | bool
+ - host_info.ovirt_hosts | length > 1
+
+- name: Finish ovirt job step
+ ovirt_job:
+ auth: "{{ ovirt_auth }}"
+ description: "Upgrading hosts"
+ steps:
+ - description: "Upgrading host: {{ item.name }}"
+ state: finished
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/README.md
new file mode 100644
index 00000000..56886ba4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/README.md
@@ -0,0 +1,77 @@
+oVirt Disaster Recovery
+=========
+
+The `disaster_recovery` role is responsible for managing disaster recovery scenarios in oVirt.
+
+Role Variables
+--------------
+
+| Name | Default value | Description |
+|-------------------------|-----------------------|-----------------------------------------------------|
+| dr_ignore_error_clean | False | Specify whether to ignore errors on clean engine setup.<br/>This is mainly used to avoid failures when trying to move a storage domain to maintenance or detach it. |
+| dr_ignore_error_recover | True | Specify whether to ignore errors on recover. |
+| dr_partial_import | True | Specify whether to use the partial import flag on VM/Template register.<br/>If True, VMs and Templates will be registered without any missing disks; if False, VMs/Templates will fail to register when some of their disks are missing from any of the storage domains. |
+| dr_target_host | secondary | Specify the default target host to be used in the ansible play.<br/>This host indicates the target site on which the recover process will be done. |
+| dr_source_map | primary | Specify the default source map to be used in the play.<br/>The source map indicates the key which is used to get the target value for each attribute which we want to register with the VM/Template. |
+| dr_reset_mac_pool | True | If True, then once a VM is registered, its MAC pool will automatically be reset, if one is configured in the VM. |
+| dr_cleanup_retries_maintenance | 3 | Specify the number of retries of moving a storage domain to maintenance as part of a failback scenario. |
+| dr_cleanup_delay_maintenance | 120 | Specify the number of seconds between each retry as part of a failback scenario. |
+| dr_clean_orphaned_vms | True | Specify whether to remove any VMs which have no disks from the setup as part of cleanup. |
+| dr_clean_orphaned_disks | True | Specify whether to remove LUN disks from the setup as part of engine setup. |
+| dr_running_vms | /tmp/ovirt_dr_running_vm_list | Specify the file path which is used to contain the data of the running VMs in the secondary setup before failback, so they can be started on the primary setup once the secondary site cleanup has finished. Note that the /tmp folder is used by default, so the file will not be available after a system reboot. |
+
+
+Example Playbook
+----------------
+
+```yaml
+---
+- name: Setup oVirt environment
+ hosts: localhost
+ connection: local
+ vars_files:
+ - ovirt_passwords.yml
+ - disaster_recovery_vars.yml
+ roles:
+ - disaster_recovery
+ collections:
+ - ovirt.ovirt
+```
+
+Generate var file mapping [demo](https://youtu.be/s1-Hq_Mk1w8)
+<br/>
+Fail over scenario [demo](https://youtu.be/mEOgH-Tk09c)
+
+Scripts
+-------
+The `ovirt-dr` script provides the user a more convenient way to run
+disaster recovery actions without having to use the ansible playbooks directly.
+There are four actions the user can execute:
+- `generate` Generate the mapping var file based on the primary and secondary setup, to be used for failover and failback
+- `validate` Validate the var file mapping which is used for failover and failback
+- `failover` Start a failover process to the target setup
+- `failback` Start a failback process from the target setup to the source setup
+
+Each of these actions uses a configuration file whose default location is `disaster_recovery/files/dr.conf`<br/>
+The configuration file's location can be changed using the `--conf-file` flag of the `ovirt-dr` script.<br/>
+The log file and log level can be configured as well through the `ovirt-dr` script, using the `--log-file` and `--log-level` flags.
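+
+For example, to run an action with a custom configuration file and log settings (the paths below are illustrative):
+```console
+$ ./ovirt-dr generate --conf-file=/path/to/my_dr.conf --log-file=/tmp/ovirt-dr.log --log-level=INFO
+```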
+
+
+Example Script
+--------------
+For mapping file generation (from the `./roles/disaster_recovery/files/` directory):
+```console
+$ ./ovirt-dr generate --log-file=ovirt-dr.log --log-level=DEBUG
+```
+For mapping file validation:
+```console
+$ ./ovirt-dr validate
+```
+For fail-over operation:
+```console
+$ ./ovirt-dr failover
+```
+For fail-back operation:
+```console
+$ ./ovirt-dr failback
+```
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/defaults/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/defaults/main.yml
new file mode 100644
index 00000000..ef07c479
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/defaults/main.yml
@@ -0,0 +1,37 @@
+# Indicate whether to ignore errors on clean engine setup.
+dr_ignore_error_clean: "False"
+
+# Indicate whether to ignore errors on recover.
+dr_ignore_error_recover: "True"
+
+# Indicate whether to use the partial import flag when registering VMs and Templates.
+dr_partial_import: "True"
+
+# Indicate the default target host to be used in the play.
+dr_target_host: "secondary"
+
+# Indicate the default source map to be used in the play.
+dr_source_map: "primary"
+
+# Indicate whether to reset a mac pool of a VM on register.
+dr_reset_mac_pool: "True"
+
+# Indicate the number of retries of moving a storage domain to maintenance (In case of a failure because of running tasks).
+dr_cleanup_retries_maintenance: 3
+
+# Indicate the number of seconds between each maintenance retry (In case of a failure because of running tasks).
+dr_cleanup_delay_maintenance: 120
+
+# Indicate whether to remove any VMs which have no disks from the setup as part of cleanup.
+dr_clean_orphaned_vms: "True"
+
+# Indicate whether to remove lun disks from the setup as part of engine setup.
+dr_clean_orphaned_disks: "True"
+
+# Indicate the default entities status report file name
+dr_report_file: "report.log"
+
+# Indicate the file name which is used to contain the data of the running VMs in the secondary setup before the failback,
+# so they can be started again on the primary setup after the failback has finished.
+# Note that the /tmp folder is being used as default so the file will not be available after system reboot.
+dr_running_vms: "/tmp/ovirt_dr_running_vm_list"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/disaster_recovery_vars.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/disaster_recovery_vars.yml
new file mode 100644
index 00000000..7b41b66b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/disaster_recovery_vars.yml
@@ -0,0 +1,109 @@
+---
+dr_sites_primary_url: "https://engine1.example.com/ovirt-engine/api"
+dr_sites_primary_username: "admin@internal"
+dr_sites_primary_ca_file: "/etc/pki/ovirt-engine/ca.pem"
+
+dr_sites_secondary_url: "https://engine2.example.com/ovirt-engine/api"
+dr_sites_secondary_username: "admin@internal"
+dr_sites_secondary_ca_file: "/etc/pki/ovirt-engine/ca.pem"
+
+dr_import_storages:
+ - dr_domain_type: "nfs"
+ dr_primary_master_domain: "True"
+ dr_primary_address: "xx.xx.xx.xx"
+ dr_primary_path: "/export/path1"
+ dr_primary_dc_name: "Prod"
+ dr_primary_name: "primary_master_storage"
+ dr_secondary_master_domain: "True"
+ dr_secondary_address: "yy.yy.yy.yy"
+ dr_secondary_path: "/export/path1"
+ dr_secondary_dc_name: "Recovery"
+ dr_secondary_name: "secondary_master_storage"
+
+ - dr_domain_type: "nfs"
+ dr_wipe_after_delete: False
+ dr_backup: False
+ dr_critical_space_action_blocker: 5
+ dr_warning_low_space: 5
+ dr_primary_master_domain: "False"
+ dr_primary_name: "path2"
+ dr_primary_address: "xx.xx.xx.xx"
+ dr_primary_path: "/export/path2"
+ dr_primary_dc_name: "Prod"
+ dr_secondary_name: "path2"
+ dr_secondary_master_domain: False
+ dr_secondary_address: "yy.yy.yy.yy"
+ dr_secondary_path: "/export/path2"
+ dr_secondary_dc_name: "Recovery"
+
+ - dr_domain_type: "iscsi"
+ dr_wipe_after_delete: False
+ dr_backup: False
+ dr_critical_space_action_blocker: 1
+ dr_warning_low_space: 5
+ dr_primary_master_domain: "False"
+ dr_domain_id: "aa92cc71-1b88-4998-a755-970ef8a638ea"
+ dr_primary_address: "yy.yy.yy.yy"
+ dr_primary_port: 3260
+ dr_primary_target: ["iqn.2017-10.com.primary.redhat:444"]
+ dr_primary_dc_name: "Prod"
+ dr_primary_name: "scsi_domain"
+ dr_secondary_name: "scsi_domain"
+ dr_secondary_dc_name: "Recovery"
+ dr_secondary_master_domain: "False"
+ dr_secondary_address: "zz.zz.zz.zz"
+ dr_secondary_port: 3260
+ dr_secondary_target: ["iqn.2017-07.com.recovery.redhat:444"]
+
+# Mapping for cluster
+dr_cluster_mappings:
+ - primary_name: "cluster_prod"
+ secondary_name: "cluster_recovery"
+
+# Mapping for affinity group
+dr_affinity_group_mappings:
+ - primary_name: "primary_affinity"
+ secondary_name: "secondary_affinity"
+
+# Mapping for affinity label
+dr_affinity_label_mappings:
+ - primary_name: "label_prod"
+ secondary_name: "label_recovery"
+
+# Mapping for domain
+dr_domain_mappings:
+ - primary_name: "new-authz"
+ secondary_name: "internal-authz"
+
+# Mapping for roles
+dr_role_mappings:
+ - primary_name: "VmMananger"
+ secondary_name: "NeverMnd"
+
+# Mapping for vnic profile
+dr_network_mappings:
+ - primary_network_name: "ovirtmgmt"
+ primary_profile_name: "ovirtmgmt"
+ primary_profile_id: "e368cbd4-59d9-4a7e-86c1-e405c916a836"
+ secondary_network_name: "ovirtmgmt"
+ secondary_profile_name: "ovirtmgmt"
+ secondary_profile_id: "e368cbd4-59d9-4a7e-86c1-e405c916a836"
+
+# Mapping for direct LUN disks
+dr_lun_mappings:
+ - primary_logical_unit_id: "360014056a2be431c0fd46c4bdce92b66"
+ primary_storage_type: "iscsi"
+ primary_logical_unit_address: "yy.yy.yy.yy"
+ primary_logical_unit_port: 3260
+ primary_logical_unit_portal: "1"
+ primary_logical_unit_username: ""
+ primary_logical_unit_password: ""
+ primary_logical_unit_target: "iqn.2017-10.com.primary.redhat:444"
+ secondary_storage_type: "iscsi"
+ secondary_logical_unit_id: "36001405961a7f95e6aa461b8dba53052"
+ secondary_logical_unit_address: "zz.zz.zz.zz"
+ secondary_logical_unit_port: 3260
+ secondary_logical_unit_portal: "1"
+ secondary_logical_unit_username: ""
+ secondary_logical_unit_password: ""
+ secondary_logical_unit_target: "iqn.2017-10.com.recovery.redhat:444"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/dr_ovirt_setup.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/dr_ovirt_setup.yml
new file mode 100644
index 00000000..94687024
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/dr_ovirt_setup.yml
@@ -0,0 +1,11 @@
+---
+- name: Setup oVirt environment
+ hosts: localhost
+ connection: local
+ vars_files:
+ - ovirt_passwords.yml
+ - disaster_recovery_vars.yml
+ roles:
+ - disaster_recovery
+ collections:
+ - ovirt.ovirt
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/dr_play.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/dr_play.yml
new file mode 100644
index 00000000..ede8b3cf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/dr_play.yml
@@ -0,0 +1,8 @@
+---
+- name: Setup oVirt environment
+ hosts: localhost
+ connection: local
+ roles:
+ - disaster_recovery
+ collections:
+ - ovirt.ovirt
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/ovirt_passwords.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/ovirt_passwords.yml
new file mode 100644
index 00000000..58c52304
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/ovirt_passwords.yml
@@ -0,0 +1,13 @@
+---
+# As an example this file is kept in plaintext. If you want to
+# encrypt this file, please execute the following command:
+#
+# $ ansible-vault encrypt passwords.yml
+#
+# It will ask you for a password, which you must then pass to
+# ansible interactively when executing the playbook.
+#
+# $ ansible-playbook myplaybook.yml --ask-vault-pass
+#
+dr_sites_primary_password: 123456
+dr_sites_secondary_password: 123456
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/bcolors.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/bcolors.py
new file mode 100644
index 00000000..05348131
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/bcolors.py
@@ -0,0 +1,21 @@
+#!/usr/bin/python3
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class bcolors:
+ HEADER = '\033[95m'
+ OKBLUE = '\033[1;34m'
+ OKGREEN = '\033[0;32m'
+ WARNING = '\x1b[0;33m'
+ FAIL = '\033[0;31m'
+ ENDC = '\033[0m'
+
+ def disable(self):
+ self.HEADER = ''
+ self.OKBLUE = ''
+ self.OKGREEN = ''
+ self.WARNING = ''
+ self.FAIL = ''
+ self.ENDC = ''
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/dr.conf b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/dr.conf
new file mode 100644
index 00000000..7b17e131
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/dr.conf
@@ -0,0 +1,21 @@
+[log]
+log_file=/tmp/ovirt-dr-{}.log
+log_level=DEBUG
+
+[generate_vars]
+site=http://engine.example.com/ovirt-engine/api
+username=admin@internal
+password=
+ca_file=/etc/pki/ovirt-engine/ca.pem
+output_file=~/.ansible/collections/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/disaster_recovery_vars.yml
+ansible_play=~/.ansible/collections/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/dr_play.yml
+
+[validate_vars]
+var_file=~/.ansible/collections/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/disaster_recovery_vars.yml
+
+[failover_failback]
+dr_target_host=secondary
+dr_source_map=primary
+vault=~/.ansible/collections/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/ovirt_passwords.yml
+var_file=~/.ansible/collections/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/disaster_recovery_vars.yml
+ansible_play=~/.ansible/collections/ansible_collections/ovirt/ovirt/roles/disaster_recovery/examples/dr_play.yml
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/fail_back.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/fail_back.py
new file mode 100755
index 00000000..373d0822
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/fail_back.py
@@ -0,0 +1,289 @@
+#!/usr/bin/python3
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import logging
+import os.path
+import subprocess
+from subprocess import call
+import sys
+import time
+
+from configparser import ConfigParser
+from ansible.module_utils.six.moves import input
+
+from bcolors import bcolors
+
+INFO = bcolors.OKGREEN
+INPUT = bcolors.OKGREEN
+WARN = bcolors.WARNING
+FAIL = bcolors.FAIL
+END = bcolors.ENDC
+PREFIX = "[Failback] "
+VAR_FILE_DEF = "../examples/disaster_recovery_vars.yml"
+PLAY_DEF = "../examples/dr_play.yml"
+report_name = "report-{}.log"
+
+
+class FailBack:
+
+ def run(self, conf_file, log_file, log_level):
+ log = self._set_log(log_file, log_level)
+ log.info("Start failback operation...")
+ target_host, source_map, var_file, vault_file, ansible_play_file = \
+ self._init_vars(conf_file)
+ report = report_name.format(int(round(time.time() * 1000)))
+ log.info("\ntarget_host: %s \n"
+ "source_map: %s \n"
+ "var_file: %s \n"
+ "vault_file: %s \n"
+ "ansible_play_file: %s \n"
+ "report log file: /tmp/%s\n",
+ target_host,
+ source_map,
+ var_file,
+ vault_file,
+ ansible_play_file,
+ report)
+
+ dr_clean_tag = "clean_engine"
+ extra_vars_cleanup = " dr_source_map=" + target_host
+ command_cleanup = [
+ "ansible-playbook", ansible_play_file,
+ "-t", dr_clean_tag,
+ "-e", "@" + var_file,
+ "-e", "@" + vault_file,
+ "-e", extra_vars_cleanup,
+ "--vault-password-file", "vault_secret.sh",
+ "-vvv"
+ ]
+
+ dr_failback_tag = "fail_back"
+ extra_vars_failback = (" dr_target_host=" + target_host
+ + " dr_source_map=" + source_map
+ + " dr_report_file=" + report)
+ command_failback = [
+ "ansible-playbook", ansible_play_file,
+ "-t", dr_failback_tag,
+ "-e", "@" + var_file,
+ "-e", "@" + vault_file,
+ "-e", extra_vars_failback,
+ "--vault-password-file", "vault_secret.sh",
+ "-vvv"
+ ]
+
+ # Setting vault password.
+ vault_pass = input("%s%sPlease enter vault password "
+ "(in case of plain text please press ENTER): %s"
+ % (INPUT, PREFIX, END))
+ os.system("export vault_password=\"" + vault_pass + "\"")
+
+ info_msg = ("Starting cleanup process of setup '{0}' for "
+ "oVirt ansible disaster recovery".format(target_host))
+ log.info(info_msg)
+ print("\n%s%s%s%s" % (INFO, PREFIX, info_msg, END))
+
+ log.info("Executing cleanup command: %s",
+ ' '.join(map(str, command_cleanup)))
+ if log_file is not None and log_file != '':
+ self._log_to_file(log_file, command_cleanup)
+ else:
+ self._log_to_console(command_cleanup, log)
+
+ info_msg = ("Finished cleanup of setup '{0}' "
+ "for oVirt ansible disaster recovery".format(source_map))
+ log.info(info_msg)
+ print("\n%s%s%s%s" % (INFO, PREFIX, info_msg, END))
+
+ info_msg = ("Starting failback process to setup '{0}' "
+ "from setup '{1}' for oVirt ansible disaster recovery"
+ .format(target_host, source_map))
+ log.info(info_msg)
+ print("\n%s%s%s%s" % (INFO, PREFIX, info_msg, END))
+
+ log.info("Executing failback command: %s",
+ ' '.join(map(str, command_failback)))
+ if log_file is not None and log_file != '':
+ self._log_to_file(log_file, command_failback)
+ else:
+ self._log_to_console(command_failback, log)
+
+ call(["cat", "/tmp/" + report])
+ print("\n%s%sFinished failback operation"
+ " for oVirt ansible disaster recovery%s" % (INFO, PREFIX, END))
+
+ def _log_to_file(self, log_file, command):
+ with open(log_file, "a") as f:
+ proc = subprocess.Popen(command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True)
+ for line in iter(proc.stdout.readline, ''):
+ if 'TASK [' in line:
+ print("\n%s%s%s\n" % (INFO, line, END))
+ if "[Failback Replication Sync]" in line:
+ print("%s%s%s" % (INFO, line, END))
+ f.write(line)
+ for line in iter(proc.stderr.readline, ''):
+ f.write(line)
+ print("%s%s%s" % (WARN, line, END))
+
+ def _log_to_console(self, command, log):
+ proc = subprocess.Popen(command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True)
+ for line in iter(proc.stdout.readline, ''):
+ if "[Failback Replication Sync]" in line:
+ print("%s%s%s" % (INFO, line, END))
+ else:
+ log.debug(line)
+ for line in iter(proc.stderr.readline, ''):
+ log.warning(line)
+ self._handle_result(command)
+
+ def _handle_result(self, command):
+ try:
+ # TODO: do something with the returned output?
+ subprocess.check_output(command, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ print("%sException: %s\n\n"
+ "failback operation failed, please check log file for "
+ "further details.%s"
+ % (FAIL, e, END))
+ sys.exit()
+
+ def _init_vars(self, conf_file):
+ """ Declare constants """
+ _SECTION = "failover_failback"
+ _TARGET = "dr_target_host"
+ _SOURCE = "dr_source_map"
+ _VAULT = "vault"
+ _VAR_FILE = "var_file"
+ _ANSIBLE_PLAY = 'ansible_play'
+ setups = ['primary', 'secondary']
+
+ settings = ConfigParser()
+ settings.read(conf_file)
+ if _SECTION not in settings.sections():
+ settings.add_section(_SECTION)
+ if not settings.has_option(_SECTION, _TARGET):
+ settings.set(_SECTION, _TARGET, '')
+ if not settings.has_option(_SECTION, _SOURCE):
+ settings.set(_SECTION, _SOURCE, '')
+ if not settings.has_option(_SECTION, _VAULT):
+ settings.set(_SECTION, _VAULT, '')
+ if not settings.has_option(_SECTION, _VAR_FILE):
+ settings.set(_SECTION, _VAR_FILE, '')
+ if not settings.has_option(_SECTION, _ANSIBLE_PLAY):
+ settings.set(_SECTION, _ANSIBLE_PLAY, '')
+
+ # We fetch the source map as target host,
+ # since in failback we do the reverse operation.
+ target_host = settings.get(_SECTION, _SOURCE,
+ vars=DefaultOption(settings,
+ _SECTION,
+ source_map=None))
+
+ # We fetch the target host as the source mapping for failback,
+ # since we do the reverse operation.
+ source_map = settings.get(_SECTION, _TARGET,
+ vars=DefaultOption(settings,
+ _SECTION,
+ target_host=None))
+
+ vault_file = settings.get(_SECTION, _VAULT,
+ vars=DefaultOption(settings,
+ _SECTION,
+ vault=None))
+ vault_file = os.path.expanduser(vault_file)
+
+ var_file = settings.get(_SECTION, _VAR_FILE,
+ vars=DefaultOption(settings,
+ _SECTION,
+ var_file=None))
+ var_file = os.path.expanduser(var_file)
+
+ ansible_play_file = settings.get(_SECTION, _ANSIBLE_PLAY,
+ vars=DefaultOption(settings,
+ _SECTION,
+ ansible_play=None))
+ ansible_play_file = os.path.expanduser(ansible_play_file)
+
+ while target_host not in setups:
+ target_host = input("%s%sThe target host '%s' was not defined. "
+ "Please provide the target host "
+ "to failback to (primary or secondary): %s"
+ % (INPUT, PREFIX, target_host, END))
+ while source_map not in setups:
+ source_map = input("%s%sThe source mapping '%s' was not defined. "
+ "Please provide the source mapping "
+ "(primary or secondary): %s"
+ % (INPUT, PREFIX, source_map, END))
+
+ while not os.path.isfile(var_file):
+ var_file = input("%s%sVar file '%s' does not exist. Please "
+ "provide the location of the var file (%s): %s"
+ % (INPUT, PREFIX, var_file, VAR_FILE_DEF, END)
+ ) or VAR_FILE_DEF
+ var_file = os.path.expanduser(var_file)
+
+ while not os.path.isfile(vault_file):
+ vault_file = input("%s%sPassword file '%s' does not exist. "
+ "Please provide a valid password file: %s"
+ % (INPUT, PREFIX, vault_file, END))
+ vault_file = os.path.expanduser(vault_file)
+
+ while not os.path.isfile(ansible_play_file):
+ ansible_play_file = input("%s%sAnsible play file '%s' does not "
+ "exist. Please provide the ansible play "
+ "file to run the failback flow (%s): %s"
+ % (INPUT,
+ PREFIX,
+ ansible_play_file,
+ PLAY_DEF,
+ END)
+ ) or PLAY_DEF
+ ansible_play_file = os.path.expanduser(ansible_play_file)
+
+ return target_host, source_map, var_file, vault_file, ansible_play_file
+
+ def _set_log(self, log_file, log_level):
+ logger = logging.getLogger(PREFIX)
+
+ if log_file is not None and log_file != '':
+ formatter = logging.Formatter(
+ '%(asctime)s %(levelname)s %(message)s')
+ hdlr = logging.FileHandler(log_file)
+ hdlr.setFormatter(formatter)
+ else:
+ hdlr = logging.StreamHandler(sys.stdout)
+
+ logger.addHandler(hdlr)
+ logger.setLevel(log_level)
+ return logger
+
+
+class DefaultOption(dict):
+
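+ # Helper passed as the vars= argument to ConfigParser.get(): options present
+ # in the config section win, otherwise the supplied default value is used.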
+ def __init__(self, config, section, **kv):
+ self._config = config
+ self._section = section
+ dict.__init__(self, **kv)
+
+ def items(self):
+ _items = []
+ for option in self:
+ if not self._config.has_option(self._section, option):
+ _items.append((option, self[option]))
+ else:
+ value_in_config = self._config.get(self._section, option)
+ _items.append((option, value_in_config))
+ return _items
+
+
+if __name__ == "__main__":
+ FailBack().run(conf_file='dr.conf',
+ log_file='/tmp/ovirt-dr.log',
+ log_level=logging.getLevelName("DEBUG"))
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/fail_over.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/fail_over.py
new file mode 100755
index 00000000..fdc33632
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/fail_over.py
@@ -0,0 +1,244 @@
+#!/usr/bin/python3
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import logging
+import os.path
+import subprocess
+from subprocess import call
+import sys
+import time
+
+from configparser import ConfigParser
+from ansible.module_utils.six.moves import input
+
+from bcolors import bcolors
+
+INFO = bcolors.OKGREEN
+INPUT = bcolors.OKGREEN
+WARN = bcolors.WARNING
+FAIL = bcolors.FAIL
+END = bcolors.ENDC
+PREFIX = "[Failover] "
+VAR_FILE_DEF = "../examples/disaster_recovery_vars.yml"
+PLAY_DEF = "../examples/dr_play.yml"
+report_name = "report-{}.log"
+
+
+class FailOver:
+
+ def run(self, conf_file, log_file, log_level):
+ log = self._set_log(log_file, log_level)
+ log.info("Start failover operation...")
+ target_host, source_map, var_file, vault_file, ansible_play_file = \
+ self._init_vars(conf_file)
+ report = report_name.format(int(round(time.time() * 1000)))
+ log.info("\ntarget_host: %s \n"
+ "source_map: %s \n"
+ "var_file: %s \n"
+ "vault_file: %s \n"
+ "ansible_play_file: %s \n"
+ "report log file: /tmp/%s\n",
+ target_host,
+ source_map,
+ var_file,
+ vault_file,
+ ansible_play_file,
+ report)
+
+ dr_tag = "fail_over"
+ extra_vars = (" dr_target_host=" + target_host
+ + " dr_source_map=" + source_map
+ + " dr_report_file=" + report)
+ command = [
+ "ansible-playbook", ansible_play_file,
+ "-t", dr_tag,
+ "-e", "@" + var_file,
+ "-e", "@" + vault_file,
+ "-e", extra_vars,
+ "--vault-password-file", "vault_secret.sh",
+ "-vvv"
+ ]
+
+ # Setting vault password.
+ vault_pass = input("%s%sPlease enter vault password "
+ "(in case of plain text please press ENTER): %s"
+ % (INPUT, PREFIX, END))
+ os.system("export vault_password=\"" + vault_pass + "\"")
+
+ log.info("Executing failover command: %s", ' '.join(map(str, command)))
+ if log_file is not None and log_file != '':
+ self._log_to_file(log_file, command)
+ else:
+ self._log_to_console(command, log)
+
+ call(["cat", "/tmp/" + report])
+ print("\n%s%sFinished failover operation"
+ " for oVirt ansible disaster recovery%s" % (INFO, PREFIX, END))
+
+ def _log_to_file(self, log_file, command):
+ with open(log_file, "a") as f:
+ proc = subprocess.Popen(command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True)
+ for line in iter(proc.stdout.readline, ''):
+ if 'TASK [' in line:
+ print("\n%s%s%s\n" % (INFO, line, END))
+ f.write(line)
+ for line in iter(proc.stderr.readline, ''):
+ f.write(line)
+ print("%s%s%s" % (WARN, line, END))
+ self._handle_result(command)
+
+ def _log_to_console(self, command, log):
+ proc = subprocess.Popen(command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True)
+ for line in iter(proc.stdout.readline, ''):
+ log.debug(line)
+ for line in iter(proc.stderr.readline, ''):
+ log.warning(line)
+
+ def _handle_result(self, command):
+ try:
+ # TODO: do something with the returned output?
+ subprocess.check_output(command, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ print("%sException: %s\n\n"
+ "failover operation failed, please check log file for "
+ "further details.%s"
+ % (FAIL, e, END))
+ sys.exit()
+
+ def _init_vars(self, conf_file):
+ """ Declare constants """
+ _SECTION = "failover_failback"
+ _TARGET = "dr_target_host"
+ _SOURCE = "dr_source_map"
+ _VAULT = "vault"
+ _VAR_FILE = "var_file"
+ _ANSIBLE_PLAY = 'ansible_play'
+ setups = ['primary', 'secondary']
+
+ settings = ConfigParser()
+ settings.read(conf_file)
+ if _SECTION not in settings.sections():
+ settings.add_section(_SECTION)
+ if not settings.has_option(_SECTION, _TARGET):
+ settings.set(_SECTION, _TARGET, '')
+ if not settings.has_option(_SECTION, _SOURCE):
+ settings.set(_SECTION, _SOURCE, '')
+ if not settings.has_option(_SECTION, _VAULT):
+ settings.set(_SECTION, _VAULT, '')
+ if not settings.has_option(_SECTION, _VAR_FILE):
+ settings.set(_SECTION, _VAR_FILE, '')
+ if not settings.has_option(_SECTION, _ANSIBLE_PLAY):
+ settings.set(_SECTION, _ANSIBLE_PLAY, '')
+
+ target_host = settings.get(_SECTION, _TARGET,
+ vars=DefaultOption(settings,
+ _SECTION,
+ target_host=None))
+
+ source_map = settings.get(_SECTION, _SOURCE,
+ vars=DefaultOption(settings,
+ _SECTION,
+ source_map=None))
+
+ vault_file = settings.get(_SECTION, _VAULT,
+ vars=DefaultOption(settings,
+ _SECTION,
+ vault=None))
+ vault_file = os.path.expanduser(vault_file)
+
+ var_file = settings.get(_SECTION, _VAR_FILE,
+ vars=DefaultOption(settings,
+ _SECTION,
+ var_file=None))
+ var_file = os.path.expanduser(var_file)
+
+ ansible_play_file = settings.get(_SECTION, _ANSIBLE_PLAY,
+ vars=DefaultOption(settings,
+ _SECTION,
+ ansible_play=None))
+ ansible_play_file = os.path.expanduser(ansible_play_file)
+
+ while target_host not in setups:
+ target_host = input("%s%sThe target host '%s' was not defined. "
+ "Please provide the target host "
+ "to failover to (primary or secondary): %s"
+ % (INPUT, PREFIX, target_host, END))
+ while source_map not in setups:
+ source_map = input("%s%sThe source mapping '%s' was not defined. "
+ "Please provide the source mapping "
+ "(primary or secondary): %s"
+ % (INPUT, PREFIX, source_map, END))
+
+ while not os.path.isfile(var_file):
+ var_file = input("%s%sVar file '%s' does not exist. Please "
+ "provide the location of the var file (%s): %s"
+ % (INPUT, PREFIX, var_file, VAR_FILE_DEF, END)
+ ) or VAR_FILE_DEF
+ var_file = os.path.expanduser(var_file)
+
+ while not os.path.isfile(vault_file):
+ vault_file = input("%s%sPassword file '%s' does not exist. "
+ "Please provide a valid password file: %s"
+ % (INPUT, PREFIX, vault_file, END))
+ vault_file = os.path.expanduser(vault_file)
+
+ while not os.path.isfile(ansible_play_file):
+ ansible_play_file = input("%s%sAnsible play file '%s' does not "
+ "exist. Please provide the ansible play "
+ "file to run the failover flow (%s): %s"
+ % (INPUT,
+ PREFIX,
+ ansible_play_file,
+ PLAY_DEF,
+ END)
+ ) or PLAY_DEF
+ ansible_play_file = os.path.expanduser(ansible_play_file)
+
+ return target_host, source_map, var_file, vault_file, ansible_play_file
+
+ def _set_log(self, log_file, log_level):
+ logger = logging.getLogger(PREFIX)
+
+ if log_file is not None and log_file != '':
+ formatter = logging.Formatter(
+ '%(asctime)s %(levelname)s %(message)s')
+ hdlr = logging.FileHandler(log_file)
+ hdlr.setFormatter(formatter)
+ else:
+ hdlr = logging.StreamHandler(sys.stdout)
+
+ logger.addHandler(hdlr)
+ logger.setLevel(log_level)
+ return logger
+
+
+class DefaultOption(dict):
+
+ def __init__(self, config, section, **kv):
+ self._config = config
+ self._section = section
+ dict.__init__(self, **kv)
+
+ def items(self):
+ _items = []
+ for option in self:
+ if not self._config.has_option(self._section, option):
+ _items.append((option, self[option]))
+ else:
+ value_in_config = self._config.get(self._section, option)
+ _items.append((option, value_in_config))
+ return _items
+
+
+if __name__ == "__main__":
+ FailOver().run(conf_file='dr.conf',
+ log_file='/tmp/ovirt-dr.log',
+ log_level=logging.getLevelName("DEBUG"))
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/generate_mapping.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/generate_mapping.py
new file mode 100755
index 00000000..2f9152ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/generate_mapping.py
@@ -0,0 +1,445 @@
+#!/usr/bin/python3
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import getopt
+import logging
+
+import ovirtsdk4 as sdk
+import ovirtsdk4.types as otypes
+
+# TODO: log file location is currently in the same folder
+logging.basicConfig(level=logging.DEBUG, filename='generator.log')
+
+
+# Documentation: We only support attached storage domains in the var generator.
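+# Example invocation (values are illustrative):
+#   ./generate_mapping.py -a https://engine.example.com/ovirt-engine/api \
+#       -u admin@internal -p password -c /etc/pki/ovirt-engine/ca.pem \
+#       -f disaster_recovery_vars.yml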
+def main(argv):
+ url, username, password, ca, file_ = _init_vars(argv)
+ connection = _connect_sdk(url, username, password, ca, logging.getLogger())
+ host_storages = _get_host_storages_for_external_lun_disks(connection)
+ external_disks = _get_external_lun_disks(connection)
+ affinity_labels = _get_affinity_labels(connection)
+ domains = _get_aaa_domains(connection)
+ networks = _get_vnic_profile_mapping(connection)
+
+ f = open(file_, 'w')
+ _write_file_header(f, url, username, ca)
+ clusters, affinity_groups = _handle_dc_properties(f, connection)
+ _write_clusters(f, clusters)
+ _write_affinity_groups(f, affinity_groups)
+ _write_affinity_labels(f, affinity_labels)
+ _write_aaa_domains(f, domains)
+ _write_roles(f)
+ _write_vnic_profiles(f, networks)
+ _write_external_lun_disks(f, external_disks, host_storages)
+ connection.close()
+
+
+def _init_vars(argv):
+ url, username, password, ca, file_ = '', '', '', '', ''
+ try:
+ opts, args = getopt.getopt(
+ argv,
+ "a:u:p:f:c:", ["a=", "u=", "p=", "f=", "c="])
+ except getopt.GetoptError:
+ print(
+ '''
+ -a <http://127.0.0.1:8080/ovirt-engine/api>\n
+ -u <admin@portal>\n
+ -p <password>\n
+ -c </etc/pki/ovirt-engine/ca.pem>\n
+ -f <disaster_recovery_vars.yml>
+ ''')
+ sys.exit(2)
+
+ for opt, arg in opts:
+ if opt == '-h':
+ print(
+ '''
+ generate_mapping.py
+ -a <http://127.0.0.1:8080/ovirt-engine/api>\n
+ -u <admin@portal>\n
+ -p <password>\n
+ -c </etc/pki/ovirt-engine/ca.pem>\n
+ -f <disaster_recovery_vars.yml>
+ ''')
+ sys.exit()
+ elif opt in ("-a", "--url"):
+ url = arg
+ elif opt in ("-u", "--username"):
+ username = arg
+ elif opt in ("-p", "--password"):
+ password = arg
+ elif opt in ("-c", "--ca"):
+ ca = arg
+ elif opt in ("-f", "--file"):
+ file_ = arg
+ return url, username, password, ca, file_
+
+
+def _connect_sdk(url, username, password, ca, log_):
+ connection = sdk.Connection(
+ url=url,
+ username=username,
+ password=password,
+ ca_file=ca,
+ debug=True,
+ log=log_,
+ )
+ return connection
+
+
+def _write_file_header(f, url, username, ca):
+ """
+ Add a header to the parameter file, for example:
+ dr_sites_primary_url: "http://engine1.redhat.com:8080/ovirt-engine/api"
+ dr_sites_primary_username: "admin@internal"
+ dr_sites_primary_ca_file: "ovirt-share/etc/pki/ovirt-engine/ca.pem"
+
+ dr_sites_secondary_url:
+ dr_sites_secondary_username:
+ dr_sites_secondary_ca_file:
+ """
+ f.write("---\n")
+ f.write("dr_sites_primary_url: %s\n" % url)
+ f.write("dr_sites_primary_username: %s\n" % username)
+ f.write("dr_sites_primary_ca_file: %s\n\n" % ca)
+
+ f.write("# Please fill in the following properties "
+ "for the secondary site: \n")
+ f.write("dr_sites_secondary_url: # %s\n" % url)
+ f.write("dr_sites_secondary_username: # %s\n" % username)
+ f.write("dr_sites_secondary_ca_file: # %s\n\n" % ca)
+
+
+def _handle_dc_properties(f, connection):
+ f.write("dr_import_storages:\n")
+ dcs_service = connection.system_service().data_centers_service()
+ dcs_list = dcs_service.list()
+ clusters = []
+ affinity_groups = []
+ for dc in dcs_list:
+ dc_service = dcs_service.data_center_service(dc.id)
+ _write_attached_storage_domains(f, dc_service, dc)
+ _add_clusters_and_aff_groups_for_dc(dc_service,
+ clusters,
+ affinity_groups)
+ return clusters, affinity_groups
+
+
+def _get_host_storages_for_external_lun_disks(connection):
+ host_storages = {}
+ hosts_service = connection.system_service().hosts_service()
+ hosts_list = hosts_service.list(search='status=up')
+
+ # The reason we go over each active Host in the DC is that there might
+ # be a Host which fails to connect to a certain device but is still active.
+ for host in hosts_list:
+ host_storages_service = hosts_service.host_service(host.id) \
+ .storage_service().list()
+ for host_storage in host_storages_service:
+ if host_storage.id not in host_storages.keys():
+ host_storages[host_storage.id] = host_storage
+ return host_storages
+
+
+def _get_external_lun_disks(connection):
+ external_disks = []
+ disks_service = connection.system_service().disks_service()
+ disks_list = disks_service.list()
+ for disk in disks_list:
+ if otypes.DiskStorageType.LUN == disk.storage_type:
+ external_disks.append(disk)
+ return external_disks
+
+
+def _get_affinity_labels(connection):
+ affinity_labels = []
+ affinity_labels_service = \
+ connection.system_service().affinity_labels_service()
+ affinity_labels_list = affinity_labels_service.list()
+ for affinity_label in affinity_labels_list:
+ affinity_labels.append(affinity_label.name)
+ return affinity_labels
+
+
+def _get_aaa_domains(connection):
+ domains = []
+ domains_service = connection.system_service().domains_service()
+ domains_list = domains_service.list()
+ for domain in domains_list:
+ domains.append(domain.name)
+ return domains
+
+
+def _get_vnic_profile_mapping(connection):
+ networks = []
+ vnic_profiles_service = connection.system_service().vnic_profiles_service()
+ vnic_profile_list = vnic_profiles_service.list()
+ for vnic_profile_item in vnic_profile_list:
+ mapped_network = {}
+ networks_list = connection.system_service().networks_service().list()
+ network_name = ''
+ for network_item in networks_list:
+ if network_item.id == vnic_profile_item.network.id:
+ network_name = network_item.name
+ dc_name = connection.system_service().data_centers_service(). \
+ data_center_service(network_item.data_center.id). \
+ get()._name
+ break
+ mapped_network['network_name'] = network_name
+ mapped_network['network_dc'] = dc_name
+ mapped_network['profile_name'] = vnic_profile_item.name
+ mapped_network['profile_id'] = vnic_profile_item.id
+ networks.append(mapped_network)
+ return networks
+
+
+def _add_clusters_and_aff_groups_for_dc(dc_service, clusters, affinity_groups):
+ clusters_service = dc_service.clusters_service()
+ attached_clusters_list = clusters_service.list()
+ for cluster in attached_clusters_list:
+ clusters.append(cluster.name)
+ cluster_service = clusters_service.cluster_service(cluster.id)
+ _add_affinity_groups_for_cluster(cluster_service, affinity_groups)
+
+
+def _add_affinity_groups_for_cluster(cluster_service, affinity_groups):
+ affinity_groups_service = cluster_service.affinity_groups_service()
+ for affinity_group in affinity_groups_service.list():
+ affinity_groups.append(affinity_group.name)
+
+
+def _write_attached_storage_domains(f, dc_service, dc):
+ """
+ Add all the attached storage domains to the var file
+ """
+ # Locate the service that manages the storage domains that are attached
+ # to the data centers:
+ attached_sds_service = dc_service.storage_domains_service()
+ attached_sds_list = attached_sds_service.list()
+ for attached_sd in attached_sds_list:
+ if attached_sd.name == 'hosted_storage':
+ f.write("# Hosted storage should not be part of the "
+ "recovery process! Comment it out.\n")
+ f.write("#- dr_domain_type: %s\n" % attached_sd.storage.type)
+ f.write("# dr_primary_name: %s\n" % attached_sd.name)
+ f.write("# dr_primary_dc_name: %s\n\n" % dc.name)
+ continue
+
+ if attached_sd.type == otypes.StorageDomainType.EXPORT:
+ f.write("# Export storage domain should not be part of the "
+ "recovery process!\n")
+ f.write("# Please note that a data center with an export "
+ "storage domain might reflect on the failback process.\n")
+ f.write("#- dr_domain_type: %s\n" % attached_sd.storage.type)
+ f.write("# dr_primary_name: %s\n" % attached_sd.name)
+ f.write("# dr_primary_dc_name: %s\n\n" % dc.name)
+ continue
+
+ f.write("- dr_domain_type: %s\n" % attached_sd.storage.type)
+ f.write(" dr_wipe_after_delete: %s\n" % attached_sd.wipe_after_delete)
+ f.write(" dr_backup: %s\n" % attached_sd.backup)
+ f.write(" dr_critical_space_action_blocker: %s\n"
+ % attached_sd.critical_space_action_blocker)
+ f.write(" dr_storage_domain_type: %s\n" % attached_sd.type)
+ f.write(" dr_warning_low_space: %s\n"
+ % attached_sd.warning_low_space_indicator)
+ f.write(" dr_primary_name: %s\n" % attached_sd.name)
+ f.write(" dr_primary_master_domain: %s\n" % attached_sd.master)
+ f.write(" dr_primary_dc_name: %s\n" % dc.name)
+ is_fcp = attached_sd._storage.type == otypes.StorageType.FCP
+ is_scsi = attached_sd.storage.type == otypes.StorageType.ISCSI
+ if not is_fcp and not is_scsi:
+ f.write(" dr_primary_path: %s\n" % attached_sd.storage.path)
+ f.write(" dr_primary_address: %s\n" % attached_sd.storage.address)
+ if attached_sd._storage.type == otypes.StorageType.POSIXFS:
+ f.write(" dr_primary_vfs_type: %s\n"
+ % attached_sd.storage.vfs_type)
+ _add_secondary_mount(f, dc.name, attached_sd)
+ else:
+ f.write(" dr_discard_after_delete: %s\n"
+ % attached_sd.discard_after_delete)
+ f.write(" dr_domain_id: %s\n" % attached_sd.id)
+ if attached_sd._storage._type == otypes.StorageType.ISCSI:
+ f.write(" dr_primary_address: %s\n" %
+ attached_sd.storage.volume_group
+ .logical_units[0].address)
+ f.write(" dr_primary_port: %s\n" %
+ attached_sd.storage.volume_group.logical_units[0].port)
+ targets = set(lun_unit.target for lun_unit in
+ attached_sd.storage.volume_group.logical_units)
+ f.write(" dr_primary_target: [%s]\n" %
+ ','.join(['"' + target + '"' for target in targets]))
+ _add_secondary_scsi(f, dc.name, attached_sd, targets)
+ else:
+ _add_secondary_fcp(f, dc.name, attached_sd)
+ f.write("\n")
+
+
+def _add_secondary_mount(f, dc_name, attached):
+ f.write(" # Fill in the empty properties related to the secondary site\n")
+ f.write(" dr_secondary_name: # %s\n" % attached.name)
+ f.write(" dr_secondary_master_domain: # %s\n" % attached.master)
+ f.write(" dr_secondary_dc_name: # %s\n" % dc_name)
+ f.write(" dr_secondary_path: # %s\n" % attached.storage.path)
+ f.write(" dr_secondary_address: # %s\n" % attached.storage.address)
+ if attached._storage.type == otypes.StorageType.POSIXFS:
+ f.write(" dr_secondary_vfs_type: # %s\n" % attached.storage.vfs_type)
+
+
+def _add_secondary_scsi(f, dc_name, attached, targets):
+ f.write(" # Fill in the empty properties related to the secondary site\n")
+ f.write(" dr_secondary_name: # %s\n" % attached.name)
+ f.write(" dr_secondary_master_domain: # %s\n" % attached.master)
+ f.write(" dr_secondary_dc_name: # %s\n" % dc_name)
+ f.write(" dr_secondary_address: # %s\n" % attached.storage.volume_group
+ .logical_units[0].address)
+ f.write(" dr_secondary_port: # %s\n" % attached.storage.volume_group
+ .logical_units[0].port)
+ f.write(" # target example: [\"target1\",\"target2\",\"target3\"]\n")
+ f.write(" dr_secondary_target: # [%s]\n" %
+ ','.join(['"' + target + '"' for target in targets]))
+
+
+def _add_secondary_fcp(f, dc_name, attached):
+ f.write(" # Fill in the empty properties related to the secondary site\n")
+ f.write(" dr_secondary_name: # %s\n" % attached.name)
+ f.write(" dr_secondary_master_domain: # %s\n" % attached.master)
+ f.write(" dr_secondary_dc_name: # %s\n" % dc_name)
+
+
+def _write_clusters(f, clusters):
+ f.write("# Mapping for cluster\n")
+ f.write("dr_cluster_mappings:\n")
+ for cluster_name in clusters:
+ f.write("- primary_name: %s\n" % cluster_name)
+ f.write(" # Fill the correlated cluster name in the "
+ "secondary site for cluster '%s'\n" % cluster_name)
+ f.write(" secondary_name: # %s\n\n" % cluster_name)
+
+
+def _write_affinity_groups(f, affinity_groups):
+ f.write("\n# Mapping for affinity group\n")
+ f.write("dr_affinity_group_mappings:\n")
+ for affinity_group in affinity_groups:
+ f.write("- primary_name: %s\n" % affinity_group)
+ f.write(" # Fill the correlated affinity group name in the "
+ "secondary site for affinity '%s'\n" % affinity_group)
+ f.write(" secondary_name: # %s\n\n" % affinity_group)
+
+
+def _write_affinity_labels(f, affinity_labels):
+ f.write("\n# Mapping for affinity label\n")
+ f.write("dr_affinity_label_mappings:\n")
+ for affinity_label in affinity_labels:
+ f.write("- primary_name: %s\n" % affinity_label)
+ f.write(" # Fill the correlated affinity label name in the "
+ "secondary site for affinity label '%s'\n" % affinity_label)
+ f.write(" secondary_name: # %s\n\n" % affinity_label)
+
+
+def _write_aaa_domains(f, domains):
+ f.write("\n# Mapping for domain\n")
+ f.write("dr_domain_mappings: \n")
+ for domain in domains:
+ f.write("- primary_name: %s\n" % domain)
+ f.write(" # Fill in the correlated domain in the "
+ "secondary site for domain '%s'\n" % domain)
+ f.write(" secondary_name: # %s\n\n" % domain)
+
+
+def _write_roles(f):
+ f.write("\n# Mapping for role\n")
+ f.write("# Fill in any roles which should be mapped between sites.\n")
+ f.write("dr_role_mappings: \n")
+ f.write("- primary_name: \n")
+ f.write(" secondary_name: \n\n")
+
+
+def _write_vnic_profiles(f, networks):
+ f.write("dr_network_mappings:\n")
+ for network in networks:
+ f.write("- primary_network_name: %s\n" % network['network_name'])
+ f.write("# Data Center name is relevant when multiple vnic profiles"
+ " are maintained.\n")
+ f.write("# please uncomment it in case you have more than one DC.\n")
+ f.write("# primary_network_dc: %s\n" % network['network_dc'])
+ f.write(" primary_profile_name: %s\n" % network['profile_name'])
+ f.write(" primary_profile_id: %s\n" % network['profile_id'])
+ f.write(" # Fill in the correlated vnic profile properties in the "
+ "secondary site for profile '%s'\n" % network['profile_name'])
+ f.write(" secondary_network_name: # %s\n" % network['network_name'])
+ f.write("# Data Center name is relevant when multiple vnic profiles"
+ " are maintained.\n")
+ f.write("# please uncomment it in case you have more than one DC.\n")
+ f.write("# secondary_network_dc: %s\n" % network['network_dc'])
+ f.write(" secondary_profile_name: # %s\n" % network['profile_name'])
+ f.write(" secondary_profile_id: # %s\n\n" % network['profile_id'])
+
+
+def _write_external_lun_disks(f, external_disks, host_storages):
+ f.write("\n# Mapping for external LUN disks\n")
+ f.write("dr_lun_mappings:")
+ for disk in external_disks:
+ disk_id = disk.lun_storage.logical_units[0].id
+ f.write("\n- logical_unit_alias: %s\n" % disk.alias)
+ f.write(" logical_unit_description: %s\n" % disk.description)
+ f.write(" wipe_after_delete: %s\n" % disk.wipe_after_delete)
+ f.write(" shareable: %s\n" % disk.shareable)
+ f.write(" primary_logical_unit_id: %s\n" % disk_id)
+ disk_storage_type = ''
+ if host_storages.get(disk_id) is not None:
+ disk_storage_type = host_storages.get(disk_id).type
+ disk_storage = host_storages.get(disk_id).logical_units[0]
+ f.write(" primary_storage_type: %s\n" % disk_storage_type)
+ if disk_storage_type == otypes.StorageType.ISCSI:
+ portal = ''
+ if disk_storage.portal is not None:
+ splitted = disk_storage.portal.split(',')
+ if len(splitted) > 1:
+ portal = splitted[1]
+ f.write(" primary_logical_unit_address: %s\n"
+ " primary_logical_unit_port: %s\n"
+ " primary_logical_unit_portal: \"%s\"\n"
+ " primary_logical_unit_target: %s\n"
+ % (disk_storage.address,
+ disk_storage.port,
+ portal,
+ disk_storage.target))
+ if disk_storage.username is not None:
+ f.write(" primary_logical_unit_username: %s\n"
+ " primary_logical_unit_password: "
+ "PLEASE_SET_PASSWORD_HERE\n"
+ % disk_storage.username)
+
+ f.write(" # Fill in the following properties of the external LUN "
+ "disk in the secondary site\n")
+ f.write(
+ " secondary_storage_type: %s\n" % (
+ disk_storage_type
+ if disk_storage_type != ''
+ else "STORAGE TYPE COULD NOT BE FETCHED!"
+ )
+ )
+ f.write(" secondary_logical_unit_id: # %s\n" % disk_id)
+ if disk_storage_type == otypes.StorageType.ISCSI:
+ f.write(" secondary_logical_unit_address: # %s\n"
+ " secondary_logical_unit_port: # %s\n"
+ " secondary_logical_unit_portal: # \"%s\"\n"
+ " secondary_logical_unit_target: # %s\n"
+ % (disk_storage.address,
+ disk_storage.port,
+ portal,
+ disk_storage.target))
+ if disk_storage.username is not None:
+ f.write(" secondary_logical_unit_username: # %s\n"
+                        "  secondary_logical_unit_password: "
+ "PLEASE_SET_PASSWORD_HERE\n"
+ % disk_storage.username)
+
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/generate_vars.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/generate_vars.py
new file mode 100755
index 00000000..269a8638
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/generate_vars.py
@@ -0,0 +1,319 @@
+#!/usr/bin/python3
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import logging
+import os.path
+import subprocess
+import sys
+
+from configparser import ConfigParser
+from ansible.module_utils.six.moves import input
+
+import ovirtsdk4 as sdk
+
+from bcolors import bcolors
+
+
+INFO = bcolors.OKGREEN
+INPUT = bcolors.OKGREEN
+WARN = bcolors.WARNING
+FAIL = bcolors.FAIL
+END = bcolors.ENDC
+PREFIX = "[Generate Mapping File] "
+CA_DEF = '/etc/pki/ovirt-engine/ca.pem'
+USERNAME_DEF = 'admin@internal'
+SITE_DEF = 'http://localhost:8080/ovirt-engine/api'
+PLAY_DEF = "../examples/dr_play.yml"
+
+
+class GenerateMappingFile:
+
+ def run(self, conf_file, log_file, log_level):
+ log = self._set_log(log_file, log_level)
+        log.info("Starting to generate the variable mapping file "
+                 "for oVirt ansible disaster recovery")
+ dr_tag = "generate_mapping"
+ site, username, password, ca_file, var_file, ansible_play_file = \
+ self._init_vars(conf_file, log)
+ log.info("Site address: %s \n"
+ "username: %s \n"
+ "password: *******\n"
+ "ca file location: %s \n"
+ "output file location: %s \n"
+ "ansible play location: %s ",
+ site, username, ca_file, var_file, ansible_play_file)
+ if not self._validate_connection(log,
+ site,
+ username,
+ password,
+ ca_file):
+ self._print_error(log)
+ sys.exit()
+ extra_vars = "site={0} username={1} password={2} ca={3} var_file={4}".\
+ format(site, username, password, ca_file, var_file)
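+        # Note: these extra vars (including the password) are passed on the
+        # ansible-playbook command line and may be visible in process lists.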
+ command = [
+ "ansible-playbook", ansible_play_file,
+ "-t", dr_tag,
+ "-e", extra_vars,
+ "-vvvvv"
+ ]
+ log.info("Executing command %s", ' '.join(map(str, command)))
+ if log_file is not None and log_file != '':
+ self._log_to_file(log_file, command)
+ else:
+ self._log_to_console(command, log)
+
+ if not os.path.isfile(var_file):
+            log.error("Cannot find output file in '%s'.", var_file)
+ self._print_error(log)
+ sys.exit()
+ log.info("Var file location: '%s'", var_file)
+ self._print_success(log)
+
+ def _log_to_file(self, log_file, command):
+ with open(log_file, "a") as f:
+ proc = subprocess.Popen(command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True)
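+            # stdout is drained before stderr; stderr output is expected to
+            # stay small here, so the sequential reads should not block.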
+ for line in iter(proc.stdout.readline, ''):
+ f.write(line)
+ for line in iter(proc.stderr.readline, ''):
+ f.write(line)
+ print("%s%s%s" % (FAIL, line, END))
+
+ def _log_to_console(self, command, log):
+ proc = subprocess.Popen(command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True)
+ for line in iter(proc.stdout.readline, ''):
+ log.debug(line)
+ for line in iter(proc.stderr.readline, ''):
+ log.error(line)
+
+ def _set_log(self, log_file, log_level):
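+        # getLogger() returns a per-name singleton, so calling _set_log more
+        # than once would attach duplicate handlers to the same logger.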
+ logger = logging.getLogger(PREFIX)
+
+ if log_file is not None and log_file != '':
+ formatter = logging.Formatter(
+ '%(asctime)s %(levelname)s %(message)s')
+ hdlr = logging.FileHandler(log_file)
+ hdlr.setFormatter(formatter)
+ else:
+ hdlr = logging.StreamHandler(sys.stdout)
+
+ logger.addHandler(hdlr)
+ logger.setLevel(log_level)
+ return logger
+
+ def _print_success(self, log):
+ msg = "Finished generating variable mapping file " \
+ "for oVirt ansible disaster recovery."
+ log.info(msg)
+ print("%s%s%s%s" % (INFO, PREFIX, msg, END))
+
+ def _print_error(self, log):
+ msg = "Failed to generate var file."
+ log.error(msg)
+ print("%s%s%s%s" % (FAIL, PREFIX, msg, END))
+
+ def _connect_sdk(self, url, username, password, ca):
+ connection = sdk.Connection(
+ url=url,
+ username=username,
+ password=password,
+ ca_file=ca,
+ )
+ return connection
+
+ def _validate_connection(self,
+ log,
+ url,
+ username,
+ password,
+ ca):
+ conn = None
+ try:
+ conn = self._connect_sdk(url,
+ username,
+ password,
+ ca)
+ dcs_service = conn.system_service().data_centers_service()
+ dcs_service.list()
+ except Exception as e:
+ msg = "Connection to setup has failed. " \
+ "Please check your credentials: " \
+ "\n URL: " + url + \
+ "\n user: " + username + \
+ "\n CA file: " + ca
+ log.error(msg)
+ print("%s%s%s%s" % (FAIL, PREFIX, msg, END))
+ log.error("Error: %s", e)
+ if conn:
+ conn.close()
+ return False
+ return True
+
+ def _validate_output_file_exists(self, output_file, log):
+ _dir = os.path.dirname(output_file)
+ if _dir != '' and not os.path.exists(_dir):
+            log.warning("Path '%s' does not exist. Creating the directory.",
+                        _dir)
+ os.makedirs(_dir)
+ if os.path.isfile(output_file):
+ valid = {"yes": True, "y": True, "ye": True,
+ "no": False, "n": False}
+ ans = input("%s%sThe output file '%s' already exists. "
+ "Would you like to override it (y,n)? %s"
+ % (WARN, PREFIX, output_file, END))
+ while True:
+ ans = ans.lower()
+ if ans in valid:
+ if valid[ans]:
+ break
+ msg = "Failed to create output file. " \
+ "File could not be overridden."
+ log.error(msg)
+ print("%s%s%s%s" % (FAIL, PREFIX, msg, END))
+ sys.exit(0)
+ ans = input("%s%sPlease respond with 'yes' or 'no': %s"
+ % (INPUT, PREFIX, END))
+ try:
+ os.remove(output_file)
+ except OSError:
+ log.error("File %s could not be replaced.", output_file)
+ print("%s%sFile %s could not be replaced.%s"
+ % (FAIL, PREFIX, output_file, END))
+ sys.exit(0)
+
+ def _init_vars(self, conf_file, log):
+ """ Declare constants """
+ _SECTION = 'generate_vars'
+ _SITE = 'site'
+ _USERNAME = 'username'
+ _PASSWORD = 'password'
+ _CA_FILE = 'ca_file'
+ # TODO: Must have full path, should add relative path support.
+ _OUTPUT_FILE = 'output_file'
+ _ANSIBLE_PLAY = 'ansible_play'
+
+ settings = ConfigParser()
+ settings.read(conf_file)
+ if _SECTION not in settings.sections():
+ settings.add_section(_SECTION)
+ if not settings.has_option(_SECTION, _SITE):
+ settings.set(_SECTION, _SITE, '')
+ if not settings.has_option(_SECTION, _USERNAME):
+ settings.set(_SECTION, _USERNAME, '')
+ if not settings.has_option(_SECTION, _PASSWORD):
+ settings.set(_SECTION, _PASSWORD, '')
+ if not settings.has_option(_SECTION, _CA_FILE):
+ settings.set(_SECTION, _CA_FILE, '')
+ if not settings.has_option(_SECTION, _OUTPUT_FILE):
+ settings.set(_SECTION, _OUTPUT_FILE, '')
+ if not settings.has_option(_SECTION, _ANSIBLE_PLAY):
+ settings.set(_SECTION, _ANSIBLE_PLAY, '')
+
+ site = settings.get(_SECTION, _SITE,
+ vars=DefaultOption(settings,
+ _SECTION,
+ site=None))
+
+ username = settings.get(_SECTION, _USERNAME,
+ vars=DefaultOption(settings,
+ _SECTION,
+ username=None))
+
+ password = settings.get(_SECTION, _PASSWORD,
+ vars=DefaultOption(settings,
+ _SECTION,
+ password=None))
+
+ ca_file = settings.get(_SECTION, _CA_FILE,
+ vars=DefaultOption(settings,
+ _SECTION,
+ ca_file=None))
+ ca_file = os.path.expanduser(ca_file)
+
+ output_file = settings.get(_SECTION, _OUTPUT_FILE,
+ vars=DefaultOption(settings,
+ _SECTION,
+ output_file=None))
+ output_file = os.path.expanduser(output_file)
+
+ ansible_play_file = settings.get(_SECTION, _ANSIBLE_PLAY,
+ vars=DefaultOption(settings,
+ _SECTION,
+ ansible_play=None))
+ ansible_play_file = os.path.expanduser(ansible_play_file)
+
+ if not site:
+ site = input("%s%sSite address is not initialized. "
+ "Please provide the site URL (%s): %s"
+ % (INPUT, PREFIX, SITE_DEF, END)
+ ) or SITE_DEF
+ if not username:
+ username = input("%s%sUsername is not initialized. "
+ "Please provide the username (%s): %s"
+ % (INPUT, PREFIX, USERNAME_DEF, END)
+ ) or USERNAME_DEF
+ while not password:
+ password = input("%s%sPassword is not initialized. "
+ "Please provide the password for username %s: %s"
+ % (INPUT, PREFIX, username, END))
+
+ while not os.path.isfile(ca_file):
+ ca_file = input("%s%sCA file '%s' does not exist. "
+ "Please provide the CA file location (%s):%s "
+ % (INPUT, PREFIX, ca_file, CA_DEF, END)
+ ) or CA_DEF
+ ca_file = os.path.expanduser(ca_file)
+
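+        # Note: the fallback used below is the literal config key name
+        # ('output_file') rather than a real path; no packaged default is
+        # defined for the output file location.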
+ while not output_file:
+ output_file = input("%s%sOutput file location is not initialized. "
+ "Please provide the output file location "
+ "for the mapping var file (%s): %s"
+ % (INPUT, PREFIX, _OUTPUT_FILE, END)
+ ) or _OUTPUT_FILE
+ output_file = os.path.expanduser(output_file)
+ self._validate_output_file_exists(output_file, log)
+
+ while not os.path.isfile(ansible_play_file):
+ ansible_play_file = input("%s%sAnsible play file '%s' does not "
+ "exist. Please provide the ansible play "
+ "file to generate the mapping var file "
+ "(%s): %s" % (INPUT,
+ PREFIX,
+ ansible_play_file,
+ PLAY_DEF,
+ END)
+ ) or PLAY_DEF
+ ansible_play_file = os.path.expanduser(ansible_play_file)
+
+ return site, username, password, ca_file, output_file, ansible_play_file
+
+
+class DefaultOption(dict):
+
+ def __init__(self, config, section, **kv):
+ self._config = config
+ self._section = section
+ dict.__init__(self, **kv)
+
+ def items(self):
+ _items = []
+ for option in self:
+ if not self._config.has_option(self._section, option):
+ _items.append((option, self[option]))
+ else:
+ value_in_config = self._config.get(self._section, option)
+ _items.append((option, value_in_config))
+ return _items
+
+
+if __name__ == "__main__":
+ GenerateMappingFile().run(conf_file='dr.conf',
+ log_file='/tmp/ovirt-dr.log',
+ log_level=logging.getLevelName("DEBUG"))
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/generate_vars_test.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/generate_vars_test.py
new file mode 100755
index 00000000..be529cd0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/generate_vars_test.py
@@ -0,0 +1,38 @@
+#!/usr/bin/python3
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+from contextlib import contextmanager
+import pexpect
+
+
+@contextmanager
+def generator(tmpdir):
+ env = dict(os.environ)
+ env["PYTHONUNBUFFERED"] = "x"
+ env["GENERATE_VARS_CONF_DIR"] = str(tmpdir)
+ env["GENERATE_VARS_OUT_DIR"] = str(tmpdir)
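+    # These variables are presumably read by the 'generate-vars' wrapper to
+    # redirect its configuration and output into the test's tmpdir.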
+ gen = pexpect.spawn('./generate-vars', env=env)
+ try:
+ yield gen
+ finally:
+ gen.terminate(force=True)
+
+
+INITIAL_CONF = """
+[generate_vars]
+"""
+
+
+def test_initial_conf(tmpdir):
+ conf = tmpdir.join("dr.conf")
+ conf.write(INITIAL_CONF)
+ with generator(tmpdir) as gen:
+ # TODO: Use regex
+ gen.expect('override')
+ # Add dry run
+ gen.sendline('y')
+ # "/tmp/dr_ovirt-ansible/mapping_vars.yml"
+ assert os.path.exists("/tmp/dr_ovirt-ansible/mapping_vars.yml")
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/ovirt-dr b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/ovirt-dr
new file mode 100755
index 00000000..3ccee8e9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/ovirt-dr
@@ -0,0 +1,159 @@
+#!/usr/bin/python3
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import logging as logg
+import os
+import sys
+import time
+import getopt
+
+from configparser import ConfigParser
+from ansible.module_utils.six.moves import input
+
+import fail_back
+import fail_over
+import generate_vars
+import validator
+
+VALIDATE = 'validate'
+GENERATE = 'generate'
+FAILOVER = 'failover'
+FAILBACK = 'failback'
+LOG_FILE = 'log-file'
+LOG_LEVEL = 'log-level'
+DEF_LOG_FILE = ""
+DEF_DEBUG_LEVEL = 'DEBUG'
+DEF_CONF_FILE = 'dr.conf'
+
+
+def main(argv):
+ action, conf_file, log_file, log_level = _init_vars(argv)
+ while not os.path.isfile(conf_file):
+ conf_file = input(
+ "Conf file '" + conf_file + "' does not exist."
+ " Please provide the configuration file location: ")
+
+    if action != VALIDATE:
+ log_file = log_file.format(int(round(time.time() * 1000)))
+ if log_level not in ['DEBUG', 'INFO', 'WARNING', 'ERROR']:
+        print("ovirt-dr: log level must be one of 'DEBUG', 'INFO', "
+              "'WARNING', 'ERROR'\n"
+              "Use 'ovirt-dr --help' for more information.")
+ sys.exit(2)
+
+ create_log_dir(log_file)
+ _print_log_file_name(log_file)
+    if action == VALIDATE:
+        validator.ValidateMappingFile().run(conf_file)
+    elif action == GENERATE:
+        generate_vars.GenerateMappingFile().run(conf_file,
+                                                log_file,
+                                                logg.getLevelName(log_level))
+        _print_log_file_name(log_file)
+    elif action == FAILOVER:
+        fail_over.FailOver().run(conf_file,
+                                 log_file,
+                                 logg.getLevelName(log_level))
+        _print_log_file_name(log_file)
+    elif action == FAILBACK:
+        fail_back.FailBack().run(conf_file,
+                                 log_file,
+                                 logg.getLevelName(log_level))
+        _print_log_file_name(log_file)
+    elif action == '--help':
+ help_log()
+ else:
+ print("\tError: action '%s' is not defined" % action)
+ help_log()
+
+
+def _print_log_file_name(log_file):
+ if log_file is not None and log_file != '':
+ print("Log file: '%s'" % log_file)
+
+
+def _init_vars(argv):
+ conf_file = DEF_CONF_FILE
+ log_file = ''
+ log_level = ''
+
+ if len(argv) == 0:
+ print("ovirt-dr: missing action operand\n"
+ "Use 'ovirt-dr --help' for more information.")
+ sys.exit(2)
+ action = argv[0]
+
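+    # Note: getopt parses the short-option string "f:log:level:" as a set of
+    # single-character options, so in practice only '-f' and the long options
+    # (--conf-file, --log-file, --log-level) are handled reliably.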
+ try:
+ opts, args = \
+ getopt.getopt(argv[1:], "f:log:level:",
+ ["conf-file=", "log-file=", "log-level="])
+ except getopt.GetoptError:
+ help_log()
+ sys.exit(2)
+
+ for opt, arg in opts:
+ if opt in ("-f", "--conf-file"):
+ conf_file = arg
+ if opt in ("-log", "--log-file"):
+ log_file = arg
+ if opt in ("-level", "--log-level"):
+ log_level = arg
+
+ log_file, log_level = _get_log_conf(conf_file, log_file, log_level)
+ return action, conf_file, log_file, log_level.upper()
+
+
+def _get_log_conf(conf_file, log_file, log_level):
+ log_section = "log"
+ log_file_conf = "log_file"
+ log_level_conf = "log_level"
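+    # Values passed on the command line take precedence; the [log] section of
+    # the conf file is only consulted for values that were not supplied.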
+ while not os.path.isfile(conf_file):
+ conf_file = input(
+ "Conf file '" + conf_file + "' does not exist."
+ " Please provide the configuration file location: ")
+ settings = ConfigParser()
+ settings.read(conf_file)
+ if log_section not in settings.sections():
+ settings.add_section(log_section)
+ if settings.has_option(log_section, log_file_conf) and \
+ (log_file is None or log_file == ''):
+ log_file = settings.get(log_section, log_file_conf)
+ if settings.has_option(log_section, log_level_conf) and \
+ (log_level is None or log_level == ''):
+ log_level = settings.get(log_section, log_level_conf)
+ else:
+ log_level = "DEBUG"
+ return log_file, log_level
+
+
+def create_log_dir(fname):
+ _dir = os.path.dirname(fname)
+ if _dir != '' and not os.path.exists(_dir):
+ os.makedirs(_dir)
+
+
+def help_log():
+ print(
+ '''
+ \tusage: ovirt-dr <%s/%s/%s/%s>
+ [--conf-file=dr.conf]
+ [--log-file=log_file.log]
+ [--log-level=DEBUG/INFO/WARNING/ERROR]\n
+ \tHere is a description of the following actions:\n
+ \t\t%s\tGenerate the mapping var file based on primary setup
+ \t\t%s\tValidate the var file mapping
+ \t\t%s\tStart a failover process to the target setup
+ \t\t%s\tStart a failback process to the source setup
+ ''' % (GENERATE,
+ VALIDATE,
+ FAILOVER,
+ FAILBACK,
+ GENERATE,
+ VALIDATE,
+ FAILOVER,
+ FAILBACK))
+
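+# Example invocation (assuming a dr.conf file in the working directory):
+#   ./ovirt-dr generate --log-file=/tmp/ovirt-dr.log --log-level=DEBUG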
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/validator.py b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/validator.py
new file mode 100755
index 00000000..12dc1118
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/validator.py
@@ -0,0 +1,732 @@
+#!/usr/bin/python3
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+import yaml
+
+import ovirtsdk4 as sdk
+import ovirtsdk4.types as types
+
+from bcolors import bcolors
+from configparser import ConfigParser
+from ansible.module_utils.six.moves import input
+
+
+INFO = bcolors.OKGREEN
+INPUT = bcolors.OKGREEN
+WARN = bcolors.WARNING
+FAIL = bcolors.FAIL
+END = bcolors.ENDC
+PREFIX = "[Validate Mapping File] "
+
+
+class ValidateMappingFile:
+
+ def_var_file = "../examples/disaster_recovery_vars.yml"
+ default_main_file = "../defaults/main.yml"
+ var_file = ""
+ running_vms = "dr_running_vms"
+ cluster_map = 'dr_cluster_mappings'
+ domain_map = 'dr_import_storages'
+ role_map = 'dr_role_mappings'
+ aff_group_map = 'dr_affinity_group_mappings'
+ aff_label_map = 'dr_affinity_label_mappings'
+ network_map = 'dr_network_mappings'
+
+ def run(self, conf_file):
+ print("%s%sValidate variable mapping file "
+ "for oVirt ansible disaster recovery%s"
+ % (INFO, PREFIX, END))
+ self._set_dr_conf_variables(conf_file)
+ print("%s%sVar File: '%s'%s" % (INFO, PREFIX, self.var_file, END))
+
+ python_vars = self._read_var_file()
+ if (not self._validate_lists_in_mapping_file(python_vars)
+ or not self._validate_duplicate_keys(python_vars)
+ or not self._entity_validator(python_vars)
+ or not self._validate_failback_leftovers()):
+ self._print_finish_error()
+ sys.exit()
+
+ if not self._validate_hosted_engine(python_vars):
+ self._print_finish_error()
+ sys.exit()
+
+ if not self._validate_export_domain(python_vars):
+ self._print_finish_error()
+ sys.exit()
+ self._print_finish_success()
+
+    def _validate_lists_in_mapping_file(self, mapping_vars):
+        return all(self._is_list(mapping_vars, mapping)
+                   for mapping in (self.cluster_map,
+                                   self.domain_map,
+                                   self.role_map,
+                                   self.aff_group_map,
+                                   self.aff_label_map,
+                                   self.network_map))
+
+ def _is_list(self, mapping_vars, mapping):
+ map_file = mapping_vars.get(mapping)
+ if not isinstance(map_file, list) and map_file is not None:
+ print("%s%s%s is not a list: '%s'."
+ " Please check your mapping file%s"
+ % (FAIL, PREFIX, mapping, map_file, END))
+ return False
+ return True
+
+ def _print_finish_error(self):
+ print("%s%sFailed to validate variable mapping file "
+ "for oVirt ansible disaster recovery%s"
+ % (FAIL, PREFIX, END))
+
+ def _print_finish_success(self):
+ print("%s%sFinished validation of variable mapping file "
+ "for oVirt ansible disaster recovery%s"
+ % (INFO, PREFIX, END))
+
+ def _read_var_file(self):
+ with open(self.var_file, 'r') as info:
+ info_dict = yaml.safe_load(info)
+ return info_dict
+
+ def _set_dr_conf_variables(self, conf_file):
+ _SECTION = 'validate_vars'
+ _VAR_FILE = 'var_file'
+
+ # Get default location of the yml var file.
+ settings = ConfigParser()
+ settings.read(conf_file)
+ if _SECTION not in settings.sections():
+ settings.add_section(_SECTION)
+ if not settings.has_option(_SECTION, _VAR_FILE):
+ settings.set(_SECTION, _VAR_FILE, '')
+ var_file = settings.get(_SECTION, _VAR_FILE,
+ vars=DefaultOption(settings,
+ _SECTION,
+                                                   var_file=self.def_var_file))
+ var_file = os.path.expanduser(var_file)
+
+ while not os.path.isfile(var_file):
+ var_file = input("%s%sVar file '%s' does not exist. Please "
+ "provide the location of the var file (%s): %s"
+ % (WARN, PREFIX, var_file, self.def_var_file, END)
+ ) or self.def_var_file
+ var_file = os.path.expanduser(var_file)
+ self.var_file = var_file
+
+ self.primary_pwd = input(
+ "%s%sPlease provide password for the primary setup: %s"
+ % (INPUT, PREFIX, END))
+ self.second_pwd = input(
+ "%s%sPlease provide password for the secondary setup: %s"
+ % (INPUT, PREFIX, END))
+
+ def _print_duplicate_keys(self, duplicates, keys):
+ ret_val = False
+ for key in keys:
+ if len(duplicates[key]) > 0:
+ print("%s%sFound the following duplicate keys in %s: %s%s" %
+ (FAIL, PREFIX, key, list(duplicates[key]), END))
+ ret_val = True
+ return ret_val
+
+ def _entity_validator(self, python_vars):
+ ovirt_setups = ConnectSDK(
+ python_vars,
+ self.primary_pwd,
+ self.second_pwd)
+ isValid = ovirt_setups.validate_primary()
+ isValid = ovirt_setups.validate_secondary() and isValid
+ if isValid:
+ primary_conn, second_conn = '', ''
+ try:
+ primary_conn = ovirt_setups.connect_primary()
+ if primary_conn is None:
+ return False
+ isValid = self._validate_entities_in_setup(
+ primary_conn, 'primary', python_vars) and isValid
+ second_conn = ovirt_setups.connect_secondary()
+ if second_conn is None:
+ return False
+ isValid = self._validate_entities_in_setup(
+ second_conn, 'secondary', python_vars) and isValid
+ cluster_mapping = python_vars.get(self.cluster_map)
+ isValid = isValid and self._validate_vms_for_failback(
+ primary_conn,
+ "primary")
+ isValid = isValid and self._validate_vms_for_failback(
+ second_conn,
+ "secondary")
+ isValid = isValid and self._is_compatible_versions(
+ primary_conn,
+ second_conn,
+ cluster_mapping)
+ finally:
+ # Close the connections.
+ if primary_conn:
+ primary_conn.close()
+ if second_conn:
+ second_conn.close()
+
+ return isValid
+
+ def _validate_failback_leftovers(self):
+ valid = {"yes": True, "y": True, "ye": True,
+ "no": False, "n": False}
+ with open(self.default_main_file, 'r') as stream:
+ try:
+ info_dict = yaml.safe_load(stream)
+ running_vms_file = info_dict.get(self.running_vms)
+ if os.path.isfile(running_vms_file):
+ ans = input(
+ "%s%sFile with running vms info already exists from "
+ "previous failback operation. Do you want to "
+ "delete it (yes,no)?: %s" %
+ (WARN, PREFIX, END))
+ ans = ans.lower()
+ if ans in valid and valid[ans]:
+ os.remove(running_vms_file)
+ print("%s%sFile '%s' has been deleted successfully%s" %
+ (INFO, PREFIX, running_vms_file, END))
+ else:
+ print("%s%sFile '%s' has not been deleted."
+ " It will be used in the next failback"
+ " operation%s" %
+ (INFO, PREFIX, running_vms_file, END))
+
+ except yaml.YAMLError as exc:
+ print("%s%syaml file '%s' could not be loaded%s"
+ % (FAIL, PREFIX, self.default_main_file, END))
+ print(exc)
+ return False
+ except OSError as ex:
+ print("%s%sFail to validate failback running vms file '%s'%s"
+ % (FAIL, PREFIX, self.default_main_file, END))
+ print(ex)
+ return False
+ return True
+
+ def _validate_entities_in_setup(self, conn, setup, python_vars):
+ dcs_service = conn.system_service().data_centers_service()
+ dcs_list = dcs_service.list()
+ clusters = []
+ affinity_groups = set()
+ for dc in dcs_list:
+ dc_service = dcs_service.data_center_service(dc.id)
+ clusters_service = dc_service.clusters_service()
+ attached_clusters_list = clusters_service.list()
+ for cluster in attached_clusters_list:
+ clusters.append(cluster.name)
+ cluster_service = clusters_service.cluster_service(cluster.id)
+ affinity_groups.update(
+ self._fetch_affinity_groups(cluster_service))
+ aff_labels = self._get_affinity_labels(conn)
+ aaa_domains = self._get_aaa_domains(conn)
+ # TODO: Remove once vnic profile is validated.
+ networks = self._get_vnic_profile_mapping(conn)
+ isValid = self._validate_networks(
+ python_vars,
+ networks,
+ setup)
+ isValid = self._validate_entity_exists(
+ clusters,
+ python_vars,
+ self.cluster_map,
+ setup) and isValid
+ isValid = self._validate_entity_exists(
+ list(affinity_groups),
+ python_vars,
+ self.aff_group_map,
+ setup) and isValid
+ isValid = self._validate_entity_exists(
+ aff_labels,
+ python_vars,
+ self.aff_label_map,
+ setup) and isValid
+ return isValid
+
+ def _fetch_affinity_groups(self, cluster_service):
+ affinity_groups = set()
+ affinity_groups_service = cluster_service.affinity_groups_service()
+ for affinity_group in affinity_groups_service.list():
+ affinity_groups.add(affinity_group.name)
+ return list(affinity_groups)
+
+ def _get_affinity_labels(self, conn):
+ affinity_labels = set()
+ affinity_labels_service = \
+ conn.system_service().affinity_labels_service()
+ for affinity_label in affinity_labels_service.list():
+ affinity_labels.add(affinity_label.name)
+ return list(affinity_labels)
+
+ def _get_aaa_domains(self, conn):
+ domains = []
+ domains_service = conn.system_service().domains_service()
+ domains_list = domains_service.list()
+ for domain in domains_list:
+ domains.append(domain.name)
+ return domains
+
+ def _get_vnic_profile_mapping(self, conn):
+ networks = []
+ vnic_profiles_service = conn.system_service().vnic_profiles_service()
+        vnic_profile_list = vnic_profiles_service.list()
+        networks_list = conn.system_service().networks_service().list()
+        for vnic_profile_item in vnic_profile_list:
+            mapped_network = {}
+            network_name = ''
+            dc_name = ''
+ for network_item in networks_list:
+ if network_item.id == vnic_profile_item.network.id:
+ network_name = network_item.name
+ dc_name = conn.system_service().data_centers_service(). \
+ data_center_service(network_item.data_center.id). \
+ get()._name
+ break
+ mapped_network['network_name'] = network_name
+            mapped_network['network_dc'] = dc_name
+ mapped_network['profile_name'] = vnic_profile_item.name
+ networks.append(mapped_network)
+ return networks
+
+ def _key_setup(self, setup, key):
+ if setup == 'primary':
+ if key == 'dr_import_storages':
+ return 'dr_primary_name'
+ if key == 'dr_network_mappings':
+ return ['primary_profile_name',
+ 'primary_network_name',
+ 'primary_network_dc']
+ return 'primary_name'
+ elif setup == 'secondary':
+ if key == 'dr_import_storages':
+ return 'dr_secondary_name'
+ if key == 'dr_network_mappings':
+ return ['secondary_profile_name',
+ 'secondary_network_name',
+ 'secondary_network_dc']
+ return 'secondary_name'
+
+ def _validate_networks(self, var_file, networks_setup, setup):
+ dups = self._get_network_dups(networks_setup)
+        _mappings = var_file.get(self.network_map)
+        if _mappings is None:
+            return True
+        keys = self._key_setup(setup, self.network_map)
+        for mapping in _mappings:
+ map_key = mapping[keys[0]] + \
+ "_" + mapping[keys[1]] + \
+ "_" + (mapping[keys[2]] if keys[2] in mapping else "")
+ if map_key in dups:
+ if keys[2] not in mapping:
+ print(
+ "%s%sVnic profile name '%s' and network name '%s'"
+ " are related to multiple data centers in the"
+ " %s setup. Please specify the data center name in"
+ " the mapping var file.%s" %
+ (FAIL,
+ PREFIX,
+ mapping[keys[0]],
+ mapping[keys[1]],
+ setup,
+ END))
+ return False
+ # TODO: Add check whether the data center exists in the setup
+ print("%s%sFinished validation for 'dr_network_mappings' for "
+ "%s setup with success.%s" %
+ (INFO, PREFIX, setup, END))
+ return True
+
+ def _get_network_dups(self, networks_setup):
+ attributes = [attr['profile_name']
+ + "_"
+ + attr['network_name']
+ + "_"
+ + attr['network_dc'] for attr in networks_setup]
+ dups = [x for n, x in enumerate(attributes) if x in attributes[:n]]
+ return dups
+
+ def _validate_entity_exists(self, _list, var_file, key, setup):
+ isValid = True
+ key_setup = self._key_setup(setup, key)
+ _mapping = var_file.get(key)
+ if _mapping is None:
+ return isValid
+ for x in _mapping:
+ if key_setup not in x.keys():
+ print(
+ "%s%sdictionary key '%s' is not included in %s[%s].%s" %
+ (FAIL,
+ PREFIX,
+ key_setup,
+ key,
+ x.keys(),
+ END))
+ isValid = False
+ if isValid and x[key_setup] not in _list:
+ print(
+ "%s%s%s entity '%s':'%s' does not exist in the "
+ "setup.\n%sThe entities which exists in the setup "
+ "are: %s.%s" %
+ (FAIL,
+ PREFIX,
+ key,
+ key_setup,
+ x[key_setup],
+ PREFIX,
+ _list,
+ END))
+ isValid = False
+ if isValid:
+ print(
+ "%s%sFinished validation for '%s' for key name "
+ "'%s' with success.%s" %
+ (INFO, PREFIX, key, key_setup, END))
+ return isValid
+
+ def _validate_hosted_engine(self, var_file):
+ domains = var_file[self.domain_map]
+ hosted = 'hosted_storage'
+ for domain in domains:
+ primary = domain['dr_primary_name']
+ secondary = domain['dr_secondary_name']
+ if primary == hosted or secondary == hosted:
+ print("%s%sHosted storage domains are not supported.%s"
+ % (FAIL, PREFIX, END))
+ return False
+ return True
+
+ def _validate_export_domain(self, var_file):
+ domains = var_file[self.domain_map]
+ for domain in domains:
+ domain_type = domain['dr_storage_domain_type']
+ if domain_type == 'export':
+ print("%s%sExport storage domain is not supported.%s"
+ % (FAIL, PREFIX, END))
+ return False
+ return True
+
+ def _validate_duplicate_keys(self, var_file):
+ clusters = 'clusters'
+ domains = 'domains'
+ roles = 'roles'
+ aff_groups = 'aff_groups'
+ aff_labels = 'aff_labels'
+ network = 'network'
+ key1 = 'primary_name'
+ key2 = 'secondary_name'
+ dr_primary_name = 'dr_primary_name'
+ dr_secondary_name = 'dr_secondary_name'
+
+ duplicates = self._get_dups(
+ var_file, [
+ [clusters, self.cluster_map, key1, key2],
+ [domains, self.domain_map, dr_primary_name, dr_secondary_name],
+ [roles, self.role_map, key1, key2],
+ [aff_groups, self.aff_group_map, key1, key2],
+ [aff_labels, self.aff_label_map, key1, key2]])
+ duplicates[network] = self._get_dup_network(var_file)
+ return not self._print_duplicate_keys(
+ duplicates,
+ [clusters, domains, roles, aff_groups, aff_labels, network])
+
+ def _validate_vms_for_failback(self, setup_conn, setup_type):
+ vms_in_preview = []
+ vms_delete_protected = []
+ service_setup = setup_conn.system_service().vms_service()
+ for vm in service_setup.list():
+ vm_service = service_setup.vm_service(vm.id)
+ if vm.delete_protected:
+ vms_delete_protected.append(vm.name)
+ snapshots_service = vm_service.snapshots_service()
+ for snapshot in snapshots_service.list():
+ if snapshot.snapshot_status == types.SnapshotStatus.IN_PREVIEW:
+ vms_in_preview.append(vm.name)
+ if len(vms_in_preview) > 0:
+ print("%s%sFailback process does not support VMs in preview."
+ " The '%s' setup contains the following previewed vms:"
+ " '%s'%s"
+ % (FAIL, PREFIX, setup_type, vms_in_preview, END))
+ return False
+ if len(vms_delete_protected) > 0:
+ print("%s%sFailback process does not support delete protected"
+ " VMs. The '%s' setup contains the following vms:"
+ " '%s'%s"
+ % (FAIL, PREFIX, setup_type, vms_delete_protected, END))
+ return False
+ return True
+
+ def _is_compatible_versions(self,
+ primary_conn,
+ second_conn,
+ cluster_mapping):
+ """ Validate cluster versions """
+ service_primary = primary_conn.system_service().clusters_service()
+ service_sec = second_conn.system_service().clusters_service()
+ for cluster_map in cluster_mapping:
+ search_prime = "name=%s" % cluster_map['primary_name']
+ search_sec = "name=%s" % cluster_map['secondary_name']
+ cluster_prime = service_primary.list(search=search_prime)[0]
+ cluster_sec = service_sec.list(search=search_sec)[0]
+ prime_ver = cluster_prime.version
+ sec_ver = cluster_sec.version
+ if (prime_ver.major != sec_ver.major
+ or prime_ver.minor != sec_ver.minor):
+ print("%s%sClusters have incompatible versions. "
+ "primary setup ('%s' %s.%s) not equal to "
+ "secondary setup ('%s' %s.%s)%s"
+ % (FAIL,
+ PREFIX,
+ cluster_prime.name,
+ prime_ver.major,
+ prime_ver.minor,
+ cluster_sec.name,
+ sec_ver.major,
+ sec_ver.minor,
+ END))
+ return False
+ return True
+
+ def _get_dups(self, var_file, mappings):
+ duplicates = {}
+ for mapping in mappings:
+ _return_set = set()
+ _mapping = var_file.get(mapping[1])
+ if _mapping is None or len(_mapping) < 1:
+ print("%s%smapping %s is empty in var file%s"
+ % (WARN, PREFIX, mapping[1], END))
+ duplicates[mapping[0]] = _return_set
+ continue
+ _primary = set()
+ _second = set()
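+            # set.add() returns None, so the 'or' below records first
+            # occurrences while the comprehensions collect repeated values.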
+ _return_set.update(
+ set(x[mapping[2]]
+ for x in _mapping
+ if x[mapping[2]]
+ in _primary or _primary.add(x[mapping[2]])))
+ _return_set.update(
+ set(x[mapping[3]]
+ for x in _mapping
+ if x[mapping[3]]
+ in _second or _second.add(x[mapping[3]])))
+ duplicates[mapping[0]] = _return_set
+ return duplicates
+
+ def _get_dup_network(self, var_file):
+ _return_set = set()
+ # TODO: Add data center also
+ _mapping = var_file.get(self.network_map)
+ if _mapping is None or len(_mapping) < 1:
+ print("%s%sNetwork has not been initialized in var file%s"
+ % (WARN, PREFIX, END))
+ return _return_set
+
+ # Check for profile + network name duplicates in primary
+ _primary1 = set()
+ key1_a = 'primary_profile_name'
+ key1_b = 'primary_network_name'
+ key1_c = 'primary_network_dc'
+ for x in _mapping:
+ if x[key1_a] is None or x[key1_b] is None:
+                print("%s%sNetwork mapping %s has an uninitialized "
+                      "profile/network name ('%s', '%s')%s"
+ % (FAIL,
+ PREFIX,
+ x,
+ x[key1_a],
+ x[key1_b],
+ END))
+ sys.exit()
+ primary_dc_name = ''
+ if key1_c in x:
+ primary_dc_name = x[key1_c]
+ map_key = x[key1_a] + "_" + x[key1_b] + "_" + primary_dc_name
+ if map_key in _primary1:
+ _return_set.add(map_key)
+ else:
+ _primary1.add(map_key)
+
+ # Check for profile + network name duplicates in secondary
+ _second1 = set()
+ val1_a = 'secondary_profile_name'
+ val1_b = 'secondary_network_name'
+ val1_c = 'secondary_network_dc'
+ for x in _mapping:
+ if x[val1_a] is None or x[val1_b] is None:
+ print("%s%sThe following network mapping is not "
+ "initialized in var file mapping:\n"
+ " %s:'%s'\n %s:'%s'%s"
+ % (FAIL,
+ PREFIX,
+ val1_a,
+ x[val1_a],
+ val1_b,
+ x[val1_b],
+ END))
+ sys.exit()
+ secondary_dc_name = ''
+ if val1_c in x:
+ secondary_dc_name = x[val1_c]
+ map_key = x[val1_a] + "_" + x[val1_b] + "_" + secondary_dc_name
+ if map_key in _second1:
+ _return_set.add(map_key)
+ else:
+ _second1.add(map_key)
+
+ return _return_set
+
+
+class DefaultOption(dict):
+
+ def __init__(self, config, section, **kv):
+ self._config = config
+ self._section = section
+ dict.__init__(self, **kv)
+
+ def items(self):
+ _items = []
+ for option in self:
+ if not self._config.has_option(self._section, option):
+ _items.append((option, self[option]))
+ else:
+ value_in_config = self._config.get(self._section, option)
+ _items.append((option, value_in_config))
+ return _items
+
+
+class ConnectSDK:
+ primary_url, primary_user, primary_ca = '', '', ''
+ second_url, second_user, second_ca = '', '', ''
+ prefix = ''
+ error_msg = "%s%s The '%s' field in the %s setup is not " \
+ "initialized in var file mapping.%s"
+
+ def __init__(self, var_file, primary_pwd, second_pwd):
+ """
+ ---
+ dr_sites_primary_url: http://xxx.xx.xx.xxx:8080/ovirt-engine/api
+ dr_sites_primary_username: admin@internal
+ dr_sites_primary_ca_file: /etc/pki/ovirt-engine/ca.pem
+
+ # Please fill in the following properties for the secondary site:
+ dr_sites_secondary_url: http://yyy.yy.yy.yyy:8080/ovirt-engine/api
+ dr_sites_secondary_username: admin@internal
+ dr_sites_secondary_ca_file: /etc/pki/ovirt-engine_secondary/ca.pem
+ """
+ self.primary_url = var_file.get('dr_sites_primary_url')
+ self.primary_user = var_file.get('dr_sites_primary_username')
+ self.primary_ca = var_file.get('dr_sites_primary_ca_file')
+ self.second_url = var_file.get('dr_sites_secondary_url')
+ self.second_user = var_file.get('dr_sites_secondary_username')
+ self.second_ca = var_file.get('dr_sites_secondary_ca_file')
+ self.primary_pwd = primary_pwd
+ self.second_pwd = second_pwd
+
+ def validate_primary(self):
+ isValid = True
+ if self.primary_url is None:
+ print(self.error_msg % (
+ FAIL,
+ PREFIX,
+ "url",
+ "primary",
+ END))
+ isValid = False
+ if self.primary_user is None:
+ print(self.error_msg % (
+ FAIL,
+ PREFIX,
+ "username",
+ "primary",
+ END))
+ isValid = False
+ if self.primary_ca is None:
+ print(self.error_msg % (
+ FAIL,
+ PREFIX,
+ "ca",
+ "primary",
+ END))
+ isValid = False
+ return isValid
+
+ def validate_secondary(self):
+ isValid = True
+ if self.second_url is None:
+ print(self.error_msg % (
+ FAIL,
+ PREFIX,
+ "url",
+ "secondary",
+ END))
+ isValid = False
+ if self.second_user is None:
+ print(self.error_msg % (
+ FAIL,
+ PREFIX,
+ "username",
+ "secondary",
+ END))
+ isValid = False
+ if self.second_ca is None:
+ print(self.error_msg % (
+ FAIL,
+ PREFIX,
+ "ca",
+ "secondary",
+ END))
+ isValid = False
+ return isValid
+
+ def _validate_connection(self, url, username, password, ca):
+ conn = None
+ try:
+ conn = self._connect_sdk(url, username, password, ca)
+ dcs_service = conn.system_service().data_centers_service()
+ dcs_service.list()
+ except Exception:
+ print(
+ "%s%sConnection to setup has failed."
+ " Please check your credentials: "
+ "\n%s URL: %s"
+ "\n%s user: %s"
+ "\n%s CA file: %s%s" %
+ (FAIL,
+ PREFIX,
+ PREFIX,
+ url,
+ PREFIX,
+ username,
+ PREFIX,
+ ca,
+ END))
+ if conn:
+ conn.close()
+ return None
+ return conn
+
+ def connect_primary(self):
+ return self._validate_connection(self.primary_url,
+ self.primary_user,
+ self.primary_pwd,
+ self.primary_ca)
+
+ def connect_secondary(self):
+ return self._validate_connection(self.second_url,
+ self.second_user,
+ self.second_pwd,
+ self.second_ca)
+
+ def _connect_sdk(self, url, username, password, ca):
+ connection = sdk.Connection(
+ url=url,
+ username=username,
+ password=password,
+ ca_file=ca,
+ )
+ return connection
+
+
+if __name__ == "__main__":
+ ValidateMappingFile().run('dr.conf')
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/vault_secret.sh b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/vault_secret.sh
new file mode 100644
index 00000000..52e31c6b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/files/vault_secret.sh
@@ -0,0 +1 @@
+echo "$vault_password"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_disks.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_disks.yml
new file mode 100644
index 00000000..e6ee938a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_disks.yml
@@ -0,0 +1,10 @@
+- block:
+ - name: Remove disk
+ ovirt_disk:
+ state: absent
+ id: "{{ disk.id }}"
+ auth: "{{ ovirt_auth }}"
+ ignore_errors: "{{ dr_ignore_error_clean }}"
+ tags:
+ - fail_back
+ - clean_engine
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_domain.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_domain.yml
new file mode 100644
index 00000000..b18460e1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_domain.yml
@@ -0,0 +1,20 @@
+- block:
+  # If we get the exception "Cannot deactivate Master Data Domain while there
+  # are running tasks on its Data Center.", we should wait a while and try again.
+ - name: Remove storage domain
+ ovirt_storage_domain:
+ state: absent
+ id: "{{ sd.id }}"
+ name: "{{ sd.name }}"
+ auth: "{{ ovirt_auth }}"
+ host: "{{ host }}"
+ destroy: "{{ dr_force }}"
+ data_center: "{{ sp_uuid }}"
+ register: result
+ until: dr_force or result is not failed
+ retries: "{{ dr_cleanup_retries_maintenance }}"
+ delay: "{{ dr_cleanup_delay_maintenance }}"
+ ignore_errors: "{{ dr_ignore_error_clean }}"
+ tags:
+ - fail_back
+ - clean_engine
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_domain_process.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_domain_process.yml
new file mode 100644
index 00000000..b153bb36
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_domain_process.yml
@@ -0,0 +1,49 @@
+- block:
+  # TODO: Check what happens when we force remove an unattached storage domain (probably should add a default empty GUID as a data center).
+  # Answer: When we force remove an unattached storage domain, ansible tries to move it to maintenance and detach it first,
+  # although it might be that this storage domain is already detached and has no related data center, therefore the move to maintenance will fail.
+
+ # We set an initial value for sp_uuid since this task is being called
+ # multiple times from the main task and sp_uuid is stateful.
+ - name: Set default boolean value for sp_uuid
+ set_fact: sp_uuid=True
+
+ - name: Detached storage domain - Set sp_uuid with empty GUID
+ set_fact: sp_uuid="00000000-0000-0000-0000-000000000000"
+ when: sd.data_centers is not defined
+
+ - name: Detached storage domain - Fetch active host for remove
+ ovirt_host_info:
+ pattern: "status=up"
+ auth: "{{ ovirt_auth }}"
+ register: host_info
+ when: sd.data_centers is not defined
+
+ - name: Attached storage domain - Fetch active host for remove
+ ovirt_host_info:
+ pattern: "status=up and storage={{ sd.name }}"
+ auth: "{{ ovirt_auth }}"
+ register: host_info
+ when: sd.data_centers is defined
+
+  # If sp_uuid still holds its default boolean value, the storage domain is
+  # attached to a data center.
+ - name: Attached storage domain - Set sp_uuid
+ set_fact: sp_uuid="{{ sd.data_centers[0].id }}"
+ when: sp_uuid
+
+ - name: Remove storage domain with no force
+ include_tasks: remove_domain.yml
+ vars:
+ host: "{{ host_info.ovirt_hosts[0].id }}"
+ when: "host_info.ovirt_hosts is defined and host_info.ovirt_hosts|length > 0 and not dr_force"
+
+ - name: Force remove storage domain
+ include_tasks: remove_domain.yml
+ vars:
+ host: "00000000-0000-0000-0000-000000000000"
+ when: "dr_force"
+ ignore_errors: "{{ dr_ignore_error_clean }}"
+ tags:
+ - fail_back
+ - clean_engine
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_invalid_filtered_master_domains.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_invalid_filtered_master_domains.yml
new file mode 100644
index 00000000..af0af3bf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_invalid_filtered_master_domains.yml
@@ -0,0 +1,20 @@
+- block:
+ - name: Fetch invalid storage domain for remove
+ ovirt_storage_domain_info:
+ pattern: name={{ storage['dr_' + dr_source_map + '_name'] }} and {{ dr_inactive_domain_search }}
+ auth: "{{ ovirt_auth }}"
+ register: storage_domain_info
+
+ - name: Remove invalid storage domain
+ include_tasks: remove_domain_process.yml
+ vars:
+ sd: "{{ sd }}"
+ with_items:
+ - "{{ storage_domain_info.ovirt_storage_domains }}"
+ when: (not only_master and not sd.master) or (only_master and sd.master)
+ loop_control:
+ loop_var: sd
+ ignore_errors: "{{ dr_ignore_error_clean }}"
+ tags:
+ - fail_back
+ - clean_engine
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_valid_filtered_master_domains.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_valid_filtered_master_domains.yml
new file mode 100644
index 00000000..b5034f2c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_valid_filtered_master_domains.yml
@@ -0,0 +1,26 @@
+- block:
+ - name: Fetch active/maintenance/detached storage domain for remove
+ ovirt_storage_domain_info:
+ pattern: >
+ name={{ storage['dr_' + dr_source_map + '_name'] }} and
+ (
+ datacenter={{ storage['dr_' + dr_source_map + '_dc_name'] }} and {{ dr_active_domain_search }} or
+ datacenter={{ storage['dr_' + dr_source_map + '_dc_name'] }} and {{ dr_maintenance_domain_search }} or
+ {{ dr_unattached_domain_search }}
+ )
+ auth: "{{ ovirt_auth }}"
+ register: storage_domain_info
+
+ - name: Remove valid storage domain
+ include_tasks: remove_domain_process.yml
+ vars:
+ sd: "{{ sd }}"
+ with_items:
+ - "{{ storage_domain_info.ovirt_storage_domains }}"
+ when: (not only_master and not sd.master) or (only_master and sd.master)
+ loop_control:
+ loop_var: sd
+ ignore_errors: "{{ dr_ignore_error_clean }}"
+ tags:
+ - fail_back
+ - clean_engine
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_vms.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_vms.yml
new file mode 100644
index 00000000..4b8edcc7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/remove_vms.yml
@@ -0,0 +1,10 @@
+- block:
+ - name: Remove diskless VMs
+ ovirt_vm:
+ state: absent
+ name: "{{ vm.name }}"
+ auth: "{{ ovirt_auth }}"
+ ignore_errors: "{{ dr_ignore_error_clean }}"
+ tags:
+ - fail_back
+ - clean_engine
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/shutdown_vm.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/shutdown_vm.yml
new file mode 100644
index 00000000..e61b22d5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/shutdown_vm.yml
@@ -0,0 +1,12 @@
+- block:
+ - name: Shutdown VM
+ ovirt_vm:
+ state: stopped
+ name: "{{ vms.name }}"
+ force: True
+ wait: True
+ auth: "{{ ovirt_auth }}"
+ ignore_errors: "{{ dr_ignore_error_clean }}"
+ tags:
+ - fail_back
+ - clean_engine
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/shutdown_vms.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/shutdown_vms.yml
new file mode 100644
index 00000000..d3a49aa3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/shutdown_vms.yml
@@ -0,0 +1,21 @@
+- block:
+ # Get all the running VMs related to a storage domain and shut them down
+ - name: Fetch VMs in the storage domain
+ ovirt_vm_info:
+ pattern: >
+ status != down and
+ storage.name={{ storage['dr_' + dr_source_map + '_name'] }} and
+ datacenter={{ storage['dr_' + dr_source_map + '_dc_name'] }}
+ auth: "{{ ovirt_auth }}"
+ register: vm_info
+
+ # TODO: Add a wait until the VM is really down
+ - name: Shutdown VMs
+ include_tasks: shutdown_vm.yml
+ vars:
+ vms: "{{ item }}"
+ with_items: "{{ vm_info.ovirt_vms }}"
+ ignore_errors: "{{ dr_ignore_error_clean }}"
+ tags:
+ - fail_back
+ - clean_engine
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/update_ovf_store.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/update_ovf_store.yml
new file mode 100644
index 00000000..2a6c324e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean/update_ovf_store.yml
@@ -0,0 +1,18 @@
+- block:
+ - name: Fetch storage domain only if active
+ ovirt_storage_domain_info:
+ pattern: status = active and storage.name={{ storage['dr_' + dr_source_map + '_name'] }}
+ auth: "{{ ovirt_auth }}"
+ register: storage_domain_info
+
+ - name: Update OVF store for active storage domain
+ ovirt_storage_domain:
+ state: update_ovf_store
+      name: "{{ storage['dr_' + dr_source_map + '_name'] }}"
+ auth: "{{ ovirt_auth }}"
+ with_items:
+ - "{{ storage_domain_info.ovirt_storage_domains }}"
+ ignore_errors: "{{ dr_ignore_error_clean }}"
+ tags:
+ - fail_back
+ - clean_engine
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean_engine.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean_engine.yml
new file mode 100644
index 00000000..c7f70e6b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/clean_engine.yml
@@ -0,0 +1,128 @@
+- block:
+ - name: Obtain SSO token
+ ovirt_auth:
+ url: "{{ vars['dr_sites_' + dr_source_map + '_url'] }}"
+ username: "{{ vars['dr_sites_' + dr_source_map + '_username'] }}"
+ password: "{{ vars['dr_sites_' + dr_source_map + '_password'] }}"
+ ca_file: "{{ vars['dr_sites_' + dr_source_map + '_ca_file'] }}"
+
+ - name: Shutdown running VMs
+ include_tasks: clean/shutdown_vms.yml
+ with_items:
+ - "{{ dr_import_storages }}"
+ loop_control:
+ loop_var: storage
+
+ - name: Update OVF_STORE disk for storage domains
+ include_tasks: clean/update_ovf_store.yml
+ with_items:
+ - "{{ dr_import_storages }}"
+ loop_control:
+ loop_var: storage
+
+ - name: Set force remove flag to false for non master domains
+ set_fact: dr_force=False
+
+ # Set all the queries suffix to fetch a storage domain in a specific status.
+ # Note: Export storage domain is not supported and should not be part of storage mapping
+ - name: Setup queries for storage domains
+ set_fact:
+ dr_active_domain_search='status = active and type != cinder'
+ dr_maintenance_domain_search='status = maintenance and type != cinder'
+ dr_unattached_domain_search='status = unattached and type != cinder and type != glance'
+ dr_inactive_domain_search='type != glance and type != cinder and status != active'
+
+ - name: Set master storage domain filter
+ set_fact: only_master=False
+
+ - name: Remove non master storage domains with valid statuses
+ include_tasks: clean/remove_valid_filtered_master_domains.yml
+ with_items:
+ - "{{ dr_import_storages }}"
+ loop_control:
+ loop_var: storage
+
+  # We use the inactive filter only at the end, since we cannot tell whether any storage domains
+  # became inactive during the process or were already inactive at the beginning.
+ - name: Set force remove flag to true for non master domains
+ set_fact: dr_force=True
+
+ - name: Remove non master storage domains with invalid statuses using force remove
+ include_tasks: clean/remove_invalid_filtered_master_domains.yml
+ with_items:
+ - "{{ dr_import_storages }}"
+ loop_control:
+ loop_var: storage
+
+ - name: Set master storage domain filter
+ set_fact: only_master=True
+
+ - name: Set force remove flag to false for master domain
+ set_fact: dr_force=False
+
+ - name: Remove master storage domains with valid statuses
+ include_tasks: clean/remove_valid_filtered_master_domains.yml
+ with_items:
+ - "{{ dr_import_storages }}"
+ loop_control:
+ loop_var: storage
+
+ - name: Set force remove flag to true for master domain
+ set_fact: dr_force=True
+
+ - name: Remove master storage domains with invalid statuses using force remove
+ include_tasks: clean/remove_invalid_filtered_master_domains.yml
+ with_items:
+ - "{{ dr_import_storages }}"
+ loop_control:
+ loop_var: storage
+
+ - name: Fetch leftover storage domains
+ ovirt_storage_domain_info:
+ pattern: type != glance
+ auth: "{{ ovirt_auth }}"
+ register: storage_domain_info
+
+ # TODO: Document that behavior
+ # Remove VMs only if there are no data storage domains left in the setup
+ - name: Fetch leftover VMs in the setup
+ ovirt_vm_info:
+ pattern: status = down
+ auth: "{{ ovirt_auth }}"
+ register: vm_info
+ when: dr_clean_orphaned_vms and storage_domain_info.ovirt_storage_domains | length == 0
+
+ - name: Remove vms if no storage domains left in setup
+ include_tasks: clean/remove_vms.yml
+ vars:
+ vm: "{{ item }}"
+ with_items: "{{ vm_info.ovirt_vms }}"
+ when: dr_clean_orphaned_vms and storage_domain_info.ovirt_storage_domains | length == 0
+
+ # Remove direct LUN disks
+ - name: Fetch leftover direct LUN disks in the setup
+ ovirt_disk_info:
+      pattern: disk_type = lun and number_of_vms = 0
+ auth: "{{ ovirt_auth }}"
+ register: disk_info
+ when: dr_clean_orphaned_disks and storage_domain_info.ovirt_storage_domains | length == 0
+
+ - name: Remove LUN disks if no storage domains left in setup
+ include_tasks: clean/remove_disks.yml
+ vars:
+ disk: "{{ item }}"
+ with_items: "{{ disk_info.ovirt_disks }}"
+ when: dr_clean_orphaned_disks and storage_domain_info.ovirt_storage_domains | length == 0
+
+
+ # Default value is set in role defaults
+ ignore_errors: "{{ dr_ignore_error_clean }}"
+ tags:
+ - fail_back
+ - clean_engine
+
+ always:
+ - name: Revoke the SSO token
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/generate_mapping.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/generate_mapping.yml
new file mode 100644
index 00000000..67be0250
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/generate_mapping.yml
@@ -0,0 +1,6 @@
+- block:
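+  # The site/username/password/ca/var_file values are supplied as extra vars
+  # by files/generate_vars.py when it runs the play with the
+  # 'generate_mapping' tag.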
+ - name: Generate mapping var file
+ command: python3 {{ role_path }}/files/generate_mapping.py -a "{{ site }}" -u "{{ username }}" -p "{{ password }}" -c "{{ ca }}" -f "{{ var_file }}"
+ run_once: true
+ tags:
+ - generate_mapping
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/main.yml
new file mode 100644
index 00000000..ca2afc86
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/main.yml
@@ -0,0 +1,33 @@
+- block:
+ - name: Start to unregister entities
+ include_tasks: unregister_entities.yml
+ tags:
+ - fail_back
+
+ - name: Clean engine setup
+ include_tasks: clean_engine.yml
+ tags:
+ - fail_back
+ - clean_engine
+
+ - name: Failback Replication Sync pause
+ pause:
+ prompt: "[Failback Replication Sync] Please press ENTER once the destination storage domains are ready to be used for the destination setup"
+ tags:
+ - fail_back
+
+ - name: Recover target engine
+ include_tasks: recover_engine.yml
+ tags:
+ - fail_over
+ - fail_back
+
+ - name: Run the appropriate unregistered entities
+ include_tasks: run_unregistered_entities.yml
+ tags:
+ - fail_back
+
+  - name: Generate mapping var file
+ include_tasks: generate_mapping.yml
+ tags:
+ - generate_mapping
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_domain.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_domain.yml
new file mode 100644
index 00000000..31c2449f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_domain.yml
@@ -0,0 +1,55 @@
+- block:
+ - name: Fetch available hosts in data center
+ ovirt_host_info:
+ pattern: "status=up and datacenter={{ storage['dr_' + dr_target_host + '_dc_name'] }}"
+ auth: "{{ ovirt_auth }}"
+ register: host_info
+ - block:
+ - name: "Check for available hosts"
+ fail: msg="No hosts available"
+ when: host_info.ovirt_hosts.0 is undefined
+ - block:
+ - name: Add storage domain if NFS
+ include_tasks: add_nfs_domain.yml
+ with_items:
+ - "{{ storage }}"
+ when: "storage.dr_domain_type == 'nfs'"
+ loop_control:
+ loop_var: nfs_storage
+
+ - name: Add storage domain if Gluster
+ include_tasks: add_glusterfs_domain.yml
+ with_items:
+ - "{{ storage }}"
+ when: "storage.dr_domain_type == 'glusterfs'"
+ loop_control:
+ loop_var: gluster_storage
+
+ - name: Add storage domain if Posix
+ include_tasks: add_posixfs_domain.yml
+ with_items:
+ - "{{ storage }}"
+ when: "storage.dr_domain_type == 'posixfs'"
+ loop_control:
+ loop_var: posix_storage
+
+    - name: Add storage domain if iSCSI
+ include_tasks: add_iscsi_domain.yml
+ with_items:
+ - "{{ storage }}"
+ when: "storage.dr_domain_type == 'iscsi'"
+ loop_control:
+ loop_var: iscsi_storage
+
+    - name: Add storage domain if FCP
+ include_tasks: add_fcp_domain.yml
+ with_items:
+ - "{{ storage }}"
+ when: "storage.dr_domain_type == 'fcp'"
+ loop_control:
+ loop_var: fcp_storage
+ when: host_info.ovirt_hosts.0 is defined
+ ignore_errors: "{{ dr_ignore_error_recover }}"
+ tags:
+ - fail_over
+ - fail_back
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_fcp_domain.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_fcp_domain.yml
new file mode 100644
index 00000000..dc119204
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_fcp_domain.yml
@@ -0,0 +1,30 @@
+- block:
+ - name: Import FCP storage domain
+ ovirt_storage_domain:
+ state: imported
+ id: "{{ fcp_storage['dr_domain_id'] }}"
+ name: "{{ fcp_storage['dr_' + dr_target_host + '_name']|default('') }}"
+ critical_space_action_blocker: "{{ fcp_storage['dr_critical_space_action_blocker'] }}"
+ warning_low_space: "{{ fcp_storage['dr_warning_low_space'] }}"
+ discard_after_delete: "{{ fcp_storage['dr_discard_after_delete'] }}"
+ wipe_after_delete: "{{ fcp_storage['dr_wipe_after_delete'] }}"
+ backup: "{{ fcp_storage['dr_backup'] }}"
+ host: "{{ host_info.ovirt_hosts[0].name }}"
+ auth: "{{ ovirt_auth }}"
+ data_center: "{{ fcp_storage['dr_' + dr_target_host + '_dc_name'] }}"
+ fcp: {}
+ register: result
+
+ - name: Log append to succeed_storage_domains
+ set_fact:
+ succeed_storage_domains: "{{ succeed_storage_domains }} + [ \"{{ fcp_storage['dr_' + dr_target_host + '_name']|default('') }}\" ]"
+ when: result is succeeded
+
+ - name: Log append to failed_storage_domains
+ set_fact:
+ failed_storage_domains: "{{ failed_storage_domains }} + [ \"{{ fcp_storage['dr_' + dr_target_host + '_name']|default('') }}\" ]"
+ when: result is failed
+ ignore_errors: "{{ dr_ignore_error_recover }}"
+ tags:
+ - fail_over
+ - fail_back
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_glusterfs_domain.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_glusterfs_domain.yml
new file mode 100644
index 00000000..6960348b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_glusterfs_domain.yml
@@ -0,0 +1,30 @@
+- block:
+ - name: Add Gluster storage domain
+ ovirt_storage_domain:
+ name: "{{ gluster_storage['dr_' + dr_target_host + '_name'] }}"
+ critical_space_action_blocker: "{{ gluster_storage['dr_critical_space_action_blocker'] }}"
+ domain_function: "{{ gluster_storage['dr_storage_domain_type'] }}"
+ warning_low_space: "{{ gluster_storage['dr_warning_low_space'] }}"
+ wipe_after_delete: "{{ gluster_storage['dr_wipe_after_delete'] }}"
+ backup: "{{ gluster_storage['dr_backup'] }}"
+ host: "{{ host_info.ovirt_hosts[0].name }}"
+ data_center: "{{ gluster_storage['dr_' + dr_target_host + '_dc_name'] }}"
+ auth: "{{ ovirt_auth }}"
+ glusterfs:
+ path: "{{ gluster_storage['dr_' + dr_target_host + '_path'] }}"
+ address: "{{ gluster_storage['dr_' + dr_target_host + '_address'] }}"
+ register: result
+
+ - name: Log append to succeed_storage_domains
+ set_fact:
+ succeed_storage_domains: "{{ succeed_storage_domains }} + [ \"{{ gluster_storage['dr_' + dr_target_host + '_name'] }}\" ]"
+ when: result is succeeded
+
+ - name: Log append to failed_storage_domains
+ set_fact:
+ failed_storage_domains: "{{ failed_storage_domains }} + [ \"{{ gluster_storage['dr_' + dr_target_host + '_name'] }}\" ]"
+ when: result is failed
+ ignore_errors: "{{ dr_ignore_error_recover }}"
+ tags:
+ - fail_over
+ - fail_back
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_iscsi_domain.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_iscsi_domain.yml
new file mode 100644
index 00000000..a0b3bd77
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_iscsi_domain.yml
@@ -0,0 +1,58 @@
+- block:
+ # TODO: Add support for connecting to multiple targets with the same LUN.
+ # Every connection should be made using a different IP.
+ - block:
+ - name: Login to iSCSI targets
+ ovirt_host:
+ state: iscsilogin
+ name: "{{ host_info.ovirt_hosts[0].name }}"
+ auth: "{{ ovirt_auth }}"
+ iscsi:
+ username: "{{ iscsi_storage['dr_' + dr_target_host + '_username']|default('') }}"
+ password: "{{ iscsi_storage['dr_' + dr_target_host + '_password']|default('') }}"
+ address: "{{ iscsi_storage['dr_' + dr_target_host + '_address'] }}"
+ target: "{{ dr_target }}"
+ # Make the port optional
+ port: "{{ iscsi_storage['dr_' + dr_target_host + '_port']|default('3260'|int, true) }}"
+ with_items:
+ - "{{ iscsi_storage['dr_' + dr_target_host + '_target'] }}"
+ loop_control:
+ loop_var: dr_target
+
+ - name: Import iSCSI storage domain
+ ovirt_storage_domain:
+ state: imported
+ id: "{{ iscsi_storage['dr_domain_id'] }}"
+ name: "{{ iscsi_storage['dr_' + dr_target_host + '_name']|default('') }}"
+ host: "{{ host_info.ovirt_hosts[0].name }}"
+ auth: "{{ ovirt_auth }}"
+ data_center: "{{ iscsi_storage['dr_' + dr_target_host + '_dc_name'] }}"
+ critical_space_action_blocker: "{{ iscsi_storage['dr_critical_space_action_blocker'] }}"
+ warning_low_space: "{{ iscsi_storage['dr_warning_low_space'] }}"
+ wipe_after_delete: "{{ iscsi_storage['dr_wipe_after_delete'] }}"
+ discard_after_delete: "{{ iscsi_storage['dr_discard_after_delete'] }}"
+ backup: "{{ iscsi_storage['dr_backup'] }}"
+ # TODO: For an iSCSI import the iscsi parameters are not needed
+ iscsi:
+ username: "{{ iscsi_storage['dr_' + dr_target_host + '_username']|default('') }}"
+ password: "{{ iscsi_storage['dr_' + dr_target_host + '_password']|default('') }}"
+ address: "{{ iscsi_storage['dr_' + dr_target_host + '_address'] }}"
+ # We pass a target because state 'imported' in ovirt_storage_domain.py creates a storage
+ # domain, which triggers a login; therefore a target must be supplied although the
+ # targets were already connected above. The first target in the list is passed as a transient target.
+ target: "{{ dr_target }}"
+ with_items:
+ - "{{ iscsi_storage['dr_' + dr_target_host + '_target'] }}"
+ loop_control:
+ loop_var: dr_target
+ - name: Log append to succeed_storage_domains
+ set_fact:
+ succeed_storage_domains: "{{ succeed_storage_domains }} + [ \"{{ iscsi_storage['dr_' + dr_target_host + '_name']|default('') }}\" ]"
+ rescue:
+ - name: Log append to failed_storage_domains
+ set_fact:
+ failed_storage_domains: "{{ failed_storage_domains }} + [ \"{{ iscsi_storage['dr_' + dr_target_host + '_name']|default('') }}\" ]"
+ ignore_errors: "{{ dr_ignore_error_recover }}"
+ tags:
+ - fail_over
+ - fail_back
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_nfs_domain.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_nfs_domain.yml
new file mode 100644
index 00000000..8c4fa2ac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_nfs_domain.yml
@@ -0,0 +1,26 @@
+- block:
+ - name: Add NFS storage domain
+ ovirt_storage_domain:
+ name: "{{ nfs_storage['dr_' + dr_target_host + '_name'] }}"
+ domain_function: "{{ nfs_storage['dr_storage_domain_type'] }}"
+ critical_space_action_blocker: "{{ nfs_storage['dr_critical_space_action_blocker'] }}"
+ wipe_after_delete: "{{ nfs_storage['dr_wipe_after_delete'] }}"
+ backup: "{{ nfs_storage['dr_backup'] }}"
+ host: "{{ host_info.ovirt_hosts[0].name }}"
+ data_center: "{{ nfs_storage['dr_' + dr_target_host + '_dc_name'] }}"
+ auth: "{{ ovirt_auth }}"
+ nfs:
+ path: "{{ nfs_storage['dr_' + dr_target_host + '_path'] }}"
+ address: "{{ nfs_storage['dr_' + dr_target_host + '_address'] }}"
+ - name: Log append to successful storage domains
+ set_fact:
+ succeed_storage_domains: "{{ succeed_storage_domains }} + [ \"{{ nfs_storage['dr_' + dr_target_host + '_name'] }}\" ]"
+
+ rescue:
+ - name: Log append to failed storage domains
+ set_fact:
+ failed_storage_domains: "{{ failed_storage_domains }} + [ \"{{ nfs_storage['dr_' + dr_target_host + '_name'] }}\" ]"
+ ignore_errors: "{{ dr_ignore_error_recover }}"
+ tags:
+ - fail_over
+ - fail_back
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_posixfs_domain.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_posixfs_domain.yml
new file mode 100644
index 00000000..e48c61ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/add_posixfs_domain.yml
@@ -0,0 +1,31 @@
+- block:
+ - name: Add posix storage domain
+ ovirt_storage_domain:
+ name: "{{ posix_storage['dr_' + dr_target_host + '_name'] }}"
+ critical_space_action_blocker: "{{ posix_storage['dr_critical_space_action_blocker'] }}"
+ domain_function: "{{ posix_storage['dr_storage_domain_type'] }}"
+ warning_low_space: "{{ posix_storage['dr_warning_low_space'] }}"
+ wipe_after_delete: "{{ posix_storage['dr_wipe_after_delete'] }}"
+ backup: "{{ posix_storage['dr_backup'] }}"
+ host: "{{ host_info.ovirt_hosts[0].name }}"
+ data_center: "{{ posix_storage['dr_' + dr_target_host + '_dc_name'] }}"
+ auth: "{{ ovirt_auth }}"
+ posixfs:
+ vfs_type: "{{ posix_storage['dr_' + dr_target_host + '_vfs_type'] }}"
+ path: "{{ posix_storage['dr_' + dr_target_host + '_path'] }}"
+ address: "{{ posix_storage['dr_' + dr_target_host + '_address'] }}"
+ register: result
+
+ - name: Log append to succeed_storage_domains
+ set_fact:
+ succeed_storage_domains: "{{ succeed_storage_domains }} + [ \"{{ posix_storage['dr_' + dr_target_host + '_name'] }}\" ]"
+ when: result is succeeded
+
+ - name: Log append to failed_storage_domains
+ set_fact:
+ failed_storage_domains: "{{ failed_storage_domains }} + [ \"{{ posix_storage['dr_' + dr_target_host + '_name'] }}\" ]"
+ when: result is failed
+ ignore_errors: "{{ dr_ignore_error_recover }}"
+ tags:
+ - fail_over
+ - fail_back
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/print_info.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/print_info.yml
new file mode 100644
index 00000000..1dd6f83f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/print_info.yml
@@ -0,0 +1,16 @@
+- block:
+ - name: Generate log file through template
+ template:
+ src: report_log_template.j2
+ dest: /tmp/{{ dr_report_file }}
+ mode: preserve
+
+ - name: Read report file
+ command: cat /tmp/{{ dr_report_file }}
+ register: content
+
+ - name: Print report file to stdout
+ debug: msg="{{ content.stdout_lines | quote }}"
+ tags:
+ - fail_over
+ - fail_back
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_template.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_template.yml
new file mode 100644
index 00000000..ec2bc8b3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_template.yml
@@ -0,0 +1,27 @@
+- block:
+ - name: Register unregistered Template
+ ovirt_template:
+ state: registered
+ storage_domain: "{{ storage.name }}"
+ id: "{{ unreg_template.id }}"
+ allow_partial_import: "{{ dr_partial_import }}"
+ auth: "{{ ovirt_auth }}"
+ cluster_mappings: "{{ dr_cluster_map }}"
+ domain_mappings: "{{ dr_domain_map }}"
+ vnic_profile_mappings: "{{ dr_network_map }}"
+ role_mappings: "{{ dr_role_map }}"
+ register: template_register_result
+
+ - name: Log append failed Template to failed_template_names
+ set_fact:
+ failed_template_names: "{{ failed_template_names }} + [ '{{ unreg_template.name }}' ]"
+ when: template_register_result is failed
+
+ - name: Log append succeed_template_names
+ set_fact:
+ succeed_template_names: "{{ succeed_template_names }} + [ '{{ unreg_template.name }}' ]"
+ when: template_register_result is succeeded
+ ignore_errors: "{{ dr_ignore_error_recover }}"
+ tags:
+ - fail_over
+ - fail_back
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_templates.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_templates.yml
new file mode 100644
index 00000000..c1b48102
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_templates.yml
@@ -0,0 +1,21 @@
+- block:
+ - name: Fetch unregistered Templates from storage domain
+ ovirt_storage_template_info:
+ nested_attributes: "id"
+ unregistered: True
+ storage_domain: "{{ storage.name }}"
+ auth: "{{ ovirt_auth }}"
+ register: storage_template_info
+
+ - name: Register template
+ include: register_template.yml
+ # The main block is already declared to ignore errors, so adding
+ # ignore_errors: "{{ ignore | default(yes) }}" here would be redundant.
+ with_items: "{{ storage_template_info.ovirt_storage_templates }}"
+ # We use loop_control so storage.name will not be overridden by the nested loop.
+ loop_control:
+ loop_var: unreg_template
+ ignore_errors: "{{ dr_ignore_error_recover }}"
+ tags:
+ - fail_over
+ - fail_back
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_vm.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_vm.yml
new file mode 100644
index 00000000..dc91df88
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_vm.yml
@@ -0,0 +1,31 @@
+- block:
+ - name: Register VMs
+ ovirt_vm:
+ state: registered
+ storage_domain: "{{ storage.name }}"
+ id: "{{ unreg_vm.id }}"
+ auth: "{{ ovirt_auth }}"
+ allow_partial_import: "{{ dr_partial_import }}"
+ cluster_mappings: "{{ dr_cluster_map }}"
+ domain_mappings: "{{ dr_domain_map }}"
+ role_mappings: "{{ dr_role_map }}"
+ affinity_group_mappings: "{{ dr_affinity_group_map }}"
+ affinity_label_mappings: "{{ dr_affinity_label_map }}"
+ vnic_profile_mappings: "{{ dr_network_map }}"
+ lun_mappings: "{{ dr_lun_map }}"
+ reassign_bad_macs: "{{ dr_reset_mac_pool }}"
+ register: vm_register_result
+
+ - name: Log append failed VM to failed_vm_names
+ set_fact:
+ failed_vm_names: "{{ failed_vm_names }} + [ '{{ unreg_vm.name }}' ]"
+ when: vm_register_result is failed
+
+ - name: Log append succeed_vm_names
+ set_fact:
+ succeed_vm_names: "{{ succeed_vm_names }} + [ '{{ unreg_vm.name }}' ]"
+ when: vm_register_result is succeeded
+ ignore_errors: "{{ dr_ignore_error_recover }}"
+ tags:
+ - fail_over
+ - fail_back
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_vms.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_vms.yml
new file mode 100644
index 00000000..d6445b14
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/register_vms.yml
@@ -0,0 +1,24 @@
+- block:
+ - name: Fetch unregistered VMs from storage domain
+ ovirt_storage_vm_info:
+ nested_attributes: "id"
+ unregistered: True
+ storage_domain: "{{ storage.name }}"
+ auth: "{{ ovirt_auth }}"
+ register: storage_vm_info
+
+ - name: Set unregistered VMs
+ set_fact:
+ unreg_vms: "{{ unreg_vms|default([]) }} + {{ storage_vm_info.ovirt_storage_vms }}"
+
+ # TODO: We should filter out VMs which already exist in the setup (diskless VMs)
+ - name: Register VM
+ include: register_vm.yml
+ with_items: "{{ storage_vm_info.ovirt_storage_vms }}"
+ # We use loop_control so storage.name will not be overridden by the nested loop.
+ loop_control:
+ loop_var: unreg_vm
+ ignore_errors: "{{ dr_ignore_error_recover }}"
+ tags:
+ - fail_over
+ - fail_back
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/report_log_template.j2 b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/report_log_template.j2
new file mode 100644
index 00000000..6fbccb8a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/report_log_template.j2
@@ -0,0 +1,24 @@
+{% if succeed_vm_names | length > 0 %}
+ The following VMs registered successfully: {{ succeed_vm_names | unique | join (", ") }}
+{% endif %}
+{% if failed_vm_names | length > 0 %}
+ The following VMs failed to be registered: {{ failed_vm_names | unique | join (", ") }}
+{% endif %}
+{% if succeed_template_names | length > 0 %}
+ The following Templates registered successfully: {{ succeed_template_names | unique | join (", ") }}
+{% endif %}
+{% if failed_template_names | length > 0 %}
+ The following Templates failed to be registered: {{ failed_template_names | unique | join (", ") }}
+{% endif %}
+{% if succeed_to_run_vms | length > 0 %}
+ The following VMs started successfully: {{ succeed_to_run_vms | unique | join (", ") }}
+{% endif %}
+{% if failed_to_run_vms | length > 0 %}
+ The following VMs failed to run: {{ failed_to_run_vms | unique | join (", ") }}
+{% endif %}
+{% if succeed_storage_domains | length > 0 %}
+ The following storage domains were successfully added: {{ succeed_storage_domains | unique | join (", ") }}
+{% endif %}
+{% if failed_storage_domains | length > 0 %}
+ The following storage domains were not added: {{ failed_storage_domains | unique | join (", ") }}
+{% endif %}
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/run_vms.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/run_vms.yml
new file mode 100644
index 00000000..cdf06ef3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover/run_vms.yml
@@ -0,0 +1,21 @@
+- block:
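+ # Note: wait is False below, so each task only submits the start request and
+ # does not wait for the VM to actually reach the running state.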
+ - name: Run VMs
+ ovirt_vm:
+ state: running
+ name: "{{ vms.name }}"
+ wait: False
+ auth: "{{ ovirt_auth }}"
+ register: result
+ - name: Log append succeed_to_run_vms
+ set_fact:
+ succeed_to_run_vms: "{{ succeed_to_run_vms }} + [ '{{ vms.name }}' ]"
+ when: result is succeeded
+
+ - name: Log append failed_to_run_vms
+ set_fact:
+ failed_to_run_vms: "{{ failed_to_run_vms }} + [ '{{ vms.name }}' ]"
+ when: result is failed
+ ignore_errors: "{{ dr_ignore_error_recover }}"
+ tags:
+ - fail_over
+ - fail_back
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover_engine.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover_engine.yml
new file mode 100644
index 00000000..03f6e9af
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/recover_engine.yml
@@ -0,0 +1,211 @@
+- block:
+ - name: Obtain SSO token
+ ovirt_auth:
+ url: "{{ vars['dr_sites_' + dr_target_host + '_url'] }}"
+ username: "{{ vars['dr_sites_' + dr_target_host + '_username'] }}"
+ password: "{{ vars['dr_sites_' + dr_target_host + '_password'] }}"
+ ca_file: "{{ vars['dr_sites_' + dr_target_host + '_ca_file'] }}"
+ ignore_errors: False
+
+ - name: Delete previous report log
+ file:
+ path: "/tmp/{{ dr_report_file }}"
+ state: absent
+ ignore_errors: True
+
+ - name: Create report file
+ file:
+ path: "/tmp/{{ dr_report_file }}"
+ state: touch
+ mode: 0644
+
+ - name: Init entity status list
+ set_fact:
+ failed_vm_names: []
+ succeed_vm_names: []
+ failed_template_names: []
+ succeed_template_names: []
+ failed_to_run_vms: []
+ succeed_to_run_vms: []
+ succeed_storage_domains: []
+ failed_storage_domains: []
+
+ # TODO: We should add a validation task that verifies that none of the hosts
+ # in the other site (primary or secondary) can be connected, and also set a
+ # timer that waits at least 180 seconds before the first attach takes place.
+ # We should do that to prevent a Sanlock failure when acquiring the lockspace.
+ # A flag, defaulting to true, should control whether this check runs.
+
+ # TODO: What happens if the master domain fails to attach?
+ # Do we still want to continue and attach the other storage
+ # domains (which would make another storage domain the master instead)?
+ - name: Add master storage domain to the setup
+ include_tasks: recover/add_domain.yml
+ vars:
+ storage: "{{ item }}"
+ with_items:
+ - "{{ dr_import_storages }}"
+ when: item['dr_' + dr_target_host + '_master_domain']
+
+ - name: Add non-master storage domains to the setup
+ include_tasks: recover/add_domain.yml
+ vars:
+ storage: "{{ item }}"
+ with_items:
+ - "{{ dr_import_storages }}"
+ when: not item['dr_' + dr_target_host + '_master_domain']
+
+ # Get all the active storage domains in the setup to register
+ # all the templates/VMs/Disks
+ - name: Fetch active storage domains
+ ovirt_storage_domain_info:
+ pattern: "status=active"
+ auth: "{{ ovirt_auth }}"
+ register: storage_domain_info
+
+ - name: Set initial Maps
+ set_fact:
+ dr_cluster_map: "{{ [] }}"
+ dr_affinity_group_map: "{{ [] }}"
+ dr_affinity_label_map: "{{ [] }}"
+ dr_domain_map: "{{ [] }}"
+ dr_role_map: "{{ [] }}"
+ dr_lun_map: "{{ [] }}"
+ dr_network_map: "{{ [] }}"
+
+ - name: Set Cluster Map
+ set_fact:
+ dr_cluster_map: "{{ dr_cluster_map }} + {{ [
+ {
+ 'source_name': item[dr_source_map + '_name'] | default('EMPTY_ELEMENT', true),
+ 'dest_name': item[dr_target_host + '_name'] | default('EMPTY_ELEMENT', true)
+ }
+ ] }}"
+ with_items: "{{ dr_cluster_mappings }}"
+ when: dr_cluster_mappings is not none
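+ # Illustrative example (names are hypothetical): with dr_source_map=secondary
+ # and dr_target_host=primary, a mapping entry
+ # { primary_name: 'cluster-a', secondary_name: 'cluster-b' }
+ # yields { 'source_name': 'cluster-b', 'dest_name': 'cluster-a' }.
+ # The remaining maps below are built the same way.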
+
+ - name: Set Affinity Group Map
+ set_fact:
+ dr_affinity_group_map: "{{ dr_affinity_group_map }} + {{ [
+ {
+ 'source_name': item[dr_source_map + '_name'] | default('EMPTY_ELEMENT', true),
+ 'dest_name': item[dr_target_host + '_name'] | default('EMPTY_ELEMENT', true)
+ }
+ ] }}"
+ with_items: "{{ dr_affinity_group_mappings }}"
+ when: dr_affinity_group_mappings is not none
+
+ - name: Set Network Map
+ set_fact:
+ dr_network_map: "{{ dr_network_map }} + {{ [
+ {
+ 'source_network_name': item[dr_source_map + '_network_name'] | default('EMPTY_ELEMENT', true),
+ 'source_profile_name': item[dr_source_map + '_profile_name'] | default('EMPTY_ELEMENT', true),
+ 'target_network_dc': item[dr_target_host + '_network_dc'] | default('EMPTY_ELEMENT', true),
+ 'target_profile_id': item[dr_target_host + '_profile_id'] | default('00000000-0000-0000-0000-000000000000', true)
+ }
+ ] }}"
+ with_items: "{{ dr_network_mappings }}"
+ when: dr_network_mappings is not none
+
+ - name: Set Affinity Label Map
+ set_fact:
+ dr_affinity_label_map: "{{ dr_affinity_label_map }} + {{ [
+ {
+ 'source_name': item[dr_source_map + '_name'] | default('EMPTY_ELEMENT', true),
+ 'dest_name': item[dr_target_host + '_name'] | default('EMPTY_ELEMENT', true)
+ }
+ ] }}"
+ with_items: "{{ dr_affinity_label_mappings }}"
+ when: dr_affinity_label_mappings is not none
+
+ - name: Set aaa extensions Map
+ set_fact:
+ dr_domain_map: "{{ dr_domain_map }} + {{ [
+ {
+ 'source_name': item[dr_source_map + '_name'] | default('EMPTY_ELEMENT', true),
+ 'dest_name': item[dr_target_host + '_name'] | default('EMPTY_ELEMENT', true)
+ }
+ ] }}"
+ with_items: "{{ dr_domain_mappings }}"
+ when: dr_domain_mappings is not none
+
+ - name: Set Role Map
+ set_fact:
+ dr_role_map: "{{ dr_role_map }} + {{ [
+ {
+ 'source_name': item[dr_source_map + '_name'] | default('EMPTY_ELEMENT', true),
+ 'dest_name': item[dr_target_host + '_name'] | default('EMPTY_ELEMENT', true)
+ }
+ ] }}"
+ with_items: "{{ dr_role_mappings }}"
+ when: dr_role_mappings is not none
+
+ - name: Set Lun Map
+ set_fact:
+ dr_lun_map: "{{ dr_lun_map }} + {{ [
+ {
+ 'source_logical_unit_id': item[dr_source_map + '_logical_unit_id'] | default('EMPTY_ELEMENT', true),
+ 'source_storage_type': item[dr_source_map + '_storage_type'] | default('EMPTY_ELEMENT', true),
+ 'dest_logical_unit_id': item[dr_target_host + '_logical_unit_id'] | default('EMPTY_ELEMENT', true),
+ 'dest_storage_type': item[dr_target_host + '_storage_type'] | default('EMPTY_ELEMENT', true),
+ 'dest_logical_unit_address': item[dr_target_host + '_logical_unit_address'] | default('EMPTY_ELEMENT', true),
+ 'dest_logical_unit_port': item[dr_target_host + '_logical_unit_port'] | default('3260'|int, true),
+ 'dest_logical_unit_portal': item[dr_target_host + '_logical_unit_portal'] | default('1', true),
+ 'dest_logical_unit_username': item[dr_target_host + '_logical_unit_username'] | default('', true),
+ 'dest_logical_unit_password': item[dr_target_host + '_logical_unit_password'] | default('', true),
+ 'dest_logical_unit_target': item[dr_target_host + '_logical_unit_target'] | default('[]', true)
+ }
+ ] }}"
+ with_items: "{{ dr_lun_mappings }}"
+ when: dr_lun_mappings is not none
+
+ # First register all the unregistered templates based on the
+ # active storage domains we fetched before.
+ # We register the Templates first since we might have
+ # VMs which are based on them
+ - name: Register templates
+ include_tasks: recover/register_templates.yml
+ vars:
+ storage: "{{ item }}"
+ with_items:
+ - "{{ storage_domain_info.ovirt_storage_domains }}"
+
+ # Register all the unregistered VMs after we registered
+ # all the templates from the active storage domains fetched before.
+ - name: Register VMs
+ include_tasks: recover/register_vms.yml
+ vars:
+ storage: "{{ item }}"
+ with_items:
+ - "{{ storage_domain_info.ovirt_storage_domains }}"
+
+ # Run all the high availability VMs.
+ - name: Run highly available VMs
+ include_tasks: recover/run_vms.yml
+ vars:
+ vms: "{{ item }}"
+ with_items: "{{ unreg_vms }}"
+ when: item.status == 'up' and item.high_availability.enabled | bool
+
+ # Run all the rest of the VMs.
+ - name: Run the rest of the VMs
+ include_tasks: recover/run_vms.yml
+ vars:
+ vms: "{{ item }}"
+ with_items: "{{ unreg_vms }}"
+ when: item.status == 'up' and not item.high_availability.enabled | bool
+
+ # Default value is set in role defaults
+ ignore_errors: "{{ dr_ignore_error_recover }}"
+ tags:
+ - fail_over
+ - fail_back
+ always:
+ - name: Print operation summary
+ include_tasks: recover/print_info.yml
+ - name: Revoke the SSO token
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/run_unregistered_entities.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/run_unregistered_entities.yml
new file mode 100644
index 00000000..31b373af
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/run_unregistered_entities.yml
@@ -0,0 +1,40 @@
+- block:
+ - name: Obtain SSO token
+ ovirt_auth:
+ url: "{{ vars['dr_sites_' + dr_target_host + '_url'] }}"
+ username: "{{ vars['dr_sites_' + dr_target_host + '_username'] }}"
+ password: "{{ vars['dr_sites_' + dr_target_host + '_password'] }}"
+ ca_file: "{{ vars['dr_sites_' + dr_target_host + '_ca_file'] }}"
+
+ - name: Read file that contains running VMs from the previous setup
+ set_fact: running_vms_fail_back="{{ lookup('file', dr_running_vms) }}"
+
+ - name: Remove dr_running_vms file after being used
+ file:
+ path: "{{ dr_running_vms }}"
+ state: absent
+
+ - name: Run all the high availability VMs
+ include_tasks: recover/run_vms.yml
+ vars:
+ vms: "{{ item }}"
+ with_items: "{{ running_vms_fail_back }}"
+ when: item.high_availability.enabled | bool
+
+ - name: Run the rest of the running VMs
+ include_tasks: recover/run_vms.yml
+ vars:
+ vms: "{{ item }}"
+ with_items: "{{ running_vms_fail_back }}"
+ when: not item.high_availability.enabled | bool
+
+ # TODO: Remove dr_report_file
+
+ ignore_errors: "{{ dr_ignore_error_clean }}"
+ tags:
+ - fail_back
+ always:
+ - name: Revoke the SSO token
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/unregister_entities.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/unregister_entities.yml
new file mode 100644
index 00000000..e079ca5a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/disaster_recovery/tasks/unregister_entities.yml
@@ -0,0 +1,60 @@
+- block:
+ - name: Obtain SSO token
+ ovirt_auth:
+ url: "{{ vars['dr_sites_' + dr_source_map + '_url'] }}"
+ username: "{{ vars['dr_sites_' + dr_source_map + '_username'] }}"
+ password: "{{ vars['dr_sites_' + dr_source_map + '_password'] }}"
+ ca_file: "{{ vars['dr_sites_' + dr_source_map + '_ca_file'] }}"
+
+ # Get all the running VMs and shut them down
+ - name: Fetch running VMs in the setup
+ ovirt_vm_info:
+ pattern: status = up
+ auth: "{{ ovirt_auth }}"
+ register: vm_info
+
+ - name: Check whether file with running VMs info exists
+ stat:
+ path: '{{ dr_running_vms }}'
+ register: stat_result
+
+ - name: Fetch all running VM data from the file, if it exists
+ set_fact: running_vms_fail_back="{{ lookup('file', dr_running_vms) }}"
+ when: stat_result.stat.exists
+ ignore_errors: True
+
+ - name: Init list property for res_ovirt_vms
+ set_fact:
+ res_ovirt_vms="[]"
+
+ - name: Map all running VMs into a fact
+ set_fact:
+ res_ovirt_vms: "{{ res_ovirt_vms }} + {{ [
+ {
+ 'id': item.id,
+ 'name': item.name,
+ 'high_availability': item.high_availability
+ }
+ ] }}"
+ with_items: "{{ vm_info.ovirt_vms }}"
+ when: item.id is defined
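+ # Only id, name and high_availability are kept: run_unregistered_entities.yml
+ # later reads this file back and filters on high_availability.enabled.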
+
+ - name: Create file to hold running VMs if it does not exist
+ file:
+ path: '{{ dr_running_vms }}'
+ state: touch
+ mode: 0644
+ when: not stat_result.stat.exists|bool or running_vms_fail_back is not defined
+
+ - name: Write the running VMs to the file when it holds no data from a previous run
+ copy: content="{{ res_ovirt_vms }}" dest={{ dr_running_vms }} mode="preserve"
+ when: running_vms_fail_back is not defined or (running_vms_fail_back is defined and running_vms_fail_back | length == 0)
+
+ ignore_errors: "{{ dr_ignore_error_clean }}"
+ tags:
+ - fail_back
+ always:
+ - name: Revoke the SSO token
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/README.md
new file mode 100644
index 00000000..e8489f5f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/README.md
@@ -0,0 +1,167 @@
+oVirt Engine Setup
+==================
+
+Installs required packages for oVirt Engine deployment, generates an answer file
+and runs engine-setup.
+Optionally, the role updates the oVirt Engine packages.
+
+Role Variables
+--------------
+
+By default engine_setup uses an answer file specific to the oVirt version
+selected by the ``ovirt_engine_setup_version`` parameter. You can provide your own
+answer file through the ``ovirt_engine_setup_answer_file_path`` variable.
+
+* Common options for role:
+
+| Name | Default value | Description |
+|---------------------------------|-----------------------|-----------------------------------------------------------|
+| ovirt_engine_setup_answer_file_path | UNDEF | Path to custom answerfile for `engine-setup`. |
+| ovirt_engine_setup_use_remote_answer_file | False | If `True`, use answerfile's path on the remote machine. This option should be used if the installation occurs on the remote machine and the answerfile is located there as well. |
+| ovirt_engine_setup_update_setup_packages | False | If `True`, setup packages will be updated before `engine-setup` is executed. It makes sense if Engine has already been installed. |
+| ovirt_engine_setup_perform_upgrade | False | If `True`, this role is used to perform an upgrade. |
+| ovirt_engine_setup_product_type | oVirt | One of ["oVirt", "RHV"], case insensitive. |
+| ovirt_engine_setup_offline | False | If `True`, updates for all packages will be disabled. |
+| ovirt_engine_setup_restore_engine_cleanup | False | Remove the configuration files and clean the database associated with the Engine. Relevant only when `ovirt_engine_setup_restore_file` is defined. |
+| ovirt_engine_setup_restore_file | UNDEF | Restore the engine from a backup file created with `engine-backup`. |
+| ovirt_engine_setup_restore_scopes | UNDEF | List of scopes. The following values are available: ["all", "files", "db", "dwhdb", "cinderlibdb"]. |
+| ovirt_engine_setup_restore_options | {} | Dictionary that will add engine restore options as "`--key`=`value`" when `value` is not empty, otherwise it will append "`--key`" only. |
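+
+For example, a minimal sketch of how `ovirt_engine_setup_restore_options` entries are
+rendered into restore-command flags (the paths here are illustrative):
+
+```yaml
+ovirt_engine_setup_restore_options:
+  log: '/path/to/file.log'      # non-empty value renders as --log=/path/to/file.log
+  restore-permissions: ''       # empty value renders as bare --restore-permissions
+```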
+
+* Common options for engine:
+
+| Name | Default value | Description |
+|---------------------------------|-----------------------|-----------------------------------------------------------|
+| ovirt_engine_setup_version | 4.4 | Allowed versions: [4.1, 4.2, 4.3, 4.4]. |
+| ovirt_engine_setup_package_list | [] | List of extra packages to be installed on engine apart from `ovirt-engine` package. |
+| ovirt_engine_setup_fqdn | UNDEF | Host fully qualified DNS name of the server. |
+| ovirt_engine_setup_organization | UNDEF | Organization name for certificate. |
+| ovirt_engine_setup_firewall_manager | firewalld | Specify the type of firewall manager to configure on the Engine host. The following values are available: `firewalld`, `iptables`, or an empty value (`null`) to skip firewall configuration. |
+| ovirt_engine_setup_require_rollback | UNDEF | If `True`, setup will require the ability to roll back new packages in case of a failure. If not specified, the default answer from `engine-setup` will be used. Valid for updating/upgrading. |
+| ovirt_engine_setup_admin_password | UNDEF | Password for the automatically created administrative user of the oVirt Engine. |
+| ovirt_engine_setup_wait_running_tasks | False | If `True`, engine-setup will wait for running tasks to finish. Valid for `ovirt_engine_setup_version` >= 4.2. |
+| ovirt_engine_cinderlib_enable | False | If `True`, cinderlib is enabled. Valid for `ovirt_engine_setup_version` >= 4.3. |
+| ovirt_engine_grafana_enable | True | If `True`, Grafana integration will be set up. Valid for `ovirt_engine_setup_version` >= 4.4. |
+| ovirt_engine_setup_engine_configs | [] | List of dictionaries with keys `key`, `value` and `version`. engine-config will be called with the parameters "-s `key`=`value`"; when `version` is specified, "--cver=`version`" is appended to the call. |
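+
+For example, the config entry `key: SpiceProxyDefault`, `value: 'prot://proxy'`, `version: general` (as in the playbook example below) results in the call `engine-config -s SpiceProxyDefault='prot://proxy' --cver=general`.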
+
+* Engine Database:
+
+| Name | Default value | Description |
+|---------------------------------|-----------------------|-----------------------------------------------------------|
+| ovirt_engine_setup_db_host | localhost | IP address or host name of a PostgreSQL server for Engine database. By default the database will be configured on the same host as the Engine. |
+| ovirt_engine_setup_db_port | 5432 | Engine database port. |
+| ovirt_engine_setup_db_name | engine | Engine database name. |
+| ovirt_engine_setup_db_user | engine | Engine database user. |
+| ovirt_engine_setup_db_password | UNDEF | Engine database password. |
+| ovirt_engine_setup_engine_vacuum_full | False | Used only when upgrading. If `True`, engine database vacuum will be performed before upgrade. |
+
+* Engine Data Warehouse Database:
+
+| Name | Default value | Description |
+|---------------------------------|-----------------------|-----------------------------------------------------------|
+| ovirt_engine_setup_dwh_db_configure | True | If `True`, the DWH database will be configured. |
+| ovirt_engine_setup_dwh_db_host | localhost | IP address or host name of a PostgreSQL server for DWH database. By default the DWH database will be configured on the same host as the Engine. |
+| ovirt_engine_setup_dwh_db_port | 5432 | DWH database port. |
+| ovirt_engine_setup_dwh_db_name | ovirt_engine_history | DWH database name. |
+| ovirt_engine_setup_dwh_db_user | ovirt_engine_history | DWH database user. |
+| ovirt_engine_setup_dwh_db_password | UNDEF | DWH database password. |
+| ovirt_engine_setup_dwh_vacuum_full | False | Used only when upgrading. If `True`, DWH database vacuum will be performed before upgrade. |
+
+* OVN related options:
+
+| Name | Default value | Description |
+|---------------------------------|-----------------------|-----------------------------------------------------------|
+| ovirt_engine_setup_provider_ovn_configure| True | If `True`, OVN provider will be configured. Valid for `ovirt_engine_setup_version` >= 4.2. |
+| ovirt_engine_setup_provider_ovn_username | admin@internal | Username for OVN. |
+| ovirt_engine_setup_provider_ovn_password | UNDEF | Password for OVN. |
+
+* Apache related options:
+
+| Name | Default value | Description |
+|---------------------------------|-----------------------|-----------------------------------------------------------|
+| ovirt_engine_setup_apache_config_root_redirection | True | If `True`, `engine-setup` will configure the default page in Apache to automatically redirect clients to ovirt-engine default page. |
+| ovirt_engine_setup_apache_config_ssl | True | If `False`, `engine-setup` will not configure Apache SSL settings and administrators will need to configure it manually. |
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+```yaml
+---
+# Example of oVirt setup:
+- name: Setup oVirt
+ hosts: engine
+ vars_files:
+ # Contains encrypted `ovirt_engine_setup_admin_password` variable using ansible-vault
+ - passwords.yml
+ vars:
+ ovirt_engine_setup_version: '4.4'
+ ovirt_engine_setup_organization: 'of.ovirt.engine.com'
+ roles:
+ - engine_setup
+ collections:
+ - ovirt.ovirt
+
+
+# Example of RHV setup:
+- name: Setup RHV
+ hosts: engine
+ vars_files:
+ # Contains encrypted `ovirt_engine_setup_admin_password` variable using ansible-vault
+ - passwords.yml
+ vars:
+ ovirt_engine_setup_version: '4.4'
+ ovirt_engine_setup_organization: 'rhv.redhat.com'
+ ovirt_engine_setup_product_type: 'rhv'
+ roles:
+ - engine_setup
+ collections:
+ - ovirt.ovirt
+
+
+# Example of oVirt setup with engine_configs:
+- name: Setup oVirt
+ hosts: engine
+ vars_files:
+ # Contains encrypted `ovirt_engine_setup_admin_password` variable using ansible-vault
+ - passwords.yml
+ vars:
+ ovirt_engine_setup_version: '4.4'
+ ovirt_engine_setup_organization: 'of.ovirt.engine.com'
+ ovirt_engine_setup_engine_configs:
+ - key: SpiceProxyDefault
+ value: prot://proxy
+ version: general
+
+ roles:
+ - engine_setup
+ collections:
+ - ovirt.ovirt
+
+
+# Example of oVirt engine restore from file with cleanup engine before:
+- name: Restore oVirt Engine
+ hosts: engine
+ vars_files:
+ # Contains encrypted `ovirt_engine_setup_admin_password` variable using ansible-vault
+ - passwords.yml
+ vars:
+ ovirt_engine_setup_version: '4.4'
+ ovirt_engine_setup_organization: 'of.ovirt.engine.com'
+ ovirt_engine_setup_restore_engine_cleanup: true
+ ovirt_engine_setup_restore_file: '/path/to/backup.file'
+ ovirt_engine_setup_restore_scopes:
+ - 'files'
+ - 'db'
+ ovirt_engine_setup_restore_options:
+ log: '/path/to/file.log'
+ restore-permissions: ''
+ provision-all-databases: ''
+ roles:
+ - engine_setup
+ collections:
+ - ovirt.ovirt
+```
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/defaults/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/defaults/main.yml
new file mode 100644
index 00000000..6e015ead
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/defaults/main.yml
@@ -0,0 +1,39 @@
+---
+ovirt_engine_setup_version: '4.4'
+
+ovirt_engine_setup_provider_ovn_configure: true
+ovirt_engine_setup_provider_ovn_username: 'admin@internal'
+
+ovirt_engine_setup_db_host: 'localhost'
+ovirt_engine_setup_db_port: 5432
+ovirt_engine_setup_db_name: 'engine'
+ovirt_engine_setup_db_user: 'engine'
+ovirt_engine_setup_engine_vacuum_full: false
+
+ovirt_engine_setup_dwh_db_configure: true
+ovirt_engine_setup_dwh_db_host: 'localhost'
+ovirt_engine_setup_dwh_db_port: 5432
+ovirt_engine_setup_dwh_db_name: 'ovirt_engine_history'
+ovirt_engine_setup_dwh_db_user: 'ovirt_engine_history'
+ovirt_engine_setup_dwh_vacuum_full: false
+
+ovirt_engine_grafana_enable: true
+
+ovirt_engine_setup_firewall_manager: 'firewalld'
+
+# This option is suggested from oVirt Documentation
+# https://www.ovirt.org/documentation/install-guide/chap-Installing_oVirt/
+ovirt_engine_setup_update_setup_packages: false
+ovirt_engine_setup_offline: false
+
+ovirt_engine_setup_product_type: oVirt
+ovirt_engine_setup_package_list: []
+ovirt_engine_setup_use_remote_answer_file: false
+
+ovirt_engine_setup_perform_upgrade: false
+
+ovirt_engine_apache_config_ssl: true
+ovirt_engine_apache_config_root_redirection: true
+
+ovirt_engine_setup_restore_engine_cleanup: false
+ovirt_engine_setup_restore_options: {}
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/examples/engine-deploy.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/examples/engine-deploy.yml
new file mode 100644
index 00000000..2a38c36f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/examples/engine-deploy.yml
@@ -0,0 +1,18 @@
+---
+- name: Setup ovirt repositories and deploy oVirt Engine
+ hosts: engine
+ vars_files:
+ - passwords.yml
+ vars:
+ ovirt_engine_setup_product_type: 'ovirt'
+ ovirt_engine_setup_version: "4.4"
+ ovirt_engine_setup_organization: "example.com"
+ ovirt_engine_setup_dwh_db_host: "localhost"
+ ovirt_engine_setup_configure_iso_domain: true
+ ovirt_engine_setup_firewall_manager: null
+ ovirt_repositories_ovirt_release_rpm: "http://plain.resources.ovirt.org/pub/yum-repo/ovirt-release44.rpm"
+ roles:
+ - role: repositories
+ - role: engine_setup
+ collections:
+ - ovirt.ovirt
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/examples/engine-upgrade.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/examples/engine-upgrade.yml
new file mode 100644
index 00000000..6cf61fda
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/examples/engine-upgrade.yml
@@ -0,0 +1,19 @@
+---
+- name: Setup ovirt repositories, deploy oVirt Engine and then upgrade it
+ hosts: engine
+ vars_files:
+ - passwords.yml
+ vars:
+ ovirt_engine_setup_product_type: "ovirt"
+ ovirt_engine_setup_version: "{{ ovirt_engine_setup_version }}"
+ ovirt_engine_setup_organization: "example.com"
+ ovirt_engine_setup_configure_iso_domain: true
+ ovirt_engine_setup_firewall_manager: null
+ ovirt_repositories_ovirt_release_rpm: "{{ ovirt_repositories_ovirt_release_rpm }}"
+ roles:
+ - role: repositories
+ - role: engine_setup
+ ovirt_engine_setup_update_setup_packages: true
+ ovirt_engine_setup_answer_file_path: "answerfile_{{ ovirt_engine_setup_version }}_upgrade.txt.j2"
+ collections:
+ - ovirt.ovirt
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/examples/passwords.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/examples/passwords.yml
new file mode 100644
index 00000000..80d6df40
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/examples/passwords.yml
@@ -0,0 +1,12 @@
+---
+# As an example this file is kept in plaintext. If you want to
+# encrypt this file, please execute the following command:
+#
+# $ ansible-vault encrypt passwords.yml
+#
+# It will ask you for a password, which you must then pass to
+# ansible interactively when executing the playbook.
+#
+# $ ansible-playbook myplaybook.yml --ask-vault-pass
+#
+ovirt_engine_setup_admin_password: 123456
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/engine_setup.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/engine_setup.yml
new file mode 100644
index 00000000..c62c9b06
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/engine_setup.yml
@@ -0,0 +1,117 @@
+---
+- block:
+ - name: Set answer file path
+ set_fact:
+ answer_file_path: "/tmp/answerfile-{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}.txt"
+
+ - name: Use the default answerfile
+ template:
+ src: answerfile_{{ ovirt_engine_setup_version }}_basic.txt.j2
+ dest: "{{ answer_file_path }}"
+ mode: 0600
+ owner: root
+ group: root
+ when: ovirt_engine_setup_answer_file_path is undefined
+ no_log: yes
+
+ - name: Copy custom answer file
+ template:
+ src: "{{ ovirt_engine_setup_answer_file_path }}"
+ dest: "{{ answer_file_path }}"
+ mode: 0600
+ owner: root
+ group: root
+ when: ovirt_engine_setup_answer_file_path is defined and (
+ ovirt_engine_setup_use_remote_answer_file is not defined or not
+ ovirt_engine_setup_use_remote_answer_file)
+ no_log: yes
+
+ - name: Use remote's answer file
+ set_fact:
+ answer_file_path: "{{ ovirt_engine_setup_answer_file_path }}"
+ when: ovirt_engine_setup_use_remote_answer_file|bool
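+ # At this point, answer_file_path points at exactly one of: the generated
+ # default answer file, the copied custom file, or the remote file itself.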
+
+ - name: Update setup packages
+ yum:
+ name: "ovirt*setup*"
+ update_only: true
+ state: latest
+ when: ovirt_engine_setup_update_setup_packages or ovirt_engine_setup_perform_upgrade
+ tags:
+ - "skip_ansible_lint" # ANSIBLE0006
+
+ - name: Copy yum configuration file
+ copy:
+ src: "/etc/yum.conf"
+ dest: "/tmp/yum.conf"
+ owner: root
+ group: root
+ mode: 0644
+ remote_src: yes
+
+ - name: Set 'best' to false
+ replace:
+ path: "/tmp/yum.conf"
+ regexp: '^best=True'
+ replace: 'best=False'
+ owner: root
+ group: root
+ mode: 0644
+
+ - name: Update all packages
+ yum:
+ name: '*'
+ state: latest
+ conf_file: /tmp/yum.conf
+ when: not ovirt_engine_setup_offline|bool
+ tags:
+ - "skip_ansible_lint" # ANSIBLE0010
+
+ - name: Remove temporary yum configuration file
+ file:
+ path: "/tmp/yum.conf"
+ state: absent
+ ignore_errors: True
+
+ - name: Set offline parameter if variable is set
+ set_fact:
+ offline: "{{ '--offline' if ovirt_engine_setup_offline|bool else '' }}"
+
+ - name: Restore engine from file
+ include_tasks: restore_engine_from_file.yml
+ when: ovirt_engine_setup_restore_file is defined
+
+ - name: Run engine-setup with answerfile
+ command: "engine-setup --accept-defaults --config-append={{ answer_file_path }} {{ offline }}"
+ tags:
+ - skip_ansible_lint
+
+ - name: Make sure `ovirt-engine` service is running
+ service:
+ name: ovirt-engine
+ state: started
+
+ - name: Check if Engine health page is up
+ uri:
+ url: "http://localhost/ovirt-engine/services/health"
+ status_code: 200
+ register: health_page
+ retries: 30
+ delay: 10
+ until: health_page is success
+
+ - name: Run engine-config
+ command: "engine-config -s {{ item.key }}='{{ item.value }}' {% if item.version is defined %} --cver={{ item.version }} {% endif %}"
+ loop: "{{ ovirt_engine_setup_engine_configs | default([]) }}"
+
+ - name: Restart engine after engine-config
+ service:
+ name: ovirt-engine
+ state: restarted
+ when: ovirt_engine_setup_engine_configs is defined
+
+ always:
+ - name: Clean temporary files
+ file:
+ path: "{{ answer_file_path }}"
+ state: 'absent'
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/install_packages.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/install_packages.yml
new file mode 100644
index 00000000..cde777ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/install_packages.yml
@@ -0,0 +1,25 @@
+---
+- name: Install oVirt Engine package
+ package:
+ name: "ovirt-engine"
+ state: present
+ when: ovirt_engine_setup_product_type | lower == 'ovirt'
+
+- name: Check if rhevm package is installed
+ yum:
+ list: "rhevm"
+ when: ovirt_engine_setup_product_type | lower == 'rhv' and ansible_os_family == 'RedHat'
+ register: rhevm_installed
+
+- name: Install RHV package
+ package:
+ name: "{{ 'rhevm' if ovirt_engine_setup_version is version('4.2', '<') else 'rhvm' }}"
+ state: present
+ when: ovirt_engine_setup_product_type | lower == 'rhv' and rhevm_installed.results | default([]) | selectattr(
+ 'yumstate', 'match', 'installed') | list | length == 0
+
+- name: Install rest of the packages required for oVirt Engine deployment
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items: "{{ ovirt_engine_setup_package_list }}"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/main.yml
new file mode 100644
index 00000000..5504df10
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/main.yml
@@ -0,0 +1,10 @@
+---
+- name: Perform pre-install checks
+ include_tasks: pre_install_checks.yml
+
+- name: Install required packages for oVirt Engine deployment
+ include_tasks: install_packages.yml
+ when: not ovirt_engine_setup_perform_upgrade
+
+- name: Run engine setup
+ include_tasks: engine_setup.yml
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/pre_install_checks.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/pre_install_checks.yml
new file mode 100644
index 00000000..275ff0cb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/pre_install_checks.yml
@@ -0,0 +1,12 @@
+---
+- name: Gather facts on installed packages
+ package_facts:
+ manager: rpm
+ no_log: true
+
+- name: Fail when firewall manager is not installed
+ fail:
+ msg: '{{ ovirt_engine_setup_firewall_manager }} was chosen as a firewall manager but is not installed'
+ when:
+ - ovirt_engine_setup_firewall_manager not in ansible_facts.packages
+ - ovirt_engine_setup_firewall_manager is not none
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/restore_engine_from_file.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/restore_engine_from_file.yml
new file mode 100644
index 00000000..6761b5f6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tasks/restore_engine_from_file.yml
@@ -0,0 +1,20 @@
+---
+- name: Run engine cleanup command
+ command: "engine-cleanup"
+ when: ovirt_engine_setup_restore_engine_cleanup
+
+- name: Add scopes to restore engine command
+ set_fact:
+ restore_cmd: "{{ restore_cmd }} --scope={{ item }}"
+ with_items: "{{ ovirt_engine_setup_restore_scopes | default([]) }}"
+
+- name: Add restore file and restore options
+ set_fact:
+ restore_cmd: "{{ restore_cmd }} --{{ item.key }}{% if item.value %}={{ item.value }}{% endif %}"
+ with_dict:
+ - file: "{{ ovirt_engine_setup_restore_file }}"
+ - "{{ ovirt_engine_setup_restore_options | default({}) }}"
+
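+# Assuming restore_cmd was initialized elsewhere in the role (for example to
+# "engine-backup --mode=restore"), the assembled command would look like:
+#   engine-backup --mode=restore --scope=files --scope=db --file=/path/to/backup.file --log=/path/to/file.log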
+- name: Run restore engine from backup file
+ command: "{{ restore_cmd }}"
+ changed_when: true
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.1_basic.txt.j2 b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.1_basic.txt.j2
new file mode 100644
index 00000000..adf48ee4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.1_basic.txt.j2
@@ -0,0 +1,3 @@
+{% include "./templates/basic_answerfile.txt.j2" %}
+OVESETUP_DB/engineVacuumFull=bool:{{ ovirt_engine_setup_engine_vacuum_full }}
+OVESETUP_DB/dwhVacuumFull=bool:{{ ovirt_engine_setup_dwh_vacuum_full }}
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.1_upgrade.txt.j2 b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.1_upgrade.txt.j2
new file mode 100644
index 00000000..a9380c83
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.1_upgrade.txt.j2
@@ -0,0 +1 @@
+{% include "./templates/basic_answerfile.txt.j2" %}
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.2_basic.txt.j2 b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.2_basic.txt.j2
new file mode 100644
index 00000000..b39bf0a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.2_basic.txt.j2
@@ -0,0 +1,14 @@
+{% include "./templates/basic_answerfile.txt.j2" %}
+OVESETUP_DB/engineVacuumFull=bool:{{ ovirt_engine_setup_engine_vacuum_full }}
+OVESETUP_DB/dwhVacuumFull=bool:{{ ovirt_engine_setup_dwh_vacuum_full }}
+{% if ovirt_engine_setup_provider_ovn_configure is defined and ovirt_engine_setup_provider_ovn_configure %}
+OVESETUP_OVN/ovirtProviderOvn=bool:True
+OVESETUP_OVN/ovirtProviderOvnUser=str:{{ ovirt_engine_setup_provider_ovn_username }}
+{% else %}
+OVESETUP_OVN/ovirtProviderOvn=bool:False
+{% endif %}
+{% if ovirt_engine_setup_provider_ovn_password is defined %}
+OVESETUP_OVN/ovirtProviderOvnPassword=str:{{ ovirt_engine_setup_provider_ovn_password }}
+{% else %}
+OVESETUP_OVN/ovirtProviderOvnPassword=none:None
+{% endif %}
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.2_upgrade.txt.j2 b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.2_upgrade.txt.j2
new file mode 100644
index 00000000..86e6d920
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.2_upgrade.txt.j2
@@ -0,0 +1,16 @@
+{% include "./templates/basic_answerfile.txt.j2" %}
+{% if ovirt_engine_setup_provider_ovn_configure is defined and ovirt_engine_setup_provider_ovn_configure %}
+OVESETUP_DB/engineVacuumFull=bool:True
+OVESETUP_OVN/ovirtProviderOvn=bool:True
+OVESETUP_OVN/ovirtProviderOvnUser=str:{{ ovirt_engine_setup_provider_ovn_username }}
+{% else %}
+OVESETUP_OVN/ovirtProviderOvn=bool:False
+{% endif %}
+{% if ovirt_engine_setup_provider_ovn_password is defined %}
+OVESETUP_OVN/ovirtProviderOvnPassword=str:{{ ovirt_engine_setup_provider_ovn_password }}
+{% else %}
+OVESETUP_OVN/ovirtProviderOvnPassword=none:None
+{% endif %}
+{% if ovirt_engine_setup_wait_running_tasks is defined %}
+QUESTION/1/OVESETUP_WAIT_RUNNING_TASKS=str:{{ ovirt_engine_setup_wait_running_tasks | ternary('yes','no') }}
+{% endif %}
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.3_basic.txt.j2 b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.3_basic.txt.j2
new file mode 100644
index 00000000..d3280daa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.3_basic.txt.j2
@@ -0,0 +1,11 @@
+{% include "./templates/answerfile_4.2_basic.txt.j2" %}
+{% if ovirt_engine_cinderlib_enable is defined %}
+QUESTION/1/ovirt-cinderlib-enable=str:{{ ovirt_engine_cinderlib_enable | ternary('yes','no') }}
+{% endif %}
+{% if ovirt_engine_apache_config_root_redirection is defined %}
+QUESTION/1/OVESETUP_APACHE_CONFIG_ROOT_REDIRECTION=str:{{ ovirt_engine_apache_config_root_redirection | ternary('yes','no') }}
+{% endif %}
+{% if ovirt_engine_apache_config_ssl is defined %}
+QUESTION/1/OVESETUP_APACHE_CONFIG_SSL=str:{{ ovirt_engine_apache_config_ssl | ternary('automatic','manual') }}
+{% endif %}
+QUESTION/1/OVESETUP_IGNORE_SNAPSHOTS_WITH_OLD_COMPAT_LEVEL=str:yes
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.3_upgrade.txt.j2 b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.3_upgrade.txt.j2
new file mode 100644
index 00000000..46771fe0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.3_upgrade.txt.j2
@@ -0,0 +1 @@
+{% include "./templates/answerfile_4.2_upgrade.txt.j2" %}
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.4_basic.txt.j2 b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.4_basic.txt.j2
new file mode 100644
index 00000000..e7776353
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.4_basic.txt.j2
@@ -0,0 +1,3 @@
+{% include "./templates/answerfile_4.3_basic.txt.j2" %}
+
+OVESETUP_GRAFANA_CORE/enable=bool:{{ ovirt_engine_grafana_enable }}
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.4_upgrade.txt.j2 b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.4_upgrade.txt.j2
new file mode 100644
index 00000000..59d6568f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/answerfile_4.4_upgrade.txt.j2
@@ -0,0 +1 @@
+{% include "./templates/answerfile_4.3_upgrade.txt.j2" %}
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/basic_answerfile.txt.j2 b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/basic_answerfile.txt.j2
new file mode 100644
index 00000000..c4667d35
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/templates/basic_answerfile.txt.j2
@@ -0,0 +1,64 @@
+[environment:default]
+{% if ovirt_engine_setup_fqdn is defined %}
+OVESETUP_CONFIG/fqdn=str:{{ ovirt_engine_setup_fqdn }}
+{% endif %}
+{% if ovirt_engine_setup_firewall_manager %}
+OVESETUP_CONFIG/updateFirewall=bool:True
+OVESETUP_CONFIG/firewallManager=str:{{ ovirt_engine_setup_firewall_manager }}
+{% else %}
+OVESETUP_CONFIG/updateFirewall=bool:False
+OVESETUP_CONFIG/firewallManager=none:None
+{% endif %}
+{% if ovirt_engine_setup_require_rollback is defined %}
+OSETUP_RPMDISTRO/requireRollback=bool:{{ovirt_engine_setup_require_rollback}}
+{% else %}
+OSETUP_RPMDISTRO/requireRollback=none:None
+{% endif %}
+OVESETUP_DB/host=str:{{ovirt_engine_setup_db_host}}
+OVESETUP_DB/user=str:{{ovirt_engine_setup_db_user}}
+OVESETUP_SYSTEM/memCheckEnabled=bool:False
+{% if ovirt_engine_setup_db_password is defined %}
+OVESETUP_DB/password=str:{{ovirt_engine_setup_db_password}}
+{% else %}
+OVESETUP_DB/password=none:None
+{% endif %}
+OVESETUP_DB/database=str:{{ovirt_engine_setup_db_name}}
+OVESETUP_DB/port=int:{{ovirt_engine_setup_db_port}}
+{% if ovirt_engine_setup_dwh_db_configure is defined and ovirt_engine_setup_dwh_db_configure %}
+OVESETUP_DWH_DB/secured=bool:False
+OVESETUP_DWH_DB/host=str:{{ovirt_engine_setup_dwh_db_host}}
+OVESETUP_DWH_DB/user=str:{{ovirt_engine_setup_dwh_db_user}}
+OVESETUP_DWH_DB/database=str:{{ovirt_engine_setup_dwh_db_name}}
+OVESETUP_DWH_DB/port=int:{{ovirt_engine_setup_dwh_db_port}}
+{% else %}
+OVESETUP_DWH_DB/secured=none:None
+OVESETUP_DWH_DB/host=none:None
+OVESETUP_DWH_DB/user=none:None
+OVESETUP_DWH_DB/password=none:None
+OVESETUP_DWH_DB/database=none:None
+OVESETUP_DWH_DB/port=none:None
+{% endif %}
+{% if ovirt_engine_setup_dwh_db_password is defined %}
+OVESETUP_DWH_DB/password=str:{{ovirt_engine_setup_dwh_db_password}}
+{% else %}
+OVESETUP_DWH_DB/password=none:None
+{% endif %}
+{% if ovirt_engine_setup_dwh_db_configure is defined and ovirt_engine_setup_dwh_db_configure %}
+OVESETUP_DWH_DB/securedHostValidation=bool:False
+{% else %}
+OVESETUP_DWH_DB/securedHostValidation=none:None
+{% endif %}
+{% if ovirt_engine_setup_organization is defined %}
+OVESETUP_PKI/organization=str:{{ ovirt_engine_setup_organization }}
+{% endif %}
+OVESETUP_CONFIG/adminPassword=str:{{ ovirt_engine_setup_admin_password }}
+{% if ovirt_engine_setup_dwh_db_configure is defined and ovirt_engine_setup_dwh_db_configure %}
+OVESETUP_DWH_CORE/enable=bool:True
+{% else %}
+OVESETUP_DWH_CORE/enable=bool:False
+{% endif %}
+{% if ovirt_engine_setup_dwh_db_configure is defined and ovirt_engine_setup_dwh_db_configure %}
+OVESETUP_DWH_PROVISIONING/postgresProvisioningEnabled=bool:True
+{% else %}
+OVESETUP_DWH_PROVISIONING/postgresProvisioningEnabled=bool:False
+{% endif %}
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/containers-deploy.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/containers-deploy.yml
new file mode 100644
index 00000000..f40955a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/containers-deploy.yml
@@ -0,0 +1,17 @@
+---
+- name: Bring up docker containers
+ hosts: localhost
+ gather_facts: false
+ roles:
+ - role: provision_docker
+ provision_docker_inventory_group: "{{ groups['engine'] }}"
+
+- name: "Update python because of ovirt-imageio-proxy"
+ hosts: engine
+ tasks:
+ - name: Update python
+ yum:
+ name: python-libs
+ state: latest
+ tags:
+ - skip_ansible_lint # ANSIBLE0010
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/engine-deploy.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/engine-deploy.yml
new file mode 100644
index 00000000..3a746db1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/engine-deploy.yml
@@ -0,0 +1,17 @@
+---
+- name: Run ovirt-ansible roles on containerized environments
+ hosts: engine
+ vars_files:
+ - passwords.yml
+ vars:
+ ovirt_engine_setup_version: "{{ ovirt_engine_setup_version }}"
+ ovirt_engine_setup_organization: "example.com"
+ ovirt_engine_setup_dwh_db_configure: true
+ ovirt_engine_setup_dwh_db_host: "localhost"
+ ovirt_engine_setup_firewall_manager: null
+ ovirt_repositories_ovirt_release_rpm: "{{ ovirt_release_rpm }}"
+ roles:
+ - role: repositories
+ - role: engine_setup
+ collections:
+ - ovirt.ovirt
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/engine-upgrade.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/engine-upgrade.yml
new file mode 100644
index 00000000..87c5f06c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/engine-upgrade.yml
@@ -0,0 +1,17 @@
+---
+- name: Run ovirt-ansible roles on containerized environments
+ hosts: engine
+ vars_files:
+ - passwords.yml
+ vars:
+ ovirt_engine_setup_version: "{{ ovirt_engine_setup_version }}"
+ ovirt_engine_setup_organization: "example.com"
+ ovirt_engine_setup_firewall_manager: null
+ ovirt_engine_setup_dwh_db_configure: false
+ ovirt_repositories_ovirt_release_rpm: "{{ ovirt_release_rpm }}"
+ roles:
+ - role: repositories
+ - role: engine_setup
+ ovirt_engine_setup_answer_file_path: "answerfile_{{ ovirt_engine_setup_version }}_upgrade.txt.j2"
+ collections:
+ - ovirt.ovirt
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/inventory b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/inventory
new file mode 100644
index 00000000..9a315cac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/inventory
@@ -0,0 +1,4 @@
+localhost ansible_connection=local ansible_python_interpreter="/usr/bin/env python"
+
+[engine]
+engine_centos7 image="katerinak/c7-systemd-utf8"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/passwords.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/passwords.yml
new file mode 100644
index 00000000..80d6df40
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/passwords.yml
@@ -0,0 +1,12 @@
+---
+# As an example this file is kept in plaintext; if you want to
+# encrypt this file, please execute the following command:
+#
+# $ ansible-vault encrypt passwords.yml
+#
+# It will ask you for a password, which you must then pass to
+# ansible interactively when executing the playbook.
+#
+# $ ansible-playbook myplaybook.yml --ask-vault-pass
+#
+ovirt_engine_setup_admin_password: 123456
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/requirements.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/requirements.yml
new file mode 100644
index 00000000..159e73f9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/requirements.yml
@@ -0,0 +1,4 @@
+---
+- src: chrismeyersfsu.provision_docker
+ name: provision_docker
+- src: oVirt.repositories
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/test-4.2.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/test-4.2.yml
new file mode 100644
index 00000000..20634634
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/test-4.2.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: containers-deploy.yml
+- import_playbook: engine-deploy.yml
+ vars:
+ ovirt_engine_setup_version: "4.2"
+ ovirt_release_rpm: "http://plain.resources.ovirt.org/pub/yum-repo/ovirt-release42.rpm"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/test-master.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/test-master.yml
new file mode 100644
index 00000000..55919dbf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/test-master.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: containers-deploy.yml
+- import_playbook: engine-deploy.yml
+ vars:
+ ovirt_engine_setup_version: "4.4"
+ ovirt_release_rpm: "http://plain.resources.ovirt.org/pub/yum-repo/ovirt-release-master.rpm"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/test-upgrade-4.2-to-master.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/test-upgrade-4.2-to-master.yml
new file mode 100644
index 00000000..32516116
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/tests/test-upgrade-4.2-to-master.yml
@@ -0,0 +1,10 @@
+---
+- import_playbook: containers-deploy.yml
+- import_playbook: engine-deploy.yml
+ vars:
+ ovirt_engine_setup_version: "4.2"
+ ovirt_release_rpm: "http://plain.resources.ovirt.org/pub/yum-repo/ovirt-release42.rpm"
+- import_playbook: engine-upgrade.yml
+ vars:
+ ovirt_engine_setup_version: "4.3"
+ ovirt_release_rpm: "http://plain.resources.ovirt.org/pub/yum-repo/ovirt-release-master.rpm"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/vars/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/vars/main.yml
new file mode 100644
index 00000000..e5e72725
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/engine_setup/vars/main.yml
@@ -0,0 +1,2 @@
+---
+restore_cmd: 'engine-backup --mode=restore'
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/README.md
new file mode 100644
index 00000000..285586c7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/README.md
@@ -0,0 +1,376 @@
+# ovirt-ansible-hosted-engine-setup
+
+Ansible role for deploying oVirt Hosted-Engine
+
+# Prerequisites
+
+* A fully qualified domain name prepared for your Engine and the host. Forward and reverse lookup records must both be set in the DNS.
+* `/var/tmp` must have at least 5 GB of free space.
+* Unless you are using Gluster, you must have prepared storage for your Hosted-Engine environment (choose one):
+ * [Prepare NFS Storage](https://ovirt.org/documentation/admin-guide/chap-Storage/#preparing-nfs-storage)
+ * [Prepare ISCSI Storage](https://ovirt.org/documentation/admin-guide/chap-Storage/#preparing-iscsi-storage)
+
+# Role variables
+
+## General Variables
+
+| Name | Default value | Description |
+|---------------------------------|-----------------------|-----------------------------------------------------------|
+| he_bridge_if | eth0 | The network interface on which the oVirt management bridge will be configured |
+| he_fqdn | null | The engine FQDN as configured in the DNS |
+| he_mem_size_MB | max | The amount of memory used on the engine VM |
+| he_reserved_memory_MB | 512 | The amount of memory reserved for the host |
+| he_vcpus | max | The number of CPUs used on the engine VM |
+| he_disk_size_GB | 61 | Disk size of the engine VM |
+| he_vm_mac_addr | null | MAC address of the engine vm network interface. |
+| he_domain_type | null | Storage domain type. Available options: *nfs*, *iscsi*, *glusterfs*, *fc* |
+| he_storage_domain_addr | null | Storage domain IP/DNS address |
+| he_ansible_host_name | localhost | Hostname in use on the first HE host (not necessarily the Ansible controller) |
+| he_restore_from_file | null | A backup file created with engine-backup, to be restored on the fly |
+| he_pki_renew_on_restore | false | Renew engine PKI on restore if needed |
+| he_cluster | Default | name of the cluster with hosted-engine hosts |
+| he_cluster_cpu_type | null | cluster CPU type to be used in hosted-engine cluster (the same as HE host or lower) |
+| he_cluster_comp_version | null | Compatibility version of the hosted-engine cluster. Default value is the latest compatibility version |
+| he_data_center | Default | name of the datacenter with hosted-engine hosts |
+| he_data_center_comp_version | null | Compatibility version of the hosted-engine data center. Default value is the latest compatibility version |
+| he_host_name | $(hostname -f) | name used by the engine for the first host |
+| he_host_address | $(hostname -f) | address used by the engine for the first host |
+| he_apply_openscap_profile | false | apply a default OpenSCAP security profile on HE VM |
+| he_network_test | dns | The method used for the network connectivity check performed by ovirt-hosted-engine-ha and ovirt-hosted-engine-setup. Available options: *dns*, *ping*, *tcp*, *none* |
+| he_tcp_t_address | null | Hostname to connect to if he_network_test is *tcp* |
+| he_tcp_t_port | null | Port to connect to if he_network_test is *tcp* |
+| he_pause_host | false | Pause the execution to let the user interactively fix host configuration |
+| he_pause_after_failed_add_host | true | Pause the execution if Add Host failed with status non_operational, to let the user interactively fix host configuration |
+| he_offline_deployment | false | If `True`, updates for all packages will be disabled |
+| he_additional_package_list | [] | List of additional packages to be installed on engine VM apart from ovirt-engine package |
+| he_debug_mode | false | If `True`, HE deployment will execute additional tasks for debug |
+| he_db_password | UNDEF | Engine database password |
+| he_dwh_db_password | UNDEF | DWH database password |
+
+## NFS / Gluster Variables
+
+| Name | Default value | Description |
+|---------------------------------|-----------------------|-----------------------------------------------------------|
+| he_mount_options | '' | NFS mount options |
+| he_storage_domain_path | null | Shared folder path on the NFS server |
+| he_nfs_version | auto | NFS version. Available options: *auto*, *v4*, *v3*, *v4_0*, *v4_1*, *v4_2* |
+| he_storage_if | null | The network interface that is connected to the storage network; assumed to be pre-configured |
+
+
+## iSCSI Variables
+
+| Name | Default value | Description |
+|---------------------------------|-----------------------|-----------------------------------------------------------|
+| he_iscsi_username | null | iSCSI username |
+| he_iscsi_password | null | iSCSI password |
+| he_iscsi_target | null | iSCSI target |
+| he_lun_id | null | LUN ID |
+| he_iscsi_portal_port | null | iSCSI portal port |
+| he_iscsi_portal_addr | null | iSCSI portal address (only for interactive iSCSI discovery; use he_storage_domain_addr for the deployment) |
+| he_iscsi_tpgt | null | iSCSI TPGT |
+| he_discard | false | Discard the whole disk space when removed. More info [here](https://ovirt.org/develop/release-management/features/storage/discard-after-delete/) |
+
+## Static IP Configuration Variables
+
+DHCP configuration is used on the engine VM by default. However, if you would like to use static IP addressing instead,
+define the following variables:
+
+| Name | Default value | Description |
+|---------------------------------|-----------------------|-----------------------------------------------------------|
+| he_vm_ip_addr | null | Engine VM IP address |
+| he_vm_ip_prefix | null | Engine VM IP prefix |
+| he_dns_addr | null | Engine VM DNS server |
+| he_default_gateway | null | Engine VM default gateway |
+| he_vm_etc_hosts | false | Add the engine VM IP and FQDN to /etc/hosts on the host |
+
+# Example Playbook
+This is a simple example for deploying Hosted-Engine with an NFS storage domain.
+
+This role can be used to deploy on localhost (the Ansible controller itself) or on a remote host (set he_ansible_host_name accordingly).
+All the playbooks can be found inside the `examples/` folder.
+
+## hosted_engine_deploy_localhost.yml
+
+```yml
+---
+- name: Deploy oVirt hosted engine
+ hosts: localhost
+ connection: local
+ roles:
+ - role: hosted_engine_setup
+ collections:
+ - ovirt.ovirt
+```
+
+## hosted_engine_deploy_remotehost.yml
+
+```yml
+---
+- name: Deploy oVirt hosted engine
+ hosts: host123.localdomain
+ roles:
+ - role: hosted_engine_setup
+ collections:
+ - ovirt.ovirt
+```
+
+## passwords.yml
+
+```yml
+---
+# As an example this file is kept in plaintext; if you want to
+# encrypt this file, please execute the following command:
+#
+# $ ansible-vault encrypt passwords.yml
+#
+# It will ask you for a password, which you must then pass to
+# ansible interactively when executing the playbook.
+#
+# $ ansible-playbook myplaybook.yml --ask-vault-pass
+#
+he_appliance_password: 123456
+he_admin_password: 123456
+```
+
+## Example 1: extra vars for NFS deployment with DHCP - he_deployment.json
+
+```json
+{
+ "he_bridge_if": "eth0",
+ "he_fqdn": "he-engine.example.com",
+ "he_vm_mac_addr": "00:a5:3f:66:ba:12",
+ "he_domain_type": "nfs",
+ "he_storage_domain_addr": "192.168.100.50",
+ "he_storage_domain_path": "/var/nfs_folder"
+}
+```
+
+## Example 2: extra vars for iSCSI deployment with static IP, remote host - he_deployment_remote.json
+
+```json
+{
+ "he_bridge_if": "eth0",
+ "he_fqdn": "he-engine.example.com",
+ "he_vm_ip_addr": "192.168.1.214",
+ "he_vm_ip_prefix": "24",
+ "he_gateway": "192.168.1.1",
+ "he_dns_addr": "192.168.1.1",
+ "he_vm_etc_hosts": true,
+ "he_vm_mac_addr": "00:a5:3f:66:ba:12",
+ "he_domain_type": "iscsi",
+ "he_storage_domain_addr": "192.168.1.125",
+ "he_iscsi_portal_port": "3260",
+ "he_iscsi_tpgt": "1",
+ "he_iscsi_target": "iqn.2017-10.com.redhat.stirabos:he",
+ "he_lun_id": "36589cfc000000e8a909165bdfb47b3d9",
+ "he_mem_size_MB": "4096",
+ "he_ansible_host_name": "host123.localdomain"
+}
+```
+
+### Test iSCSI connectivity and get LUN WWID before deploying
+
+```
+[root@c75he20180820h1 ~]# iscsiadm -m node --targetname iqn.2017-10.com.redhat.stirabos:he -p 192.168.1.125:3260 -l
+[root@c75he20180820h1 ~]# iscsiadm -m session -P3
+iSCSI Transport Class version 2.0-870
+version 6.2.0.874-7
+Target: iqn.2017-10.com.redhat.stirabos:data (non-flash)
+ Current Portal: 192.168.1.125:3260,1
+ Persistent Portal: 192.168.1.125:3260,1
+ **********
+ Interface:
+ **********
+ Iface Name: default
+ Iface Transport: tcp
+ Iface Initiatorname: iqn.1994-05.com.redhat:6a4517b3773a
+ Iface IPaddress: 192.168.1.14
+ Iface HWaddress: <empty>
+ Iface Netdev: <empty>
+ SID: 1
+ iSCSI Connection State: LOGGED IN
+ iSCSI Session State: LOGGED_IN
+ Internal iscsid Session State: NO CHANGE
+ *********
+ Timeouts:
+ *********
+ Recovery Timeout: 5
+ Target Reset Timeout: 30
+ LUN Reset Timeout: 30
+ Abort Timeout: 15
+ *****
+ CHAP:
+ *****
+ username: <empty>
+ password: ********
+ username_in: <empty>
+ password_in: ********
+ ************************
+ Negotiated iSCSI params:
+ ************************
+ HeaderDigest: None
+ DataDigest: None
+ MaxRecvDataSegmentLength: 262144
+ MaxXmitDataSegmentLength: 131072
+ FirstBurstLength: 131072
+ MaxBurstLength: 16776192
+ ImmediateData: Yes
+ InitialR2T: Yes
+ MaxOutstandingR2T: 1
+ ************************
+ Attached SCSI devices:
+ ************************
+ Host Number: 3 State: running
+ scsi3 Channel 00 Id 0 Lun: 2
+ Attached scsi disk sdb State: running
+ scsi3 Channel 00 Id 0 Lun: 3
+ Attached scsi disk sdc State: running
+Target: iqn.2017-10.com.redhat.stirabos:he (non-flash)
+ Current Portal: 192.168.1.125:3260,1
+ Persistent Portal: 192.168.1.125:3260,1
+ **********
+ Interface:
+ **********
+ Iface Name: default
+ Iface Transport: tcp
+ Iface Initiatorname: iqn.1994-05.com.redhat:6a4517b3773a
+ Iface IPaddress: 192.168.1.14
+ Iface HWaddress: <empty>
+ Iface Netdev: <empty>
+ SID: 4
+ iSCSI Connection State: LOGGED IN
+ iSCSI Session State: LOGGED_IN
+ Internal iscsid Session State: NO CHANGE
+ *********
+ Timeouts:
+ *********
+ Recovery Timeout: 5
+ Target Reset Timeout: 30
+ LUN Reset Timeout: 30
+ Abort Timeout: 15
+ *****
+ CHAP:
+ *****
+ username: <empty>
+ password: ********
+ username_in: <empty>
+ password_in: ********
+ ************************
+ Negotiated iSCSI params:
+ ************************
+ HeaderDigest: None
+ DataDigest: None
+ MaxRecvDataSegmentLength: 262144
+ MaxXmitDataSegmentLength: 131072
+ FirstBurstLength: 131072
+ MaxBurstLength: 16776192
+ ImmediateData: Yes
+ InitialR2T: Yes
+ MaxOutstandingR2T: 1
+ ************************
+ Attached SCSI devices:
+ ************************
+ Host Number: 6 State: running
+ scsi6 Channel 00 Id 0 Lun: 0
+ Attached scsi disk sdd State: running
+ scsi6 Channel 00 Id 0 Lun: 1
+ Attached scsi disk sde State: running
+[root@c75he20180820h1 ~]# lsblk /dev/sdd
+NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
+sdd 8:48 0 100G 0 disk
+└─36589cfc000000e8a909165bdfb47b3d9 253:10 0 100G 0 mpath
+[root@c75he20180820h1 ~]# lsblk /dev/sde
+NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
+sde 8:64 0 10G 0 disk
+└─36589cfc000000ab67ee1427370d68436 253:0 0 10G 0 mpath
+[root@c75he20180820h1 ~]# /lib/udev/scsi_id --page=0x83 --whitelisted --device=/dev/sdd
+36589cfc000000e8a909165bdfb47b3d9
+[root@c75he20180820h1 ~]# iscsiadm -m node --targetname iqn.2017-10.com.redhat.stirabos:he -p 192.168.1.125:3260 -u
+Logging out of session [sid: 4, target: iqn.2017-10.com.redhat.stirabos:he, portal: 192.168.1.125,3260]
+Logout of [sid: 4, target: iqn.2017-10.com.redhat.stirabos:he, portal: 192.168.1.125,3260] successful.
+```
+
+# Usage
+1. Check that all the prerequisites and requirements are met.
+2. Encrypt passwords.yml
+```sh
+$ ansible-vault encrypt passwords.yml
+```
+
+3. Execute the playbook
+
+Local deployment:
+```sh
+$ ansible-playbook hosted_engine_deploy.yml --extra-vars='@he_deployment.json' --extra-vars='@passwords.yml' --ask-vault-pass
+```
+
+Deployment over a remote host:
+```sh
+$ ansible-playbook -i host123.localdomain, hosted_engine_deploy.yml --extra-vars='@he_deployment.json' --extra-vars='@passwords.yml' --ask-vault-pass
+```
+
+Deploy over a remote host from Ansible AWX/Tower
+---
+
+The flow creates a temporary VM with a running engine that is used to configure and bootstrap the whole environment.
+The bootstrap engine VM runs on a libvirt NATed network, so at that stage it is not reachable from outside the host it runs on.
+
+When the role dynamically adds the freshly created engine VM to the inventory, it also configures the host to act as an SSH proxy; this works fine when the playbook is run directly with ansible-playbook.
+Ansible AWX/Tower, on the other hand, relies on PRoot by default to isolate jobs, so the credentials supplied by AWX/Tower will not flow to the jump host configured with ProxyCommand.
+
+[This can be avoided by disabling job isolation in AWX/Tower](https://docs.ansible.com/ansible-tower/latest/html/administration/tipsandtricks.html#setting-up-a-jump-host-to-use-with-tower)
+
+Please note that *job isolation* can only be configured system-wide, not just for the HE deployment job, so disabling it is not a recommended practice in production environments.
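+
+For reference, the jump-host setup the role creates on the fly via add_host is roughly equivalent to the following hand-written static-inventory sketch; the hostnames are placeholders taken from the examples above:
+
+```
+[engine]
+# SSH to the engine VM is tunnelled through the deployment host
+he-engine.example.com ansible_user=root ansible_ssh_extra_args='-o ProxyCommand="ssh -W %h:%p -q root@host123.localdomain"'
+```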
+
+Deployment time improvements
+---
+
+To significantly reduce the amount of time it takes to deploy a hosted engine __over a remote host__, add the following lines to `/etc/ansible/ansible.cfg` under the `[ssh_connection]` section:
+
+```
+ssh_args = -C -o ControlMaster=auto -o ControlPersist=30m
+control_path_dir = /root/cp
+control_path = %(directory)s/%%h-%%r
+pipelining = True
+```
+
+Make changes in the engine VM during the deployment
+---
+In some cases, a user may want to make adjustments to the engine VM
+during the deployment process. There are 2 ways to do that:
+
+**Automatic:**
+
+Write Ansible playbooks that will run on the engine VM before or after the engine installation.
+
+You can add the playbooks to the following locations:
+
+- ```hooks/enginevm_before_engine_setup```: These will be run before engine-setup runs on the engine machine.
+
+- ```hooks/enginevm_after_engine_setup```: These will be run after engine-setup runs on the engine machine.
+
+- ```hooks/after_add_host```: These will be run after adding the host to the engine, but before checking whether it is up. You can place playbooks here to customize the host, such as configuring required networks, and then activate it, so that the deployment will find it "Up" and continue successfully. See examples/required_networks_fix.yml for an example.
+
+These playbooks will be consumed automatically by the role when you execute it.
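+
+For illustration, a hook file is a plain Ansible task list, like the ones shipped in ```hooks/after_setup```. A minimal sketch, assuming a hypothetical file ```hooks/enginevm_before_engine_setup/install_extra_packages.yml```:
+
+```yml
+---
+# Hypothetical example: install an extra package on the engine VM
+# before engine-setup runs.
+- name: Install extra packages on the engine VM
+  package:
+    name: tmux
+    state: present
+```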
+
+**Manual:**
+
+To make manual adjustments, set the variable ```he_pause_host``` to true. This will pause the deployment after the engine has been set up, creating a lock file in /tmp whose name ends with ```_he_setup_lock``` on the machine the role was executed on. The deployment will continue after the lock file is deleted, or after 24 hours if it has not been removed.
+
+Before deleting the lock file to proceed with the deployment, make sure that the host is in the 'Up' state at the engine's URL.
+
+Both the lock file path and the engine's URL are displayed during the role execution.
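+
+For example, once the host is 'Up', the deployment can be resumed by removing the lock file; a sketch, since the exact file name is generated at runtime:
+
+```sh
+$ rm /tmp/*_he_setup_lock
+```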
+
+**On Failure**
+
+If "Add Host" failed and left the host in status "non_operational", by default the deployment will be paused, similarly to "Manual" above, so that the user can try to fix the host to get it to "up" state, before removing the lock file and continuing. If you want the process to fail instead of pausing, set `he_pause_after_failed_add_host` to false.
+
+Demo
+----
+Here is a demo showing a deployment on NFS, configuring the engine VM with a static IP.
+[![asciicast](https://asciinema.org/a/205639.png)](https://asciinema.org/a/205639)
+
+# License
+
+Apache License 2.0
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/defaults/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/defaults/main.yml
new file mode 100644
index 00000000..7334f2cc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/defaults/main.yml
@@ -0,0 +1,112 @@
+---
+# Default vars
+# Do not change these variables
+# Changes in this section are NOT supported
+
+he_cmd_lang:
+ LANGUAGE: en_US.UTF-8
+ LANG: en_US.UTF-8
+ LC_MESSAGES: en_US.UTF-8
+ LC_ALL: en_US.UTF-8
+
+he_vm_name: HostedEngine
+he_data_center: Default
+he_cluster: Default
+he_local_vm_dir_path: /var/tmp
+he_local_vm_dir_prefix: localvm
+he_appliance_ova: ''
+he_root_ssh_pubkey: ''
+he_root_ssh_access: 'yes'
+he_apply_openscap_profile: false
+he_cdrom: ''
+he_console_type: vnc
+he_video_device: vga
+he_graphic_device: vnc
+he_emulated_machine: pc
+he_minimal_mem_size_MB: 4096
+he_minimal_disk_size_GB: 50
+he_mgmt_network: ovirtmgmt
+he_storage_domain_name: hosted_storage
+he_ansible_host_name: localhost
+he_ipv4_subnet_prefix: "192.168.222"
+he_ipv6_subnet_prefix: fd00:1234:5678:900
+he_webui_forward_port: 6900 # by default already open for VM console
+he_reserved_memory_MB: 512
+he_avail_memory_grace_MB: 200
+
+he_host_ip: null
+he_host_name: null
+he_host_address: null
+he_cloud_init_host_name: null
+he_cloud_init_domain_name: null
+
+he_smtp_port: 25
+he_smtp_server: localhost
+he_dest_email: root@localhost
+he_source_email: root@localhost
+
+he_force_ip4: false
+he_force_ip6: false
+
+he_pause_host: false
+he_pause_after_failed_add_host: true
+he_debug_mode: false
+
+## Mandatory variables:
+
+he_bridge_if: null
+he_fqdn: null
+he_mem_size_MB: max
+he_vcpus: max
+he_disk_size_GB: 61
+
+he_enable_libgfapi: false
+he_enable_hc_gluster_service: false
+he_vm_mac_addr: null
+he_remove_appliance_rpm: true
+he_pki_renew_on_restore: false
+
+## Storage domain vars:
+he_domain_type: null # can be: nfs | iscsi | glusterfs | fc
+he_storage_domain_addr: null
+
+## NFS vars:
+## Defaults are null, user should specify if NFS is chosen
+he_mount_options: ''
+he_storage_domain_path: null
+he_nfs_version: auto # can be: auto, v4 or v3
+he_storage_if: null
+
+## ISCSI vars:
+## Defaults are null, user should specify if ISCSI is chosen
+he_iscsi_username: null
+he_iscsi_password: null
+he_iscsi_discover_username: null
+he_iscsi_discover_password: null
+he_iscsi_target: null
+he_lun_id: null
+he_iscsi_portal_port: null
+he_iscsi_portal_addr: null
+he_iscsi_tpgt: null
+he_discard: false
+
+# Define if using STATIC ip configuration
+he_vm_ip_addr: null
+he_vm_ip_prefix: null
+he_dns_addr: null # up to 3 DNS server IPs can be added
+he_vm_etc_hosts: false # user can add lines to /etc/hosts on the engine VM
+he_default_gateway: null
+he_network_test: 'dns' # can be: 'dns', 'ping', 'tcp' or 'none'
+he_tcp_t_address: null
+he_tcp_t_port: null
+
+# ovirt-hosted-engine-setup variables
+he_just_collect_network_interfaces: false
+he_libvirt_authfile: '/etc/ovirt-hosted-engine/virsh_auth.conf'
+he_offline_deployment: false
+he_additional_package_list: []
+
+# *** Do Not Use On Production Environment ***
+# ********** Used for testing ONLY ***********
+he_requirements_check_enabled: true
+he_memory_requirements_check_enabled: true
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/hosted_engine_deploy_localhost.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/hosted_engine_deploy_localhost.yml
new file mode 100644
index 00000000..53f72801
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/hosted_engine_deploy_localhost.yml
@@ -0,0 +1,8 @@
+---
+- name: Deploy oVirt hosted engine
+ hosts: localhost
+ connection: local
+ roles:
+ - role: hosted_engine_setup
+ collections:
+ - ovirt.ovirt
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/hosted_engine_deploy_remotehost.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/hosted_engine_deploy_remotehost.yml
new file mode 100644
index 00000000..51aaa0e1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/hosted_engine_deploy_remotehost.yml
@@ -0,0 +1,7 @@
+---
+- name: Deploy oVirt hosted engine
+ hosts: host123.localdomain
+ roles:
+ - role: hosted_engine_setup
+ collections:
+ - ovirt.ovirt
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/iscsi_deployment_remote.json b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/iscsi_deployment_remote.json
new file mode 100644
index 00000000..d8df1baf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/iscsi_deployment_remote.json
@@ -0,0 +1,18 @@
+{
+ "he_bridge_if": "eth0",
+ "he_fqdn": "he-engine.example.com",
+ "he_vm_ip_addr": "192.168.1.214",
+ "he_vm_ip_prefix": "24",
+ "he_gateway": "192.168.1.1",
+ "he_dns_addr": "192.168.1.1",
+ "he_vm_etc_hosts": true,
+ "he_vm_mac_addr": "00:a5:3f:66:ba:12",
+ "he_domain_type": "iscsi",
+ "he_storage_domain_addr": "192.168.1.125",
+ "he_iscsi_portal_port": "3260",
+ "he_iscsi_tpgt": "1",
+ "he_iscsi_target": "iqn.2017-10.com.redhat.stirabos:he",
+ "he_lun_id": "36589cfc000000e8a909165bdfb47b3d9",
+ "he_mem_size_MB": "4096",
+ "he_ansible_host_name": "host123.localdomain"
+}
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/nfs_deployment.json b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/nfs_deployment.json
new file mode 100644
index 00000000..edd982e7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/nfs_deployment.json
@@ -0,0 +1,8 @@
+{
+ "he_bridge_if": "eth0",
+ "he_fqdn": "he-engine.example.com",
+ "he_vm_mac_addr": "00:a5:3f:66:ba:12",
+ "he_domain_type": "nfs",
+ "he_storage_domain_addr": "192.168.100.50",
+ "he_storage_domain_path": "/var/nfs_folder"
+}
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/passwords.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/passwords.yml
new file mode 100644
index 00000000..078194a3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/passwords.yml
@@ -0,0 +1,13 @@
+---
+# As an example this file is kept in plaintext; if you want to
+# encrypt this file, please execute the following command:
+#
+# $ ansible-vault encrypt passwords.yml
+#
+# It will ask you for a password, which you must then pass to
+# ansible interactively when executing the playbook.
+#
+# $ ansible-playbook myplaybook.yml --ask-vault-pass
+#
+he_appliance_password: 123456
+he_admin_password: 123456
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/required_networks_fix.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/required_networks_fix.yml
new file mode 100644
index 00000000..a1d15f3f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/examples/required_networks_fix.yml
@@ -0,0 +1,56 @@
+---
+# This is an example for a hook to fix restore-from-file errors
+# due to missing required networks.
+#
+# If you have an existing hosted-engine setup, and the Default cluster
+# has some required network, then if you take a backup and try to restore
+# it with '--restore-from-file', the deployment process cannot know which
+# host nic should be attached to the required network, and so activating
+# the host will fail. This will prompt the user to manually handle the
+# situation via the engine web admin UI.
+#
+# If you already know that beforehand, and want to automate restoration,
+# you can copy this file, edit it as needed, and place it in:
+# /usr/share/ansible/collections/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_add_host/
+# File name should end with '.yml'.
+#
+# For more details, see (also):
+# https://docs.ansible.com/ansible/latest/collections/ovirt/ovirt/ovirt_host_network_module.html
+# https://docs.ansible.com/ansible/latest/collections/ovirt/ovirt/ovirt_host_module.html
+
+- include_tasks: auth_sso.yml
+
+- name: Wait for the host to be up
+ ovirt_host_info:
+ pattern: name=myhost
+ auth: "{{ ovirt_auth }}"
+ register: host_result_up_check
+ until: >-
+ host_result_up_check is succeeded and
+ host_result_up_check.ovirt_hosts|length >= 1 and
+ (
+ host_result_up_check.ovirt_hosts[0].status == 'up' or
+ host_result_up_check.ovirt_hosts[0].status == 'non_operational'
+ )
+ retries: 120
+ delay: 10
+ ignore_errors: true
+
+- name: Handle non_operational myhost
+ block:
+ - name: Attach interface eth0 on host myhost to network net1
+ ovirt.ovirt.ovirt_host_network:
+ auth: "{{ ovirt_auth }}"
+ name: myhost
+ interface: eth0
+ networks:
+ - name: net1
+ - name: Activate host myhost
+ ovirt.ovirt.ovirt_host:
+ auth: "{{ ovirt_auth }}"
+ name: myhost
+ state: present
+ when: >-
+ host_result_up_check is succeeded and
+ host_result_up_check.ovirt_hosts|length >= 1 and
+ host_result_up_check.ovirt_hosts[0].status == 'non_operational'
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_add_host/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_add_host/README.md
new file mode 100644
index 00000000..54daacbe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_add_host/README.md
@@ -0,0 +1,3 @@
+# USAGE
+
+Place playbooks here to be executed after trying to add the host to the engine.
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_setup/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_setup/README.md
new file mode 100644
index 00000000..9ef275a4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_setup/README.md
@@ -0,0 +1,3 @@
+# USAGE
+
+Place playbooks here to be executed after hosted-engine-setup finishes. \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_setup/add_host_storage_domain.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_setup/add_host_storage_domain.yml
new file mode 100644
index 00000000..a4599164
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/after_setup/add_host_storage_domain.yml
@@ -0,0 +1,53 @@
+---
+- name: Include Host vars
+ include_vars: "{{ file_item }}"
+ with_fileglob: "/usr/share/ovirt-hosted-engine-setup/gdeploy-inventory.yml"
+ loop_control:
+ loop_var: file_item
+- debug: var=gluster
+
+- name: Set Engine public key as authorized key without validating the TLS/SSL certificates
+ connection: ssh
+ authorized_key:
+ user: root
+ state: present
+ key: https://{{ he_fqdn }}/ovirt-engine/services/pki-resource?resource=engine-certificate&format=OPENSSH-PUBKEY
+ validate_certs: false
+ delegate_to: "{{ host }}"
+ with_items: "{{ gluster.hosts }}"
+ loop_control:
+ loop_var: host
+ when: "gluster is defined and 'hosts' in gluster"
+
+- name: Add additional gluster hosts to engine
+ async: 50
+ poll: 0
+ ignore_errors: true
+ ovirt_host:
+ cluster: "{{ he_cluster }}"
+ name: "{{ host }}"
+ address: "{{ host }}"
+ state: present
+ public_key: true
+ auth: "{{ ovirt_auth }}"
+ hosted_engine: deploy
+ with_items: "{{ gluster.hosts }}"
+ loop_control:
+ loop_var: host
+ when: "gluster is defined and 'hosts' in gluster and gluster.hosts | length > 1"
+
+- name: "Add additional glusterfs storage domains"
+ ignore_errors: true
+ ovirt_storage_domain:
+ name: "{{ sd.name }}"
+ host: "{{ he_host_name }}"
+ auth: "{{ ovirt_auth }}"
+ data_center: "{{ datacenter_name }}"
+ glusterfs:
+ address: "{{ he_storage_domain_addr }}"
+ mount_options: "{{ sd.mount_options }}"
+ path: "{{ sd.path }}"
+ with_items: "{{ gluster.vars.storage_domains }}"
+ loop_control:
+ loop_var: sd
+ when: "gluster is defined and 'hosts' in gluster and 'vars' in gluster"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/enginevm_after_engine_setup/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/enginevm_after_engine_setup/README.md
new file mode 100644
index 00000000..8fcfda4a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/enginevm_after_engine_setup/README.md
@@ -0,0 +1,3 @@
+# USAGE
+
+Place playbooks here to be executed on the engine VM after engine-setup finishes. \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/enginevm_before_engine_setup/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/enginevm_before_engine_setup/README.md
new file mode 100644
index 00000000..1bd04802
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/hooks/enginevm_before_engine_setup/README.md
@@ -0,0 +1,3 @@
+# USAGE
+
+Place playbooks here to be executed on the engine VM before engine-setup starts. \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/add_engine_as_ansible_host.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/add_engine_as_ansible_host.yml
new file mode 100644
index 00000000..90f13b8f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/add_engine_as_ansible_host.yml
@@ -0,0 +1,24 @@
+---
+- name: Add the engine VM as an ansible host
+ block:
+ - name: Fetch the value of HOST_KEY_CHECKING
+ set_fact: host_key_checking="{{ lookup('config', 'HOST_KEY_CHECKING') }}"
+ - debug: var=host_key_checking
+ - name: Get the username running the deploy
+ become: false
+ command: whoami
+ register: username_on_host
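+    # The bootstrap engine VM lives on a NATed libvirt network local to
+    # the deployment host, so when that host is not localhost, SSH to the
+    # engine VM is proxied through it with the ProxyCommand set below.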
+ - name: Register the engine FQDN as a host
+ add_host:
+ name: "{{ he_fqdn }}"
+ groups: engine
+ ansible_connection: smart
+ ansible_ssh_extra_args: >-
+ -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {% if he_ansible_host_name != "localhost" %}
+ -o ProxyCommand="ssh -W %h:%p -q
+ {% if not host_key_checking %} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {% endif %}
+ {{ username_on_host.stdout }}@{{ he_ansible_host_name }}" {% endif %}
+ ansible_ssh_pass: "{{ he_appliance_password }}"
+ ansible_user: root
+ no_log: true
+ ignore_errors: true
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/alter_libvirt_default_net_configuration.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/alter_libvirt_default_net_configuration.yml
new file mode 100644
index 00000000..ef813c31
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/alter_libvirt_default_net_configuration.yml
@@ -0,0 +1,99 @@
+---
+- name: Parse libvirt default network configuration
+ virt_net:
+ command: get_xml
+ name: default
+ register: default_net_xml
+- debug: var=default_net_xml.stdout
+
+- name: IPv6 configuration
+ block:
+ - name: Remove IPv4 configuration
+ xml:
+ xmlstring: "{{ default_net_xml.get_xml }}"
+ xpath: /network/ip
+ state: absent
+ register: editednet_noipv4
+ - name: Configure it as an isolated network
+ xml:
+ xmlstring: "{{ editednet_noipv4.xmlstring }}"
+ xpath: /network/forward
+ state: absent
+ register: editednet_isolated
+ - name: Edit libvirt default network configuration, set IPv6 address
+ xml:
+ xmlstring: "{{ editednet_isolated.xmlstring }}"
+ xpath: /network/ip[@family='ipv6']
+ attribute: address
+ value: "{{ he_ipv6_subnet_prefix + '::1' }}"
+ register: editednet1
+ - name: Edit libvirt default network configuration, set IPv6 prefix
+ xml:
+ xmlstring: "{{ editednet1.xmlstring }}"
+ xpath: /network/ip[@family='ipv6']
+ attribute: prefix
+ value: "64"
+ register: editednet2
+ - debug: var=editednet2
+ - name: Edit libvirt default network configuration, enable DHCPv6
+ xml:
+ xmlstring: "{{ editednet2.xmlstring }}"
+ xpath: /network/ip[@family='ipv6']/dhcp/range
+ attribute: start
+ value: "{{ he_ipv6_subnet_prefix + '::10' }}"
+ register: editednet3
+ - debug: var=editednet3
+ - name: Edit libvirt default network configuration, set DHCPv6 range
+ xml:
+ xmlstring: "{{ editednet3.xmlstring }}"
+ xpath: /network/ip[@family='ipv6']/dhcp/range
+ attribute: end
+ value: "{{ he_ipv6_subnet_prefix + '::ff' }}"
+ register: finaledit6
+    - debug: var=finaledit6
+ when: ipv6_deployment|bool
+
+- name: IPv4 configuration
+ block:
+ - name: Edit libvirt default network configuration, change default address
+ xml:
+ xmlstring: "{{ default_net_xml.get_xml }}"
+ xpath: /network/ip
+ attribute: address
+ value: "{{ he_ipv4_subnet_prefix + '.1' }}"
+ register: editednet1
+ - name: Edit libvirt default network configuration, change DHCP start range
+ xml:
+ xmlstring: "{{ editednet1.xmlstring }}"
+ xpath: /network/ip/dhcp/range
+ attribute: start
+ value: "{{ he_ipv4_subnet_prefix + '.2' }}"
+ register: editednet2
+ - name: Edit libvirt default network configuration, change DHCP end range
+ xml:
+ xmlstring: "{{ editednet2.xmlstring }}"
+ xpath: /network/ip/dhcp/range
+ attribute: end
+ value: "{{ he_ipv4_subnet_prefix + '.254' }}"
+ register: finaledit4
+ when: not ipv6_deployment|bool
+
+- name: Update libvirt default network configuration, destroy
+ virt_net:
+ command: destroy
+ name: default
+- name: Update libvirt default network configuration, undefine
+ virt_net:
+ command: undefine
+ name: default
+ ignore_errors: true
+- name: Update libvirt default network configuration, define
+ virt_net:
+ command: define
+ name: default
+ xml: "{{ finaledit6.xmlstring if ipv6_deployment else finaledit4.xmlstring }}"
+- name: Activate default libvirt network
+ virt_net:
+ name: default
+ state: active
+ register: virt_net_out
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/apply_openscap_profile.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/apply_openscap_profile.yml
new file mode 100644
index 00000000..39bc4875
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/apply_openscap_profile.yml
@@ -0,0 +1,48 @@
+---
+- name: Initialize OpenSCAP variables
+ set_fact:
+ oscap_dir: "/usr/share/xml/scap/ssg/content"
+ oscap_dist: "{{ ansible_distribution | replace('RedHat', 'rhel') | lower }}"
+ oscap_ver: "{{ ansible_distribution_major_version if ansible_distribution != 'Fedora' else '' }}"
+- name: Set OpenSCAP datastream path
+ set_fact:
+ oscap_datastream: "{{ oscap_dir }}/ssg-{{ oscap_dist }}{{ oscap_ver }}-ds.xml"
+- debug: var=oscap_datastream
+- name: Verify OpenSCAP datastream
+ stat:
+ path: "{{ oscap_datastream }}"
+ register: oscap_ds_stat
+- name: Set default OpenSCAP profile
+ shell: >-
+ set -euo pipefail && oscap info --profiles {{ oscap_datastream }} |
+ grep -Ei "(standard|disa)" | sort | tail -1 | cut -d':' -f1
+ register: oscap_profile
+ changed_when: true
+ when: oscap_ds_stat.stat.exists
+- debug: var=oscap_profile
+- name: Apply OpenSCAP profile
+ command: >-
+ oscap xccdf eval --profile {{ oscap_profile.stdout }} --remediate
+ --report /root/openscap-report.html {{ oscap_datastream }}
+ failed_when: false
+ ignore_errors: true
+ changed_when: true
+- name: Reset PermitRootLogin for sshd
+ lineinfile: dest=/etc/ssh/sshd_config
+ regexp="^\s*PermitRootLogin"
+ line="PermitRootLogin yes"
+ state=present
+- name: Reboot the engine VM to ensure that FIPS is enabled
+ reboot:
+ reboot_timeout: 1200
+- block:
+ - name: Check if FIPS is enabled
+ command: sysctl -n crypto.fips_enabled
+ changed_when: true
+ register: he_fips_enabled
+ - debug: var=he_fips_enabled
+ - name: Enforce FIPS mode
+ fail:
+ msg: "FIPS mode is not enabled as required"
+ when: he_fips_enabled.stdout != "1"
+ when: ansible_distribution is search("RedHat")
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/auth_revoke.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/auth_revoke.yml
new file mode 100644
index 00000000..3619ae7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/auth_revoke.yml
@@ -0,0 +1,6 @@
+---
+- name: Always revoke the SSO token
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_sso_auth }}"
+ ignore_errors: true
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/auth_sso.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/auth_sso.yml
new file mode 100644
index 00000000..ba7dcd50
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/auth_sso.yml
@@ -0,0 +1,13 @@
+---
+- name: Obtain SSO token using username/password credentials
+ # TODO: remove library/ovirt_auth.py when Ansible 2.5 is out explicitly requiring it
+ environment:
+ OVIRT_URL: https://{{ he_fqdn }}/ovirt-engine/api
+ OVIRT_USERNAME: admin@internal
+ OVIRT_PASSWORD: "{{ he_admin_password }}"
+ ovirt_auth:
+ insecure: true
+ register: ovirt_sso_auth
+ until: ovirt_sso_auth is succeeded
+ retries: 50
+ delay: 10
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/01_prepare_routing_rules.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/01_prepare_routing_rules.yml
new file mode 100644
index 00000000..90697fdf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/01_prepare_routing_rules.yml
@@ -0,0 +1,110 @@
+---
+- name: Prepare routing rules
+ block:
+ - name: Check IPv6
+ set_fact:
+ ipv6_deployment: >-
+ {{ true if he_host_ip not in target_address_v4.stdout_lines and
+ he_host_ip in target_address_v6.stdout_lines
+ else false }}
+ - include_tasks: validate_ip_prefix.yml
+ - include_tasks: alter_libvirt_default_net_configuration.yml
+  # All of the following is a workaround for a network issue:
+  # the vdsm installation breaks routing by defining a separate
+  # routing table for ovirtmgmt, but we need to enable communication
+  # between virbr0 and ovirtmgmt.
+ - name: Start libvirt
+ service:
+ name: libvirtd
+ state: started
+ enabled: true
+ - name: Activate default libvirt network
+ virt_net:
+ name: default
+ state: active
+ register: virt_net_out
+ - debug: var=virt_net_out
+ - name: Get libvirt interfaces
+ virt_net:
+ command: facts
+ - name: Get routing rules, IPv4
+ command: ip -j rule
+ environment: "{{ he_cmd_lang }}"
+ register: route_rules_ipv4
+ changed_when: true
+ - debug: var=route_rules_ipv4
+ - name: Get routing rules, IPv6
+ command: ip -6 rule
+ environment: "{{ he_cmd_lang }}"
+ register: route_rules_ipv6
+ changed_when: true
+ when: ipv6_deployment|bool
+ - debug: var=route_rules_ipv6
+ - name: Save bridge name
+ set_fact:
+ virbr_default: "{{ ansible_libvirt_networks['default']['bridge'] }}"
+ - name: Wait for the bridge to appear on the host
+ command: ip link show {{ virbr_default }}
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: ip_link_show_bridge
+ until: ip_link_show_bridge.rc == 0
+ retries: 30
+ delay: 3
+ - name: Refresh network facts
+ setup:
+ tags: ['skip_ansible_lint']
+ - name: Fetch IPv4 CIDR for {{ virbr_default }}
+ set_fact:
+ virbr_cidr_ipv4: >-
+ {{ (hostvars[inventory_hostname]['ansible_'+virbr_default]['ipv4']['address']+'/'
+ +hostvars[inventory_hostname]['ansible_'+virbr_default]['ipv4']['netmask']) |ipv4('host/prefix') }}
+ when: not ipv6_deployment|bool
+ - debug: var=virbr_cidr_ipv4
+ - name: Fetch IPv6 CIDR for {{ virbr_default }}
+ set_fact:
+ virbr_cidr_ipv6: >-
+ {{ (hostvars[inventory_hostname]['ansible_'+virbr_default]['ipv6'][0]['address']+'/'+
+ hostvars[inventory_hostname]['ansible_'+virbr_default]['ipv6'][0]['prefix']) |
+ ipv6('host/prefix') if 'ipv6' in hostvars[inventory_hostname]['ansible_'+virbr_default] else None }}
+ when: ipv6_deployment|bool
+ - debug: var=virbr_cidr_ipv6
+ - name: Add IPv4 outbound route rules
+ command: ip rule add from {{ virbr_cidr_ipv4 }} priority 101 table main
+ environment: "{{ he_cmd_lang }}"
+ register: result
+ when: >-
+ not ipv6_deployment|bool and
+ route_rules_ipv4.stdout | from_json |
+ selectattr('priority', 'equalto', 101) |
+ selectattr('src', 'equalto', virbr_cidr_ipv4 | ipaddr('address') ) |
+ list | length == 0
+ changed_when: true
+ - debug: var=result
+ - name: Add IPv4 inbound route rules
+ command: ip rule add from all to {{ virbr_cidr_ipv4 }} priority 100 table main
+ environment: "{{ he_cmd_lang }}"
+ register: result
+ changed_when: true
+ when: >-
+ not ipv6_deployment|bool and
+ route_rules_ipv4.stdout | from_json |
+ selectattr('priority', 'equalto', 100) |
+ selectattr('dst', 'equalto', virbr_cidr_ipv4 | ipaddr('address') ) |
+ list | length == 0
+ - debug: var=result
+ - name: Add IPv6 outbound route rules
+ command: ip -6 rule add from {{ virbr_cidr_ipv6 }} priority 101 table main
+ environment: "{{ he_cmd_lang }}"
+ register: result
+      when: >-
+        ipv6_deployment|bool and
+        ("101:\tfrom " + virbr_cidr_ipv6 + " lookup main") not in route_rules_ipv6.stdout
+ changed_when: true
+ - debug: var=result
+ - name: Add IPv6 inbound route rules
+ command: ip -6 rule add from all to {{ virbr_cidr_ipv6 }} priority 100 table main
+ environment: "{{ he_cmd_lang }}"
+ register: result
+ changed_when: true
+ when: >-
+        ipv6_deployment|bool and
+        ("100:\tfrom all to " + virbr_cidr_ipv6 + " lookup main") not in route_rules_ipv6.stdout
+ - debug: var=result
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/02_create_local_vm.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/02_create_local_vm.yml
new file mode 100644
index 00000000..7e6dca3b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/02_create_local_vm.yml
@@ -0,0 +1,144 @@
+---
+- name: Create hosted engine local vm
+ block:
+ - import_tasks: add_engine_as_ansible_host.yml
+ - name: Initial tasks
+ block:
+ - name: Get host unique id
+ shell: |
+ if [ -e /etc/vdsm/vdsm.id ];
+ then cat /etc/vdsm/vdsm.id;
+ elif [ -e /proc/device-tree/system-id ];
+ then cat /proc/device-tree/system-id; #ppc64le
+ else dmidecode -s system-uuid;
+ fi;
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: unique_id_out
+ - name: Create directory for local VM
+ tempfile:
+ state: directory
+ path: "{{ he_local_vm_dir_path }}"
+ prefix: "{{ he_local_vm_dir_prefix }}"
+ register: otopi_localvm_dir
+ - name: Set local vm dir path
+ set_fact:
+ he_local_vm_dir: "{{ otopi_localvm_dir.path }}"
+ - name: Fix local VM directory permission
+ file:
+ state: directory
+ path: "{{ he_local_vm_dir }}"
+ owner: vdsm
+ group: kvm
+ mode: 0775
+ - include_tasks: install_appliance.yml
+ when: he_appliance_ova is none or he_appliance_ova|length == 0
+ - name: Register appliance PATH
+ set_fact:
+ he_appliance_ova_path: "{{ he_appliance_ova }}"
+ when: he_appliance_ova is not none and he_appliance_ova|length > 0
+ - debug: var=he_appliance_ova_path
+ - name: Check available space on local VM directory
+ shell: df -k --output=avail "{{ he_local_vm_dir_path }}" | grep -v Avail | cat
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: local_vm_dir_space_out
+ - name: Check appliance size
+ shell: zcat "{{ he_appliance_ova_path }}" | wc --bytes
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: appliance_size
+ - name: Ensure we have enough space to extract the appliance
+ assert:
+ that:
+ - "local_vm_dir_space_out.stdout_lines[0]|int * 1024 > appliance_size.stdout_lines[0]|int * 1.1"
+ msg: >
+ {{ he_local_vm_dir_path }} doesn't provide enough free space to extract the
+ engine appliance: {{ local_vm_dir_space_out.stdout_lines[0]|int / 1024 | int }} Mb
+ are available while {{ appliance_size.stdout_lines[0]|int / 1024 / 1024 * 1.1 | int }} Mb
+ are required.
+ - name: Extract appliance to local VM directory
+ unarchive:
+ remote_src: true
+ src: "{{ he_appliance_ova_path }}"
+ dest: "{{ he_local_vm_dir }}"
+ extra_opts: ['--sparse']
+ - include_tasks: get_local_vm_disk_path.yml
+ - name: Get appliance disk size
+ command: qemu-img info --output=json {{ local_vm_disk_path }}
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: qemu_img_out
+ - debug: var=qemu_img_out
+ - name: Parse qemu-img output
+ set_fact:
+ virtual_size={{ qemu_img_out.stdout|from_json|json_query('"virtual-size"') }}
+ register: otopi_appliance_disk_size
+ - debug: var=virtual_size
+ - name: Hash the appliance root password
+ set_fact:
+ he_hashed_appliance_password: "{{ he_appliance_password | string | password_hash('sha512') }}"
+ no_log: true
+ - name: Create cloud init user-data and meta-data files
+ template:
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ with_items:
+ - {src: templates/user-data.j2, dest: "{{ he_local_vm_dir }}/user-data"}
+ - {src: templates/meta-data.j2, dest: "{{ he_local_vm_dir }}/meta-data"}
+ - {src: templates/network-config-dhcp.j2, dest: "{{ he_local_vm_dir }}/network-config"}
+ - name: Create ISO disk
+ command: >-
+ mkisofs -output {{ he_local_vm_dir }}/seed.iso -volid cidata -joliet -rock -input-charset utf-8
+ {{ he_local_vm_dir }}/meta-data {{ he_local_vm_dir }}/user-data
+ {{ he_local_vm_dir }}/network-config
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ - name: Create local VM
+ command: >-
+ virt-install -n {{ he_vm_name }}Local --os-variant rhel8.0 --virt-type kvm --memory {{ he_mem_size_MB }}
+ --vcpus {{ he_vcpus }} --network network=default,mac={{ he_vm_mac_addr }},model=virtio
+ --disk {{ local_vm_disk_path }} --import --disk path={{ he_local_vm_dir }}/seed.iso,device=cdrom
+ --noautoconsole --rng /dev/random --graphics vnc --video vga --sound none --controller usb,model=none
+ --memballoon none --boot hd,menu=off --clock kvmclock_present=yes
+ environment: "{{ he_cmd_lang }}"
+ register: create_local_vm
+ changed_when: true
+ - debug: var=create_local_vm
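+ # The bootstrap VM gets its address via DHCP on the libvirt 'default'
+ # network; poll the lease table for up to 15 minutes (90 retries x 10s).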
+ - name: Get local VM IP
+ shell: virsh -r net-dhcp-leases default | grep -i {{ he_vm_mac_addr }} | awk '{ print $5 }' | cut -f1 -d'/'
+ environment: "{{ he_cmd_lang }}"
+ register: local_vm_ip
+ until: local_vm_ip.stdout_lines|length >= 1
+ retries: 90
+ delay: 10
+ changed_when: true
+ - debug: var=local_vm_ip
+ - name: Remove leftover entries in /etc/hosts for the local VM
+ lineinfile:
+ dest: /etc/hosts
+ regexp: "# temporary entry added by hosted-engine-setup for the bootstrap VM$"
+ state: absent
+ - name: Create an entry in /etc/hosts for the local VM
+ lineinfile:
+ dest: /etc/hosts
+ line:
+ "{{ local_vm_ip.stdout_lines[0] }} \
+ {{ he_fqdn }} # temporary entry added by hosted-engine-setup for the bootstrap VM"
+ insertbefore: BOF
+ backup: true
+ - name: Wait for SSH to restart on the local VM
+ wait_for:
+ host='{{ he_fqdn }}'
+ port=22
+ delay=30
+ timeout=300
+ rescue:
+ - include_tasks: clean_localvm_dir.yml
+ - include_tasks: clean_local_storage_pools.yml
+ - name: Notify the user about a failure
+ fail:
+ msg: >
+ The system may not be provisioned according to the playbook
+ results; please check the logs for the issue,
+ fix accordingly or re-deploy from scratch.
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/03_engine_initial_tasks.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/03_engine_initial_tasks.yml
new file mode 100644
index 00000000..4232ef5c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/03_engine_initial_tasks.yml
@@ -0,0 +1,91 @@
+---
+- name: Initial engine tasks
+ block:
+ - name: Wait for the local VM
+ wait_for_connection:
+ delay: 5
+ timeout: 3600
+ - name: Add an entry for this host on /etc/hosts on the local VM
+ lineinfile:
+ dest: /etc/hosts
+ line: >-
+ {{ hostvars[he_ansible_host_name]['he_host_ip'] }} {{ hostvars[he_ansible_host_name]['he_host_address'] }}
+ - name: Set FQDN
+ command: hostnamectl set-hostname {{ he_fqdn }}
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ - name: Force the local VM FQDN to temporarily resolve to the NATed network address
+ lineinfile:
+ path: /etc/hosts
+ line:
+ "{{ hostvars[he_ansible_host_name]['local_vm_ip']['stdout_lines'][0] }} {{ he_fqdn }} # hosted-engine-setup-{{ \
+ hostvars[he_ansible_host_name]['he_local_vm_dir'] }}"
+ - name: Reconfigure IPv6 default gateway
+ command: ip -6 route add default via "{{ he_ipv6_subnet_prefix + '::1' }}"
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ when: hostvars[he_ansible_host_name]['ipv6_deployment']|bool
+ - name: Restore sshd reverse DNS lookups
+ lineinfile:
+ path: /etc/ssh/sshd_config
+ regexp: '^UseDNS'
+ line: "UseDNS yes"
+ - name: Add lines to answerfile
+ lineinfile:
+ path: /root/ovirt-engine-answers
+ line: "{{ item }}"
+ no_log: true
+ with_items:
+ - "OVESETUP_CONFIG/adminPassword=str:{{ he_admin_password }}"
+ - name: Add lines to answerfile
+ lineinfile:
+ path: /root/ovirt-engine-answers
+ line: "{{ item }}"
+ no_log: true
+ with_items:
+ - "OVESETUP_DB/password=str:{{ he_db_password }}"
+ when: he_db_password is defined
+ - name: Add lines to answerfile
+ lineinfile:
+ path: /root/ovirt-engine-answers
+ line: "{{ item }}"
+ no_log: true
+ with_items:
+ - "OVESETUP_DWH_DB/password=str:{{ he_dwh_db_password }}"
+ when: he_dwh_db_password is defined
+ - name: Import OpenSCAP task
+ import_tasks: apply_openscap_profile.yml
+ when: he_apply_openscap_profile|bool
+ - name: Include before engine-setup custom tasks files for the engine VM
+ include_tasks: "{{ item }}"
+ with_fileglob: "hooks/enginevm_before_engine_setup/*.yml"
+ register: include_before_engine_setup_results
+ - debug: var=include_before_engine_setup_results
+ - name: Restore a backup
+ block:
+ - include_tasks: restore_backup.yml
+ when: he_restore_from_file is defined and he_restore_from_file
+ rescue:
+ - name: Sync on engine machine
+ command: sync
+ changed_when: true
+ - name: Fetch logs from the engine VM
+ import_tasks: fetch_engine_logs.yml
+ ignore_errors: true
+ delegate_to: "{{ he_ansible_host_name }}"
+ - name: Get local VM dir path
+ set_fact:
+ he_local_vm_dir={{ hostvars[he_ansible_host_name]['he_local_vm_dir'] }}
+ - name: Clean bootstrap VM
+ import_tasks: clean_localvm_dir.yml
+ delegate_to: "{{ he_ansible_host_name }}"
+ - name: Clean local storage pools
+ import_tasks: clean_local_storage_pools.yml
+ delegate_to: "{{ he_ansible_host_name }}"
+ - name: Notify the user about a failure
+ fail:
+ msg: >
+ There was a failure deploying the engine on the local engine VM.
+ The system may not be provisioned according to the playbook
+ results; please check the logs for the issue,
+ fix accordingly or re-deploy from scratch.
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/04_engine_final_tasks.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/04_engine_final_tasks.yml
new file mode 100644
index 00000000..b6cef493
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/04_engine_final_tasks.yml
@@ -0,0 +1,78 @@
+---
+- name: Final engine tasks
+ block:
+ - name: Include after engine-setup custom tasks files for the engine VM
+ include_tasks: "{{ item }}"
+ with_fileglob: "hooks/enginevm_after_engine_setup/*.yml"
+ register: include_after_engine_setup_results
+ - debug: var=include_after_engine_setup_results
+ # After a restart the engine has a 5 minute grace time,
+ # other actions like electing a new SPM host or reconstructing
+ # the master storage domain could require more time
+ - name: Wait for the engine to reach a stable condition
+ wait_for: timeout=600
+ when: he_restore_from_file is defined and he_restore_from_file
+ - name: Configure LibgfApi support
+ command: engine-config -s LibgfApiSupported=true --cver=4.2
+ environment: "{{ he_cmd_lang }}"
+ register: libgfapi_support_out
+ changed_when: true
+ when: he_enable_libgfapi|bool
+ - debug: var=libgfapi_support_out
+ - name: Save original OvfUpdateIntervalInMinutes
+ shell: "engine-config -g OvfUpdateIntervalInMinutes | cut -d' ' -f2 > /root/OvfUpdateIntervalInMinutes.txt"
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ - name: Set OVF update interval to 1 minute
+ command: engine-config -s OvfUpdateIntervalInMinutes=1
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
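+ # SSO only accepts logins on FQDNs listed in SSO_ALTERNATE_ENGINE_FQDNS;
+ # the host address is added here so the port-forwarded webadmin UI works,
+ # and the original value is restored later in
+ # create_target_vm/02_engine_vm_configuration.yml.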
+ - name: Allow the webadmin UI to be accessed over the first host
+ block:
+ - name: Saving original value
+ replace:
+ path: /etc/ovirt-engine/engine.conf.d/11-setup-sso.conf
+ regexp: '^(SSO_ALTERNATE_ENGINE_FQDNS=.*)'
+ replace: '#\1 # pre hosted-engine-setup'
+ - name: Adding new SSO_ALTERNATE_ENGINE_FQDNS line
+ lineinfile:
+ path: /etc/ovirt-engine/engine.conf.d/11-setup-sso.conf
+ line: 'SSO_ALTERNATE_ENGINE_FQDNS="{{ he_host_address }}" # hosted-engine-setup'
+ - name: Restart ovirt-engine service for changed OVF Update configuration and LibgfApi support
+ systemd:
+ state: restarted
+ name: ovirt-engine
+ register: restart_out
+ - debug: var=restart_out
+ - name: Mask cloud-init services to speed up future boot
+ systemd:
+ masked: true
+ name: "{{ item }}"
+ with_items:
+ - cloud-init-local
+ - cloud-init
+ rescue:
+ - name: Sync on engine machine
+ command: sync
+ changed_when: true
+ - name: Fetch logs from the engine VM
+ import_tasks: fetch_engine_logs.yml
+ ignore_errors: true
+ delegate_to: "{{ he_ansible_host_name }}"
+ - name: Get local VM dir path
+ set_fact:
+ he_local_vm_dir={{ hostvars[he_ansible_host_name]['he_local_vm_dir'] }}
+ - name: Clean bootstrap VM
+ import_tasks: clean_localvm_dir.yml
+ delegate_to: "{{ he_ansible_host_name }}"
+ - name: Clean local storage pools
+ import_tasks: clean_local_storage_pools.yml
+ delegate_to: "{{ he_ansible_host_name }}"
+ - name: Notify the user about a failure
+ fail:
+ msg: >-
+ There was a failure deploying the engine on the local engine VM.
+ The system may not be provisioned according to the playbook results;
+ please check the logs for the issue,
+ fix accordingly or re-deploy from scratch.
+...
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/05_add_host.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/05_add_host.yml
new file mode 100644
index 00000000..b5c76994
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/bootstrap_local_vm/05_add_host.yml
@@ -0,0 +1,281 @@
+---
+- name: Add host
+ block:
+ - name: Wait for ovirt-engine service to start
+ uri:
+ url: http://{{ he_fqdn }}/ovirt-engine/services/health
+ return_content: true
+ register: engine_status
+ until: "'DB Up!Welcome to Health Status!' in engine_status.content"
+ retries: 30
+ delay: 20
+ - debug: var=engine_status
+ - name: Open a port on firewalld
+ firewalld:
+ port: "{{ he_webui_forward_port }}/tcp"
+ permanent: false
+ immediate: true
+ state: enabled
+ - name: Expose engine VM webui over a local port via ssh port forwarding
+ command: >-
+ sshpass -e ssh -tt -o ServerAliveInterval=5 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -g -L
+ {{ he_webui_forward_port }}:{{ he_fqdn }}:443 {{ he_fqdn }}
+ environment:
+ - "{{ he_cmd_lang }}"
+ - SSHPASS: "{{ he_appliance_password }}"
+ changed_when: true
+ async: 86400
+ poll: 0
+ register: sshpf
+ - debug: var=sshpf
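+ # async: 86400 with poll: 0 leaves the ssh port-forwarding process
+ # running in the background for up to a day while the deployment
+ # proceeds.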
+ - name: Evaluate temporary bootstrap engine URL
+ set_fact: bootstrap_engine_url="https://{{ he_host_address }}:{{ he_webui_forward_port }}/ovirt-engine/"
+ - debug:
+ msg: >-
+ The bootstrap engine is temporarily accessible at {{ bootstrap_engine_url }}
+ - name: Detect VLAN ID
+ shell: ip -d link show {{ he_bridge_if }} | grep 'vlan ' | grep -Po 'id \K[\d]+' | cat
+ environment: "{{ he_cmd_lang }}"
+ register: vlan_id_out
+ changed_when: true
+ - debug: var=vlan_id_out
+ - name: Set Engine public key as authorized key without validating the TLS/SSL certificates
+ authorized_key:
+ user: root
+ state: present
+ key: https://{{ he_fqdn }}/ovirt-engine/services/pki-resource?resource=engine-certificate&format=OPENSSH-PUBKEY
+ validate_certs: false
+ - include_tasks: auth_sso.yml
+ - name: Ensure that the target datacenter is present
+ ovirt_datacenter:
+ state: present
+ name: "{{ he_data_center }}"
+ compatibility_version: "{{ he_data_center_comp_version | default(omit) }}"
+ wait: true
+ local: false
+ auth: "{{ ovirt_auth }}"
+ register: dc_result_presence
+ - name: Ensure that the target cluster is present in the target datacenter
+ ovirt_cluster:
+ state: present
+ name: "{{ he_cluster }}"
+ compatibility_version: "{{ he_cluster_comp_version | default(omit) }}"
+ data_center: "{{ he_data_center }}"
+ cpu_type: "{{ he_cluster_cpu_type | default(omit) }}"
+ wait: true
+ auth: "{{ ovirt_auth }}"
+ register: cluster_result_presence
+ - name: Check actual cluster location
+ fail:
+ msg: >-
+ A cluster named '{{ he_cluster }}' was created earlier in a different
+ datacenter, and moving a cluster between datacenters is not yet supported.
+ You can avoid this by specifying a different cluster name;
+ please fix accordingly and try again.
+ when: cluster_result_presence.cluster.data_center.id != dc_result_presence.datacenter.id
+ - name: Enable GlusterFS at cluster level
+ ovirt_cluster:
+ data_center: "{{ he_data_center }}"
+ name: "{{ he_cluster }}"
+ compatibility_version: "{{ he_cluster_comp_version | default(omit) }}"
+ auth: "{{ ovirt_auth }}"
+ virt: true
+ gluster: true
+ fence_skip_if_gluster_bricks_up: true
+ fence_skip_if_gluster_quorum_not_met: true
+ when: he_enable_hc_gluster_service is defined and he_enable_hc_gluster_service
+ - name: Set VLAN ID at datacenter level
+ ovirt_network:
+ data_center: "{{ he_data_center }}"
+ name: "{{ he_mgmt_network }}"
+ vlan_tag: "{{ vlan_id_out.stdout }}"
+ auth: "{{ ovirt_auth }}"
+ when: vlan_id_out.stdout|length > 0
+ - name: Get the list of active firewalld zones
+ shell: set -euo pipefail && firewall-cmd --get-active-zones | grep -v "^\s*interfaces"
+ environment: "{{ he_cmd_lang }}"
+ register: active_f_zone
+ changed_when: true
+ - name: Configure libvirt firewalld zone
+ firewalld:
+ zone: libvirt
+ service: "{{ service_item }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ with_items:
+ - vdsm
+ - libvirt-tls
+ - ovirt-imageio
+ - ovirt-vmconsole
+ - ssh
+ loop_control:
+ loop_var: service_item
+ when: "'libvirt' in active_f_zone.stdout_lines"
+ - name: Add host
+ ovirt_host:
+ cluster: "{{ he_cluster }}"
+ name: "{{ he_host_name }}"
+ state: present
+ public_key: true
+ address: "{{ he_host_address }}"
+ auth: "{{ ovirt_auth }}"
+ async: 1
+ poll: 0
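+ # async: 1 / poll: 0 fires the host-add request without waiting; host
+ # deployment progress is monitored below by polling ovirt_host_info.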
+ - name: Include after_add_host tasks files
+ include_tasks: "{{ item }}"
+ with_fileglob: "hooks/after_add_host/*.yml"
+ register: include_after_add_host_results
+ - debug: var=include_after_add_host_results
+ - name: Pause the execution to let the user interactively reconfigure the host
+ block:
+ - name: Let the user connect to the bootstrap engine to manually fix host configuration
+ debug:
+ msg: >-
+ You can now connect to {{ bootstrap_engine_url }} and check the status of this host and
+ remediate it if needed; please continue only when the host is listed as 'up'
+ - include_tasks: pause_execution.yml
+ when: he_pause_host|bool
+ # refresh the auth token after a long operation to avoid having it expired
+ - include_tasks: auth_revoke.yml
+ - include_tasks: auth_sso.yml
+ - name: Wait for the host to be up
+ ovirt_host_info:
+ pattern: name={{ he_host_name }}
+ auth: "{{ ovirt_auth }}"
+ register: host_result_up_check
+ until: >-
+ host_result_up_check is succeeded and
+ host_result_up_check.ovirt_hosts|length >= 1 and
+ (
+ host_result_up_check.ovirt_hosts[0].status == 'up' or
+ host_result_up_check.ovirt_hosts[0].status == 'non_operational'
+ )
+ retries: 120
+ delay: 10
+ ignore_errors: true
+ - debug: var=host_result_up_check
+ - name: Notify the user about a failure
+ fail:
+ msg: >-
+ Host is not up; please check the logs, perhaps also on the engine machine
+ when: host_result_up_check is failed
+
+ - name: Emit error messages about the failure
+ block:
+ - set_fact: host_id={{ host_result_up_check.ovirt_hosts[0].id }}
+ - name: Collect error events from the Engine
+ ovirt_event_info:
+ auth: "{{ ovirt_auth }}"
+ search: "severity>=warning"
+ register: error_events
+
+ - name: Generate the error message from the engine events
+ set_fact:
+ error_description: >-
+ {% for event in error_events.ovirt_events | groupby('code') %}
+ {% if 'host' in event[1][0] and 'id' in event[1][0].host and event[1][0].host.id == host_id %}
+ code {{ event[0] }}: {{ event[1][0].description }},
+ {% endif %}
+ {% endfor %}
+ ignore_errors: true
+
+ - name: Notify with error description
+ debug:
+ msg: >-
+ The host has been set in non_operational status,
+ deployment errors: {{ error_description }}
+ when: error_description is defined
+
+ - name: Notify with generic error
+ debug:
+ msg: >-
+ The host has been set in non_operational status;
+ please check the engine logs for more information.
+ when: error_description is not defined
+ when: >-
+ host_result_up_check is succeeded and
+ host_result_up_check.ovirt_hosts|length >= 1 and
+ host_result_up_check.ovirt_hosts[0].status == 'non_operational'
+
+ - name: Pause the execution to let the user interactively reconfigure the host
+ block:
+ - name: Let the user connect to the bootstrap engine to manually fix host configuration
+ debug:
+ msg: >-
+ You can now connect to {{ bootstrap_engine_url }} and check the status of this host and
+ remediate it if needed; please continue only when the host is listed as 'up'
+ - include_tasks: pause_execution.yml
+ when: >-
+ he_pause_after_failed_add_host|bool and
+ host_result_up_check is succeeded and
+ host_result_up_check.ovirt_hosts|length >= 1 and
+ host_result_up_check.ovirt_hosts[0].status == 'non_operational'
+
+ # refresh the auth token after a long operation to avoid having it expired
+ - include_tasks: auth_revoke.yml
+ - include_tasks: auth_sso.yml
+ - name: Check if the host is up
+ ovirt_host_info:
+ pattern: name={{ he_host_name }}
+ auth: "{{ ovirt_auth }}"
+ register: host_result_up_check
+ ignore_errors: true
+
+ - name: Handle deployment failure
+ block:
+ - set_fact: host_id={{ host_result_up_check.ovirt_hosts[0].id }}
+ - name: Collect error events from the Engine
+ ovirt_event_info:
+ auth: "{{ ovirt_auth }}"
+ search: "severity>=warning"
+ register: error_events
+
+ - name: Generate the error message from the engine events
+ set_fact:
+ error_description: >-
+ {% for event in error_events.ovirt_events | groupby('code') %}
+ {% if 'host' in event[1][0] and 'id' in event[1][0].host and event[1][0].host.id == host_id %}
+ code {{ event[0] }}: {{ event[1][0].description }},
+ {% endif %}
+ {% endfor %}
+ ignore_errors: true
+
+ - name: Fail with error description
+ fail:
+ msg: >-
+ The host has been set in non_operational status,
+ deployment errors: {{ error_description }}
+ fix accordingly and re-deploy.
+ when: error_description is defined
+
+ - name: Fail with generic error
+ fail:
+ msg: >-
+ The host has been set in non_operational status;
+ please check the engine logs for more information,
+ fix accordingly, and re-deploy.
+ when: error_description is not defined
+
+ when: >-
+ host_result_up_check is succeeded and
+ host_result_up_check.ovirt_hosts|length >= 1 and
+ host_result_up_check.ovirt_hosts[0].status == 'non_operational'
+ rescue:
+ - name: Sync on engine machine
+ command: sync
+ changed_when: true
+ - name: Fetch logs from the engine VM
+ include_tasks: fetch_engine_logs.yml
+ ignore_errors: true
+ - include_tasks: clean_localvm_dir.yml
+ - include_tasks: clean_local_storage_pools.yml
+ - name: Notify the user about a failure
+ fail:
+ msg: >
+ The system may not be provisioned according to the playbook
+ results; please check the logs for the issue,
+ fix accordingly or re-deploy from scratch.
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/clean_local_storage_pools.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/clean_local_storage_pools.yml
new file mode 100644
index 00000000..f6344552
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/clean_local_storage_pools.yml
@@ -0,0 +1,28 @@
+---
+- name: Clean storage-pool
+ block:
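+ # Clean up the transient libvirt storage pools implicitly defined for the
+ # bootstrap VM, one named after the local VM directory and one after the
+ # disk path component; errors are ignored if they are already gone.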
+ - name: Destroy local storage-pool {{ he_local_vm_dir | basename }}
+ command: >-
+ virsh -c qemu:///system?authfile={{ he_libvirt_authfile }}
+ pool-destroy {{ he_local_vm_dir | basename }}
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ - name: Undefine local storage-pool {{ he_local_vm_dir | basename }}
+ command: >-
+ virsh -c qemu:///system?authfile={{ he_libvirt_authfile }}
+ pool-undefine {{ he_local_vm_dir | basename }}
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ - name: Destroy local storage-pool {{ local_vm_disk_path.split('/')[5] }}
+ command: >-
+ virsh -c qemu:///system?authfile={{ he_libvirt_authfile }}
+ pool-destroy {{ local_vm_disk_path.split('/')[5] }}
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ - name: Undefine local storage-pool {{ local_vm_disk_path.split('/')[5] }}
+ command: >-
+ virsh -c qemu:///system?authfile={{ he_libvirt_authfile }}
+ pool-undefine {{ local_vm_disk_path.split('/')[5] }}
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ ignore_errors: true
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/clean_localvm_dir.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/clean_localvm_dir.yml
new file mode 100644
index 00000000..181f4453
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/clean_localvm_dir.yml
@@ -0,0 +1,12 @@
+---
+- name: Remove local vm dir
+ file:
+ path: "{{ he_local_vm_dir }}"
+ state: absent
+ register: rm_localvm_dir
+- debug: var=rm_localvm_dir
+- name: Remove temporary entry in /etc/hosts for the local VM
+ lineinfile:
+ dest: /etc/hosts
+ regexp: "# temporary entry added by hosted-engine-setup for the bootstrap VM$"
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_storage_domain.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_storage_domain.yml
new file mode 100644
index 00000000..59cf2a9a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_storage_domain.yml
@@ -0,0 +1,182 @@
+---
+- name: Create hosted engine local vm
+ block:
+ - name: Wait for the storage interface to be up
+ command: ip -j link show '{{ he_storage_if }}'
+ register: storage_if_result_up_check
+ until: >-
+ storage_if_result_up_check.stdout|from_json|map(attribute='operstate')|join('') == 'UP'
+ retries: 120
+ delay: 5
+ delegate_to: "{{ he_ansible_host_name }}"
+ when: (he_domain_type == "glusterfs" or he_domain_type == "nfs") and he_storage_if is not none
+ - name: Check local VM dir stat
+ stat:
+ path: "{{ he_local_vm_dir }}"
+ register: local_vm_dir_stat
+ - name: Enforce local VM dir existence
+ fail:
+ msg: "Local VM dir '{{ he_local_vm_dir }}' doesn't exist"
+ when: not local_vm_dir_stat.stat.exists
+ - include_tasks: auth_sso.yml
+ - name: Fetch host facts
+ ovirt_host_info:
+ pattern: name={{ he_host_name }}
+ auth: "{{ ovirt_auth }}"
+ register: host_result
+ until: >-
+ host_result and 'ovirt_hosts' in host_result
+ and host_result.ovirt_hosts|length >= 1 and
+ 'up' in host_result.ovirt_hosts[0].status
+ retries: 50
+ delay: 10
+ - debug: var=host_result
+ - name: Fetch cluster ID
+ set_fact: cluster_id="{{ host_result.ovirt_hosts[0].cluster.id }}"
+ - name: Fetch cluster facts
+ ovirt_cluster_info:
+ auth: "{{ ovirt_auth }}"
+ register: cluster_facts
+ - debug: var=cluster_facts
+ - name: Fetch Datacenter facts
+ ovirt_datacenter_info:
+ auth: "{{ ovirt_auth }}"
+ register: datacenter_facts
+ - debug: var=datacenter_facts
+ - name: Fetch Datacenter ID
+ set_fact: >-
+ datacenter_id={{ cluster_facts.ovirt_clusters|json_query("[?id=='" + cluster_id + "'].data_center.id")|first }}
+ - name: Fetch Datacenter name
+ set_fact: >-
+ datacenter_name={{ datacenter_facts.ovirt_datacenters|json_query("[?id=='" + datacenter_id + "'].name")|first }}
+ - name: Add NFS storage domain
+ ovirt_storage_domain:
+ state: unattached
+ name: "{{ he_storage_domain_name }}"
+ host: "{{ he_host_name }}"
+ data_center: "{{ datacenter_name }}"
+ wait: true
+ nfs:
+ address: "{{ he_storage_domain_addr }}"
+ path: "{{ he_storage_domain_path }}"
+ mount_options: "{{ he_mount_options }}"
+ version: "{{ he_nfs_version }}"
+ auth: "{{ ovirt_auth }}"
+ when: he_domain_type == "nfs"
+ register: otopi_storage_domain_details_nfs
+ - name: Add glusterfs storage domain
+ ovirt_storage_domain:
+ state: unattached
+ name: "{{ he_storage_domain_name }}"
+ host: "{{ he_host_name }}"
+ data_center: "{{ datacenter_name }}"
+ wait: true
+ glusterfs:
+ address: "{{ he_storage_domain_addr }}"
+ path: "{{ he_storage_domain_path }}"
+ mount_options: "{{ he_mount_options }}"
+ auth: "{{ ovirt_auth }}"
+ when: he_domain_type == "glusterfs"
+ register: otopi_storage_domain_details_gluster
+ - name: Add iSCSI storage domain
+ ovirt_storage_domain:
+ state: unattached
+ name: "{{ he_storage_domain_name }}"
+ host: "{{ he_host_name }}"
+ data_center: "{{ datacenter_name }}"
+ wait: true
+ discard_after_delete: "{{ he_discard }}"
+ # we send a single iSCSI path but, whether intended or a bug, the
+ # engine implicitly creates the storage domain consuming all the paths
+ # that are already connected on the host (we cannot log out since
+ # there is no logout command in the REST API, see
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1535951 ).
+ iscsi:
+ address: "{{ he_storage_domain_addr.split(',')|first }}"
+ port: "{{ he_iscsi_portal_port.split(',')|first if he_iscsi_portal_port is string else he_iscsi_portal_port }}"
+ target: "{{ he_iscsi_target }}"
+ lun_id: "{{ he_lun_id }}"
+ username: "{{ he_iscsi_username }}"
+ password: "{{ he_iscsi_password }}"
+ auth: "{{ ovirt_auth }}"
+ when: he_domain_type == "iscsi"
+ register: otopi_storage_domain_details_iscsi
+ - name: Add Fibre Channel storage domain
+ ovirt_storage_domain:
+ state: unattached
+ name: "{{ he_storage_domain_name }}"
+ host: "{{ he_host_name }}"
+ data_center: "{{ datacenter_name }}"
+ wait: true
+ discard_after_delete: "{{ he_discard }}"
+ fcp:
+ lun_id: "{{ he_lun_id }}"
+ auth: "{{ ovirt_auth }}"
+ register: otopi_storage_domain_details_fc
+ when: he_domain_type == "fc"
+ - name: Get storage domain details
+ ovirt_storage_domain_info:
+ pattern: name={{ he_storage_domain_name }}
+ auth: "{{ ovirt_auth }}"
+ register: storage_domain_details
+ - debug: var=storage_domain_details
+ - name: Find the appliance OVF
+ find:
+ paths: "{{ he_local_vm_dir }}/master"
+ recurse: true
+ patterns: ^.*.(?<!meta).ovf$
+ use_regex: true
+ register: app_ovf
+ - debug: var=app_ovf
+ - name: Parse OVF
+ xml:
+ path: "{{ app_ovf.files[0].path }}"
+ xpath: /ovf:Envelope/Section/Disk
+ namespaces:
+ ovf: http://schemas.dmtf.org/ovf/envelope/1/
+ rasd: http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData
+ vssd: http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData
+ xsi: http://www.w3.org/2001/XMLSchema-instance
+ content: attribute
+ register: disk_size_xml
+ - name: Get required size
+ set_fact:
+ required_size: >-
+ {{ disk_size_xml.matches[0].Disk['{http://schemas.dmtf.org/ovf/envelope/1/}size']|int * 1024 * 1024 * 1024 +
+ storage_domain_details.ovirt_storage_domains[0].critical_space_action_blocker|int *
+ 1024 * 1024 * 1024 + 5 * 1024 * 1024 * 1024 }}
+ # +5G: 2xOVF_STORE, lockspace, metadata, configuration
+ - debug: var=required_size
+ - name: Remove unsuitable storage domain
+ ovirt_storage_domain:
+ host: "{{ he_host_name }}"
+ data_center: "{{ datacenter_name }}"
+ name: "{{ he_storage_domain_name }}"
+ wait: true
+ state: absent
+ destroy: true
+ auth: "{{ ovirt_auth }}"
+ when: storage_domain_details.ovirt_storage_domains[0].available|int < required_size|int
+ register: remove_storage_domain_details
+ - debug: var=remove_storage_domain_details
+ - name: Check storage domain free space
+ fail:
+ msg: >-
+ Error: the target storage domain contains only
+ {{ storage_domain_details.ovirt_storage_domains[0].available|int / 1024 / 1024 / 1024 }}GiB of
+ available space while a minimum of {{ required_size|int / 1024 / 1024 / 1024 }}GiB is required.
+ If you wish to use the current target storage domain by extending it, make sure it contains nothing
+ before adding it.
+ when: storage_domain_details.ovirt_storage_domains[0].available|int < required_size|int
+ - name: Activate storage domain
+ ovirt_storage_domain:
+ host: "{{ he_host_name }}"
+ data_center: "{{ datacenter_name }}"
+ name: "{{ he_storage_domain_name }}"
+ wait: true
+ state: present
+ auth: "{{ ovirt_auth }}"
+ when: storage_domain_details.ovirt_storage_domains[0].available|int >= required_size|int
+ register: otopi_storage_domain_details
+ - debug: var=otopi_storage_domain_details
+...
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_target_vm/01_create_target_hosted_engine_vm.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_target_vm/01_create_target_hosted_engine_vm.yml
new file mode 100644
index 00000000..9344fedf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_target_vm/01_create_target_hosted_engine_vm.yml
@@ -0,0 +1,186 @@
+---
+- name: Create target Hosted Engine VM
+ block:
+ - import_tasks: add_engine_as_ansible_host.yml
+ - include_tasks: auth_sso.yml
+ - name: Get local VM IP
+ shell: virsh -r net-dhcp-leases default | grep -i {{ he_vm_mac_addr }} | awk '{ print $5 }' | cut -f1 -d'/'
+ environment: "{{ he_cmd_lang }}"
+ register: local_vm_ip
+ changed_when: true
+ - debug: var=local_vm_ip
+ - name: Fetch host facts
+ ovirt_host_info:
+ pattern: name={{ he_host_name }} status=up
+ auth: "{{ ovirt_auth }}"
+ register: host_result
+ until: host_result is succeeded and host_result.ovirt_hosts|length >= 1
+ retries: 50
+ delay: 10
+ - debug: var=host_result
+ - name: Fetch Cluster ID
+ set_fact: cluster_id="{{ host_result.ovirt_hosts[0].cluster.id }}"
+ - name: Fetch Cluster facts
+ ovirt_cluster_info:
+ auth: "{{ ovirt_auth }}"
+ register: cluster_facts
+ - debug: var=cluster_facts
+ - name: Fetch Datacenter facts
+ ovirt_datacenter_info:
+ auth: "{{ ovirt_auth }}"
+ register: datacenter_facts
+ - debug: var=datacenter_facts
+ - name: Fetch Cluster name
+ set_fact: cluster_name={{ cluster_facts.ovirt_clusters|json_query("[?id=='" + cluster_id + "'].name")|first }}
+ - name: Fetch Datacenter ID
+ set_fact: >-
+ datacenter_id={{ cluster_facts.ovirt_clusters|json_query("[?id=='" + cluster_id + "'].data_center.id")|first }}
+ - name: Fetch Datacenter name
+ set_fact: >-
+ datacenter_name={{ datacenter_facts.ovirt_datacenters|json_query("[?id=='" + datacenter_id + "'].name")|first }}
+ - name: Parse Cluster details
+ set_fact:
+ cluster_cpu: >-
+ {{ cluster_facts.ovirt_clusters|selectattr('id', 'match', '^'+cluster_id+'$')|map(attribute='cpu')|list|first }}
+ cluster_version: >-
+ {{ cluster_facts.ovirt_clusters|selectattr('id', 'match', '^'+cluster_id+'$')|
+ map(attribute='version')|list|first }}
+ - name: Get server CPU list
+ ovirt_system_option_info:
+ auth: "{{ ovirt_auth }}"
+ name: ServerCPUList
+ version: "{{ cluster_version.major }}.{{ cluster_version.minor }}"
+ register: server_cpu_list
+ - debug: var=server_cpu_list
+ - name: Get cluster emulated machine list
+ ovirt_system_option_info:
+ name: ClusterEmulatedMachines
+ auth: "{{ ovirt_auth }}"
+ version: "{{ cluster_version.major }}.{{ cluster_version.minor }}"
+ register: emulated_machine_list
+ - name: Prepare for parsing server CPU list
+ set_fact:
+ server_cpu_dict: {}
+ - name: Parse server CPU list
+ set_fact:
+ server_cpu_dict: "{{ server_cpu_dict | combine({item.split(':')[1]: item.split(':')[3]}) }}"
+ with_items: >-
+ {{ server_cpu_list['ovirt_system_option']['values'][0]['value'].split('; ')|list|difference(['']) }}
+ - debug: var=server_cpu_dict
+ - name: Convert CPU model name
+ set_fact:
+ cluster_cpu_model: "{{ server_cpu_dict[cluster_cpu.type] }}"
+ - debug: var=cluster_cpu_model
+ - name: Parse emulated_machine
+ set_fact:
+ emulated_machine: >-
+ {{ emulated_machine_list['ovirt_system_option']['values'][0]['value'].replace(
+ '[','').replace(']','').split(', ')|first }}
+ - name: Get storage domain details
+ ovirt_storage_domain_info:
+ pattern: name={{ he_storage_domain_name }} and datacenter={{ datacenter_name }}
+ auth: "{{ ovirt_auth }}"
+ register: storage_domain_details
+ - debug: var=storage_domain_details
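+ # Four disks are created on the shared storage domain: the engine OS
+ # disk plus the sanlock lockspace, configuration and metadata volumes
+ # used by the hosted-engine HA services.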
+ - name: Add HE disks
+ ovirt_disk:
+ name: "{{ item.name }}"
+ size: "{{ item.size }}"
+ format: "{{ item.format }}"
+ sparse: "{{ item.sparse }}"
+ description: "{{ item.description }}"
+ content_type: "{{ item.content }}"
+ interface: virtio
+ storage_domain: "{{ he_storage_domain_name }}"
+ wait: true
+ timeout: 600
+ auth: "{{ ovirt_auth }}"
+ with_items:
+ - {
+ name: 'he_virtio_disk',
+ description: 'Hosted-Engine disk',
+ size: "{{ he_disk_size_GB }}GiB",
+ format: 'raw',
+ sparse: "{{ false if he_domain_type == 'fc' or he_domain_type == 'iscsi' else true }}",
+ content: 'hosted_engine'
+ }
+ - {
+ name: 'he_sanlock',
+ description: 'Hosted-Engine sanlock disk',
+ size: '1GiB',
+ format: 'raw',
+ sparse: false,
+ content: 'hosted_engine_sanlock'
+ }
+ - {
+ name: 'HostedEngineConfigurationImage',
+ description: 'Hosted-Engine configuration disk',
+ size: '1GiB',
+ format: 'raw',
+ sparse: false,
+ content: 'hosted_engine_configuration'
+ }
+ - {
+ name: 'he_metadata',
+ description: 'Hosted-Engine metadata disk',
+ size: '1GiB',
+ format: 'raw',
+ sparse: false,
+ content: 'hosted_engine_metadata'
+ }
+ register: add_disks
+ - name: Register disk details
+ set_fact:
+ he_virtio_disk_details: "{{ add_disks.results[0] }}"
+ he_sanlock_disk_details: "{{ add_disks.results[1] }}"
+ he_conf_disk_details: "{{ add_disks.results[2] }}"
+ he_metadata_disk_details: "{{ add_disks.results[3] }}"
+ - debug: var=add_disks
+ - name: Set default graphics protocols
+ set_fact:
+ he_graphic_protocols: [vnc, spice]
+ - name: Check if FIPS is enabled
+ command: sysctl -n crypto.fips_enabled
+ register: he_fips_enabled
+ - debug: var=he_fips_enabled
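+ # When FIPS is enabled only SPICE is kept: VNC is dropped since its
+ # password-based authentication scheme is not usable in FIPS mode.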
+ - name: Select graphic protocols
+ set_fact:
+ he_graphic_protocols: [spice]
+ when: he_fips_enabled.stdout == "1"
+ - debug: var=he_graphic_protocols
+ - name: Add VM
+ ovirt_vm:
+ state: stopped
+ cluster: "{{ cluster_name }}"
+ name: "{{ he_vm_name }}"
+ description: 'Hosted Engine Virtual Machine'
+ memory: "{{ he_mem_size_MB }}MiB"
+ cpu_cores: "{{ he_vcpus }}"
+ cpu_sockets: 1
+ graphical_console:
+ headless_mode: false
+ protocol: "{{ he_graphic_protocols }}"
+ serial_console: false
+ operating_system: rhel_8x64
+ bios_type: q35_sea_bios
+ type: server
+ high_availability_priority: 1
+ high_availability: false
+ delete_protected: true
+ # timezone: "{{ he_time_zone }}" # TODO: fix with the right parameter syntax
+ disks:
+ - id: "{{ he_virtio_disk_details.disk.id }}"
+ nics:
+ - name: vnet0
+ profile_name: "{{ he_mgmt_network }}"
+ interface: virtio
+ mac_address: "{{ he_vm_mac_addr }}"
+ auth: "{{ ovirt_auth }}"
+ register: he_vm_details
+ - debug: var=he_vm_details
+ - name: Register external local VM uuid
+ shell: virsh -r domuuid {{ he_vm_name }}Local | head -1
+ environment: "{{ he_cmd_lang }}"
+ register: external_local_vm_uuid
+ changed_when: true
+ - debug: var=external_local_vm_uuid
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_target_vm/02_engine_vm_configuration.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_target_vm/02_engine_vm_configuration.yml
new file mode 100644
index 00000000..269756e9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_target_vm/02_engine_vm_configuration.yml
@@ -0,0 +1,91 @@
+---
+- name: Engine VM configuration tasks
+ block:
+ - name: Create a temporary directory for ansible as postgres user
+ file:
+ path: /var/lib/pgsql/.ansible/tmp
+ state: directory
+ owner: postgres
+ group: postgres
+ mode: 0700
+ - name: Update target VM details at DB level
+ command: >-
+ psql -d engine -c
+ "UPDATE vm_static SET {{ item.field }}={{ item.value }} WHERE
+ vm_guid='{{ hostvars[he_ansible_host_name]['he_vm_details']['vm']['id'] }}'"
+ environment: "{{ he_cmd_lang }}"
+ become: true
+ become_user: postgres
+ become_method: sudo
+ changed_when: true
+ register: db_vm_update
+ with_items:
+ - {field: 'origin', value: 6}
+ - debug: var=db_vm_update
+ - name: Insert Hosted Engine configuration disk uuid into Engine database
+ command: >-
+ psql -d engine -c
+ "UPDATE vdc_options SET option_value=
+ '{{ hostvars[he_ansible_host_name]['he_conf_disk_details']['disk']['id'] }}'
+ WHERE option_name='HostedEngineConfigurationImageGuid' AND version='general'"
+ environment: "{{ he_cmd_lang }}"
+ become: true
+ become_user: postgres
+ become_method: sudo
+ changed_when: true
+ register: db_conf_update
+ - debug: var=db_conf_update
+ - name: Fetch host SPM_ID
+ command: >-
+ psql -t -d engine -c
+ "SELECT vds_spm_id FROM vds WHERE vds_name='{{ hostvars[he_ansible_host_name]['he_host_name'] }}'"
+ environment: "{{ he_cmd_lang }}"
+ become: true
+ become_user: postgres
+ become_method: sudo
+ changed_when: true
+ register: host_spm_id_out
+ - name: Parse host SPM_ID
+ set_fact: host_spm_id="{{ host_spm_id_out.stdout_lines|first|trim }}"
+ - debug: var=host_spm_id
+ - name: Restore original DisableFenceAtStartupInSec
+ shell: "engine-config -s DisableFenceAtStartupInSec=$(cat /root/DisableFenceAtStartupInSec.txt)"
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ when: he_restore_from_file is defined and he_restore_from_file
+ - name: Remove DisableFenceAtStartupInSec temporary file
+ file:
+ path: /root/DisableFenceAtStartupInSec.txt
+ state: absent
+ when: he_restore_from_file is defined and he_restore_from_file
+ - name: Restore original OvfUpdateIntervalInMinutes
+ shell: "engine-config -s OvfUpdateIntervalInMinutes=$(cat /root/OvfUpdateIntervalInMinutes.txt)"
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ - name: Remove OvfUpdateIntervalInMinutes temporary file
+ file:
+ path: /root/OvfUpdateIntervalInMinutes.txt
+ state: absent
+ changed_when: true
+ - name: Restore original SSO_ALTERNATE_ENGINE_FQDNS
+ block:
+ - name: Removing temporary value
+ lineinfile:
+ path: /etc/ovirt-engine/engine.conf.d/11-setup-sso.conf
+ regexp: '^SSO_ALTERNATE_ENGINE_FQDNS=.* # hosted-engine-setup'
+ state: absent
+ - name: Restoring original value
+ replace:
+ path: /etc/ovirt-engine/engine.conf.d/11-setup-sso.conf
+ regexp: '^#(SSO_ALTERNATE_ENGINE_FQDNS=.*) # pre hosted-engine-setup'
+ replace: '\1'
+ - name: Remove temporary directory for ansible as postgres user
+ file:
+ path: /var/lib/pgsql/.ansible
+ state: absent
+ - name: Configure PermitRootLogin for sshd to its final value
+ lineinfile:
+ dest: /etc/ssh/sshd_config
+ regexp: "^\\s*PermitRootLogin"
+ line: "PermitRootLogin {{ he_root_ssh_access }}"
+ state: present
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_target_vm/03_hosted_engine_final_tasks.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_target_vm/03_hosted_engine_final_tasks.yml
new file mode 100644
index 00000000..8c0e0f88
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/create_target_vm/03_hosted_engine_final_tasks.yml
@@ -0,0 +1,431 @@
+---
+- name: Hosted-Engine final tasks
+ block:
+ - name: Choose IPv4, IPv6 or auto
+ import_tasks: ipv_switch.yml
+ - name: Trigger hosted engine OVF update and enable the serial console
+ ovirt_vm:
+ id: "{{ he_vm_details.vm.id }}"
+ description: "Hosted engine VM"
+ serial_console: true
+ auth: "{{ ovirt_auth }}"
+ - name: Wait until OVF update finishes
+ ovirt_storage_domain_info:
+ auth: "{{ ovirt_auth }}"
+ fetch_nested: true
+ nested_attributes:
+ - name
+ - image_id
+ - id
+ pattern: "name={{ he_storage_domain_name }}"
+ retries: 12
+ delay: 10
+ register: storage_domain_details
+ until: "storage_domain_details.ovirt_storage_domains[0].disks | selectattr('name', 'match', '^OVF_STORE$') | list"
+ - debug: var=storage_domain_details
+ - name: Parse OVF_STORE disk list
+ set_fact:
+ ovf_store_disks: >-
+ {{ storage_domain_details.ovirt_storage_domains[0].disks |
+ selectattr('name', 'match', '^OVF_STORE$') | list }}
+ - debug: var=ovf_store_disks
+ - name: Check OVF_STORE volume status
+ command: >-
+ vdsm-client Volume getInfo storagepoolID={{ datacenter_id }}
+ storagedomainID={{ storage_domain_details.ovirt_storage_domains[0].id }}
+ imageID={{ item.id }} volumeID={{ item.image_id }}
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: ovf_store_status
+ retries: 12
+ delay: 10
+ until: >-
+ ovf_store_status.rc == 0 and ovf_store_status.stdout|from_json|json_query('status') == 'OK' and
+ ovf_store_status.stdout|from_json|json_query('description')|from_json|json_query('Updated')
+ with_items: "{{ ovf_store_disks }}"
+ - debug: var=ovf_store_status
+ - name: Wait for OVF_STORE disk content
+ shell: >-
+ vdsm-client Image prepare storagepoolID={{ datacenter_id }}
+ storagedomainID={{ storage_domain_details.ovirt_storage_domains[0].id }} imageID={{ item.id }}
+ volumeID={{ item.image_id }} | grep path | awk '{ print $2 }' |
+ xargs -I{} sudo -u vdsm dd if={} | tar -tvf - {{ he_vm_details.vm.id }}.ovf
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: ovf_store_content
+ retries: 12
+ delay: 10
+ until: ovf_store_content.rc == 0
+ with_items: "{{ ovf_store_disks }}"
+ args:
+ warn: false
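+ # 'vdsm-client Image prepare' activates each volume and prints its local
+ # path; the JSON output is parsed below to copy the configuration archive
+ # and the appliance disk onto the shared storage.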
+ - name: Prepare images
+ command: >-
+ vdsm-client Image prepare storagepoolID={{ datacenter_id }}
+ storagedomainID={{ storage_domain_details.ovirt_storage_domains[0].id }}
+ imageID={{ item.disk.id }} volumeID={{ item.disk.image_id }}
+ environment: "{{ he_cmd_lang }}"
+ with_items:
+ - "{{ he_virtio_disk_details }}"
+ - "{{ he_conf_disk_details }}"
+ - "{{ he_metadata_disk_details }}"
+ - "{{ he_sanlock_disk_details }}"
+ register: prepareimage_results
+ changed_when: true
+ - debug: var=prepareimage_results
+ - name: Fetch Hosted Engine configuration disk path
+ set_fact:
+ he_conf_disk_path: >-
+ {{ (prepareimage_results.results|json_query("[?item.id=='" +
+ he_conf_disk_details.id + "'].stdout")|first|from_json).path }}
+ - name: Fetch Hosted Engine virtio disk path
+ set_fact:
+ he_virtio_disk_path: >-
+ {{ (prepareimage_results.results|json_query("[?item.id=='" +
+ he_virtio_disk_details.id + "'].stdout")|first|from_json).path }}
+ - name: Fetch Hosted Engine virtio metadata path
+ set_fact:
+ he_metadata_disk_path: >-
+ {{ (prepareimage_results.results|json_query("[?item.id=='" +
+ he_metadata_disk_details.id + "'].stdout")|first|from_json).path }}
+ - debug: var=he_conf_disk_path
+ - debug: var=he_virtio_disk_path
+ - debug: var=he_metadata_disk_path
+ - name: Shutdown local VM
+ command: "virsh -c qemu:///system?authfile={{ he_libvirt_authfile }} shutdown {{ he_vm_name }}Local"
+ environment: "{{ he_cmd_lang }}"
+ - name: Wait for local VM shutdown
+ command: virsh -r domstate "{{ he_vm_name }}Local"
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: dominfo_out
+ until: dominfo_out.rc == 0 and 'shut off' in dominfo_out.stdout
+ retries: 120
+ delay: 5
+ - debug: var=dominfo_out
+ - name: Undefine local VM
+ command: "virsh -c qemu:///system?authfile={{ he_libvirt_authfile }} undefine {{ he_vm_name }}Local"
+ environment: "{{ he_cmd_lang }}"
+ - name: Update libvirt default network configuration, destroy
+ command: "virsh -c qemu:///system?authfile={{ he_libvirt_authfile }} net-destroy default"
+ environment: "{{ he_cmd_lang }}"
+ - name: Update libvirt default network configuration, undefine
+ command: "virsh -c qemu:///system?authfile={{ he_libvirt_authfile }} net-undefine default"
+ environment: "{{ he_cmd_lang }}"
+ ignore_errors: true
+ - name: Detect ovirt-hosted-engine-ha version
+ command: >-
+ {{ ansible_python.executable }} -c
+ 'from ovirt_hosted_engine_ha.agent import constants as agentconst; print(agentconst.PACKAGE_VERSION)'
+ environment: "{{ he_cmd_lang }}"
+ register: ha_version_out
+ changed_when: true
+ - name: Set ha_version
+ set_fact: ha_version="{{ ha_version_out.stdout_lines|first }}"
+ - debug: var=ha_version
+ - name: Create configuration templates
+ template:
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ mode: 0644
+ with_items:
+ - {src: templates/vm.conf.j2, dest: "{{ he_local_vm_dir }}/vm.conf"}
+ - {src: templates/broker.conf.j2, dest: "{{ he_local_vm_dir }}/broker.conf"}
+ - {src: templates/version.j2, dest: "{{ he_local_vm_dir }}/version"}
+ - {src: templates/fhanswers.conf.j2, dest: "{{ he_local_vm_dir }}/fhanswers.conf"}
+ - {src: templates/hosted-engine.conf.j2, dest: "{{ he_local_vm_dir }}/hosted-engine.conf"}
+ - name: Create configuration archive
+ command: >-
+ tar --record-size=20480 -cvf {{ he_conf_disk_details.disk.image_id }}
+ vm.conf broker.conf version fhanswers.conf hosted-engine.conf
+ environment: "{{ he_cmd_lang }}"
+ args:
+ chdir: "{{ he_local_vm_dir }}"
+ warn: false
+ changed_when: true
+ tags: ['skip_ansible_lint']
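+ # --record-size=20480 keeps the archive within a single 20 KiB record so
+ # the 'dd bs=20480 count=1' below can copy it verbatim onto the raw
+ # configuration volume.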
+ - name: Create ovirt-hosted-engine-ha run directory
+ file:
+ path: /var/run/ovirt-hosted-engine-ha
+ state: directory
+ mode: 0755
+ - name: Copy configuration files to the right location on host
+ copy:
+ remote_src: true
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ mode: 0644
+ with_items:
+ - {src: "{{ he_local_vm_dir }}/vm.conf", dest: /var/run/ovirt-hosted-engine-ha}
+ - {src: "{{ he_local_vm_dir }}/hosted-engine.conf", dest: /etc/ovirt-hosted-engine/}
+ - name: Copy configuration archive to storage
+ command: >-
+ dd bs=20480 count=1 oflag=direct if="{{ he_local_vm_dir }}/{{ he_conf_disk_details.disk.image_id }}"
+ of="{{ he_conf_disk_path }}"
+ environment: "{{ he_cmd_lang }}"
+ become: true
+ become_user: vdsm
+ become_method: sudo
+ changed_when: true
+ args:
+ warn: false
+ - name: Initialize metadata volume
+ command: dd bs=1M count=1024 oflag=direct if=/dev/zero of="{{ he_metadata_disk_path }}"
+ environment: "{{ he_cmd_lang }}"
+ become: true
+ become_user: vdsm
+ become_method: sudo
+ changed_when: true
+ - include_tasks: get_local_vm_disk_path.yml
+ - name: Generate DHCP network configuration for the engine VM
+ template:
+ src: templates/ifcfg-eth0-dhcp.j2
+ dest: "{{ he_local_vm_dir }}/ifcfg-eth0"
+ owner: root
+ group: root
+ mode: 0644
+ when: he_vm_ip_addr is none
+ - name: Generate static network configuration for the engine VM, IPv4
+ template:
+ src: templates/ifcfg-eth0-static.j2
+ dest: "{{ he_local_vm_dir }}/ifcfg-eth0"
+ owner: root
+ group: root
+ mode: 0644
+ when: he_vm_ip_addr is not none and he_vm_ip_addr | ipv4
+ - name: Generate static network configuration for the engine VM, IPv6
+ template:
+ src: templates/ifcfg-eth0-static-ipv6.j2
+ dest: "{{ he_local_vm_dir }}/ifcfg-eth0"
+ owner: root
+ group: root
+ mode: 0644
+ when: he_vm_ip_addr is not none and he_vm_ip_addr | ipv6
+ - name: Inject network configuration with guestfish
+ command: >-
+ guestfish -a {{ local_vm_disk_path }} --rw -i copy-in "{{ he_local_vm_dir }}/ifcfg-eth0"
+ /etc/sysconfig/network-scripts {{ ":" }} selinux-relabel /etc/selinux/targeted/contexts/files/file_contexts
+ /etc/sysconfig/network-scripts/ifcfg-eth0 force{{ ":" }}true
+ environment:
+ LIBGUESTFS_BACKEND: direct
+ LANG: en_US.UTF-8
+ LC_MESSAGES: en_US.UTF-8
+ LC_ALL: en_US.UTF-8
+ changed_when: true
+ - name: Extract /etc/hosts from the Hosted Engine VM
+ command: virt-copy-out -a {{ local_vm_disk_path }} /etc/hosts "{{ he_local_vm_dir }}"
+ environment:
+ LIBGUESTFS_BACKEND: direct
+ LANG: en_US.UTF-8
+ LC_MESSAGES: en_US.UTF-8
+ LC_ALL: en_US.UTF-8
+ changed_when: true
+ - name: Clean /etc/hosts for the Hosted Engine VM for Engine VM FQDN
+ lineinfile:
+ dest: "{{ he_local_vm_dir }}/hosts"
+ regexp: "# hosted-engine-setup-{{ hostvars[he_ansible_host_name]['he_local_vm_dir'] }}$"
+ state: absent
+ - name: Add an entry on /etc/hosts for the Hosted Engine VM for the VM itself
+ lineinfile:
+ dest: "{{ he_local_vm_dir }}/hosts"
+ line: "{{ he_vm_ip_addr }} {{ he_fqdn }}"
+ state: present
+ when: he_vm_etc_hosts and he_vm_ip_addr is not none
+ - name: Clean /etc/hosts for the Hosted Engine VM for host address
+ lineinfile:
+ dest: "{{ he_local_vm_dir }}/hosts"
+ line: "{{ he_host_ip }} {{ he_host_address }}"
+ state: absent
+ when: not he_vm_etc_hosts
+ - name: Inject /etc/hosts with guestfish
+ command: >-
+ guestfish -a {{ local_vm_disk_path }} --rw -i copy-in "{{ he_local_vm_dir }}/hosts"
+ /etc {{ ":" }} selinux-relabel /etc/selinux/targeted/contexts/files/file_contexts
+ /etc/hosts force{{ ":" }}true
+ environment:
+ LIBGUESTFS_BACKEND: direct
+ LANG: en_US.UTF-8
+ LC_MESSAGES: en_US.UTF-8
+ LC_ALL: en_US.UTF-8
+ changed_when: true
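+ # qemu-img convert copies the qcow2 bootstrap disk onto the raw shared
+ # storage volume; -t none and -T none disable caching on the target and
+ # source respectively.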
+ - name: Copy local VM disk to shared storage
+ command: >-
+ qemu-img convert -f qcow2 -O raw -t none -T none {{ local_vm_disk_path }} {{ he_virtio_disk_path }}
+ environment: "{{ he_cmd_lang }}"
+ become: true
+ become_user: vdsm
+ become_method: sudo
+ changed_when: true
+ - name: Verify copy of VM disk
+ command: qemu-img compare {{ local_vm_disk_path }} {{ he_virtio_disk_path }}
+ environment: "{{ he_cmd_lang }}"
+ become: true
+ become_user: vdsm
+ become_method: sudo
+ changed_when: true
+ when: he_debug_mode|bool
+ - name: Remove temporary entry in /etc/hosts for the local VM
+ lineinfile:
+ dest: /etc/hosts
+ regexp: "# temporary entry added by hosted-engine-setup for the bootstrap VM$"
+ state: absent
+ - name: Start ovirt-ha-broker service on the host
+ service:
+ name: ovirt-ha-broker
+ state: started
+ enabled: true
+ - name: Initialize lockspace volume
+ command: hosted-engine --reinitialize-lockspace --force
+ environment: "{{ he_cmd_lang }}"
+ register: result
+ until: result.rc == 0
+ ignore_errors: true
+ retries: 5
+ delay: 10
+ changed_when: true
+ - debug: var=result
+ - block:
+ - name: Workaround for ovirt-ha-broker start failures
+ # Ugly workaround for https://bugzilla.redhat.com/1768511
+ # fix it on ovirt-ha-broker side and remove ASAP
+ systemd:
+ state: restarted
+ enabled: true
+ name: ovirt-ha-broker
+ - name: Initialize lockspace volume
+ command: hosted-engine --reinitialize-lockspace --force
+ environment: "{{ he_cmd_lang }}"
+ register: result2
+ until: result2.rc == 0
+ retries: 5
+ delay: 10
+ changed_when: true
+ - debug: var=result2
+ when: result.rc != 0
+ - name: Start ovirt-ha-agent service on the host
+ service:
+ name: ovirt-ha-agent
+ state: started
+ enabled: true
+ - name: Exit HE maintenance mode
+ command: hosted-engine --set-maintenance --mode=none
+ environment: "{{ he_cmd_lang }}"
+ register: mresult
+ until: mresult.rc == 0
+ retries: 3
+ delay: 10
+ changed_when: true
+ - debug: var=mresult
+ - name: Wait for the engine to come up on the target VM
+ block:
+ - name: Check engine VM health
+ command: hosted-engine --vm-status --json
+ environment: "{{ he_cmd_lang }}"
+ register: health_result
+ until: >-
+ health_result.rc == 0 and 'health' in health_result.stdout and
+ health_result.stdout|from_json|json_query('*."engine-status"."health"')|first=="good"
+ retries: 180
+ delay: 5
+ changed_when: true
+ - debug: var=health_result
+ rescue:
+ - name: Check VM status at virt level
+ shell: virsh -r list | grep {{ he_vm_name }} | grep running
+ environment: "{{ he_cmd_lang }}"
+ ignore_errors: true
+ changed_when: true
+ register: vm_status_virsh
+ - debug: var=vm_status_virsh
+ - name: Fail if engine VM is not running
+ fail:
+ msg: "Engine VM is not running, please check vdsm logs"
+ when: vm_status_virsh.rc != 0
+ - name: Get target engine VM IP address
+ shell: getent {{ ip_key }} {{ he_fqdn }} | cut -d' ' -f1 | uniq
+ environment: "{{ he_cmd_lang }}"
+ register: engine_vm_ip
+ changed_when: true
+ - name: Get VDSM's target engine VM stats
+ command: vdsm-client VM getStats vmID={{ he_vm_details.vm.id }}
+ environment: "{{ he_cmd_lang }}"
+ register: engine_vdsm_stats
+ changed_when: true
+ - name: Convert stats to JSON format
+ set_fact: json_stats={{ engine_vdsm_stats.stdout|from_json }}
+ - name: Get target engine VM IP address from VDSM stats
+ set_fact: engine_vm_ip_vdsm={{ json_stats[0].guestIPs }}
+ - debug: var=engine_vm_ip_vdsm
+ - name: Fail if Engine IP is different from engine's he_fqdn resolved IP
+ fail:
+ msg: >-
+ Engine VM IP address is {{ engine_vm_ip_vdsm }} while the engine's he_fqdn {{ he_fqdn }} resolves to
+ {{ engine_vm_ip.stdout_lines[0] }}. If you are using DHCP, check your DHCP reservation configuration.
+ when: engine_vm_ip_vdsm != engine_vm_ip.stdout_lines[0]
+ - name: Fail if the engine did not start for any other reason
+ fail:
+ msg: The engine failed to start inside the engine VM; please check engine.log.
+ - name: Get target engine VM address
+ shell: getent {{ ip_key }} {{ he_fqdn }} | cut -d ' ' -f1 | uniq
+ environment: "{{ he_cmd_lang }}"
+ register: engine_vm_ip
+ when: engine_vm_ip is not defined
+ changed_when: true
+ # Workaround for ovn-central being configured with the address of the bootstrap VM.
+ # Keep this aligned with:
+ # https://github.com/oVirt/ovirt-engine/blob/master/packaging/playbooks/roles/ovirt-provider-ovn-driver/tasks/main.yml
+ - name: Reconfigure OVN central address
+ command: vdsm-tool ovn-config {{ engine_vm_ip.stdout_lines[0] }} {{ he_mgmt_network }}
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ # Workaround for https://bugzilla.redhat.com/1540107
+ # the engine fails deleting a VM if its status in the engine DB
+ # is not up to date.
+ - include_tasks: auth_sso.yml
+ - name: Check for the local bootstrap VM
+ ovirt_vm_info:
+ pattern: id="{{ external_local_vm_uuid.stdout_lines|first }}"
+ auth: "{{ ovirt_auth }}"
+ register: local_vm_f
+ - name: Remove the bootstrap local VM
+ block:
+ - name: Make the engine aware that the external VM is stopped
+ ignore_errors: true
+ ovirt_vm:
+ state: stopped
+ id: "{{ external_local_vm_uuid.stdout_lines|first }}"
+ auth: "{{ ovirt_auth }}"
+ register: vmstop_result
+ - debug: var=vmstop_result
+ - name: Wait for the local bootstrap VM to be seen as down by the engine
+ ovirt_vm_info:
+ pattern: id="{{ external_local_vm_uuid.stdout_lines|first }}"
+ auth: "{{ ovirt_auth }}"
+ register: local_vm_status
+ until: local_vm_status.ovirt_vms[0].status == "down"
+ retries: 24
+ delay: 5
+ - debug: var=local_vm_status
+ - name: Remove bootstrap external VM from the engine
+ ovirt_vm:
+ state: absent
+ id: "{{ external_local_vm_uuid.stdout_lines|first }}"
+ auth: "{{ ovirt_auth }}"
+ register: vmremove_result
+ - debug: var=vmremove_result
+ when: local_vm_f.ovirt_vms|length > 0
+ - name: Remove ovirt-engine-appliance rpm
+ yum:
+ name: ovirt-engine-appliance
+ state: absent
+ register: yum_result
+ until: yum_result is success
+ retries: 10
+ delay: 5
+ when: he_remove_appliance_rpm|bool
+
+ - name: Include custom tasks for after setup customization
+ include_tasks: "{{ item }}"
+ with_fileglob: "hooks/after_setup/*.yml"
+ register: after_setup_results
+ - debug: var=after_setup_results
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/fc_getdevices.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/fc_getdevices.yml
new file mode 100644
index 00000000..b5ca3f76
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/fc_getdevices.yml
@@ -0,0 +1,11 @@
+---
+- include_tasks: auth_sso.yml
+- name: Get Fibre Channel LUNs
+ ovirt_host_storage_info:
+ host: "{{ he_host_name }}"
+ fcp:
+ lun_id: -1 # currently it is unused and I use it to turn on FC filtering
+ auth: "{{ ovirt_auth }}"
+ register: otopi_fc_devices
+- debug: var=otopi_fc_devices
+...
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/fetch_engine_logs.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/fetch_engine_logs.yml
new file mode 100644
index 00000000..d7c135f2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/fetch_engine_logs.yml
@@ -0,0 +1,29 @@
+---
+- name: Set destination directory path
+ set_fact:
+ destdir=/var/log/ovirt-hosted-engine-setup/engine-logs-{{ ansible_date_time.iso8601 }}/
+- name: Create destination directory
+ file:
+ state: directory
+ path: "{{ destdir }}"
+ owner: root
+ group: root
+ mode: 0700
+- include_tasks: get_local_vm_disk_path.yml
+- name: Give the vm time to flush dirty buffers
+ wait_for:
+ timeout: 10
+ delegate_to: localhost
+ become: false
+- name: Copy engine logs
+ command: virt-copy-out -a {{ local_vm_disk_path }} {{ item }} {{ destdir }}
+ environment:
+ LIBGUESTFS_BACKEND: direct
+ LANG: en_US.UTF-8
+ LC_MESSAGES: en_US.UTF-8
+ LC_ALL: en_US.UTF-8
+ ignore_errors: true
+ changed_when: true
+ with_items:
+ - /var/log
+ when: local_vm_disk_path is defined
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/fetch_host_ip.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/fetch_host_ip.yml
new file mode 100644
index 00000000..bd6a4531
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/fetch_host_ip.yml
@@ -0,0 +1,47 @@
+---
+- name: Get full hostname
+ command: hostname -f
+ changed_when: true
+ register: host_full_name
+- name: Set hostname variable if not defined
+ set_fact:
+ he_host_name: "{{ host_full_name.stdout_lines[0] }}"
+ when: he_host_name is none
+- debug: var=he_host_name
+- name: Define host address variable if not defined
+ set_fact:
+ he_host_address: "{{ host_full_name.stdout_lines[0] }}"
+ when: he_host_address is none
+- debug: var=he_host_address
+
+- name: Get host IP address
+ block:
+ - name: Choose IPv4, IPv6 or auto
+ import_tasks: ipv_switch.yml
+ - name: Get host address resolution
+ shell: getent {{ ip_key }} {{ he_host_address }} | grep STREAM
+ register: hostname_resolution_output
+ changed_when: true
+ ignore_errors: true
+ - debug: var=hostname_resolution_output
+ - name: Check address resolution
+ fail:
+ msg: >
+ Unable to resolve address
+ when: hostname_resolution_output.rc != 0
+ - name: Parse host address resolution
+ set_fact:
+ he_host_ip: "{{
+ (
+ hostname_resolution_output.stdout.split() | ipaddr |
+ difference(hostname_resolution_output.stdout.split() |
+ ipaddr('link-local')
+ )
+ )[0]
+ }}"
+ - debug: var=he_host_ip
+
+- name: Fail if the host's IP is empty
+ fail:
+ msg: Host has no IP address
+ when: he_host_ip is none
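The "Parse host address resolution" step leans on the ipaddr filter (backed by the netaddr Python library): splitting the getent output yields a token list, ipaddr keeps only the tokens that are IP addresses, and the difference with ipaddr('link-local') drops link-local ones before the first survivor is taken. A standalone sketch, with an invented getent line:

- name: Illustrate the address filter chain (hypothetical input)
  vars:
    out: "fe80::5054:ff:fe12:3456 STREAM host.example.com 192.0.2.10 STREAM host.example.com"
  debug:
    msg: "{{ (out.split() | ipaddr | difference(out.split() | ipaddr('link-local')))[0] }}"  # -> 192.0.2.10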
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/filter_team_devices.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/filter_team_devices.yml
new file mode 100644
index 00000000..c15c45e6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/filter_team_devices.yml
@@ -0,0 +1,35 @@
+---
+- name: Collect interface types
+ shell: set -euo pipefail && nmcli -g GENERAL.TYPE device show {{ nic }}
+ with_items:
+ - "{{ host_net }}"
+ loop_control:
+ loop_var: nic
+ changed_when: true
+ register: interface_types
+- debug: var=interface_types
+- name: Check for Team devices
+ set_fact:
+ is_team: "{{ nic_if.stdout.find('team') > 0 }}"
+ when: nic_if.stdout.find('team') != -1
+ with_items:
+ - "{{ interface_types.results }}"
+ loop_control:
+ loop_var: nic_if
+ register: team_list
+- debug: var=team_list
+- name: Get list of Team devices
+ set_fact:
+ team_if: "{{ team_list.results | reject('skipped') | map(attribute='nic_if.nic') | list }}"
+- debug: var=team_if
+- name: Filter unsupported interface types
+ set_fact:
+ otopi_host_net: "{{ host_net | difference(team_if) }}"
+ register: otopi_host_net
+- debug: var=otopi_host_net
+- name: Fail if only teaming devices are available
+ fail:
+ msg: >-
+ Only Team devices {{ team_if | join(', ') }} are present.
+ Teaming is not supported.
+ when: (otopi_host_net.ansible_facts.otopi_host_net | length == 0)
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/final_clean.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/final_clean.yml
new file mode 100644
index 00000000..1000d2ed
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/final_clean.yml
@@ -0,0 +1,11 @@
+---
+- name: Clean temporary resources
+ block:
+ - name: Fetch logs from the engine VM
+ include_tasks: fetch_engine_logs.yml
+ ignore_errors: true
+ - include_tasks: clean_localvm_dir.yml
+ - name: Clean local storage pools
+ include_tasks: clean_local_storage_pools.yml
+ ignore_errors: true
+...
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/full_execution.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/full_execution.yml
new file mode 100644
index 00000000..e7748812
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/full_execution.yml
@@ -0,0 +1,73 @@
+---
+- name: Install packages and bootstrap local engine VM
+ block:
+ - name: Install required packages for oVirt Hosted Engine deployment
+ import_tasks: install_packages.yml
+
+ - name: System configuration validations
+ include_tasks: "{{ item }}"
+ with_fileglob: "pre_checks/*.yml"
+
+ - name: Clean environment before deployment
+ import_tasks: initial_clean.yml
+
+ - name: 01_02 Bootstrap local VM tasks
+ block:
+ - name: 01 Bootstrap local VM
+ import_tasks: bootstrap_local_vm/01_prepare_routing_rules.yml
+
+ - name: 02 Bootstrap local VM
+ import_tasks: bootstrap_local_vm/02_create_local_vm.yml
+
+- name: Local engine VM installation - Pre tasks
+ block:
+ - name: 03 Bootstrap local VM
+ import_tasks: bootstrap_local_vm/03_engine_initial_tasks.yml
+ delegate_to: "{{ groups.engine[0] }}"
+
+- name: Engine Setup on local VM
+ block:
+ - name: Engine Setup on local VM
+ vars:
+ ovirt_engine_setup_hostname: "{{ he_fqdn.split('.')[0] }}"
+ ovirt_engine_setup_organization: "{{ he_cloud_init_domain_name }}"
+ ovirt_engine_setup_dwh_db_host: "{{ he_fqdn.split('.')[0] }}"
+ ovirt_engine_setup_firewall_manager: null
+ ovirt_engine_setup_answer_file_path: /root/ovirt-engine-answers
+ ovirt_engine_setup_use_remote_answer_file: true
+ ovirt_engine_setup_offline: "{{ he_offline_deployment }}"
+ ovirt_engine_setup_package_list: "{{ he_additional_package_list }}"
+ ovirt_engine_setup_admin_password: "{{ he_admin_password }}"
+ import_role:
+ name: ovirt.ovirt.engine_setup
+ delegate_to: "{{ groups.engine[0] }}"
+
+- name: Local engine VM installation - Post tasks
+ block:
+ - name: 04 Bootstrap local VM
+ import_tasks: bootstrap_local_vm/04_engine_final_tasks.yml
+ delegate_to: "{{ groups.engine[0] }}"
+
+- name: Configure engine VM on a storage domain
+ block:
+ - name: 05 Bootstrap local VM
+ import_tasks: bootstrap_local_vm/05_add_host.yml
+ - name: Create Storage Domain
+ import_tasks: create_storage_domain.yml
+ - name: Create target hosted engine vm
+ import_tasks: create_target_vm/01_create_target_hosted_engine_vm.yml
+
+- name: Configure database settings
+ import_tasks: create_target_vm/02_engine_vm_configuration.yml
+ delegate_to: "{{ groups.engine[0] }}"
+
+- name: Closeup
+ block:
+ - name: Hosted engine final tasks
+ import_tasks: create_target_vm/03_hosted_engine_final_tasks.yml
+ - name: Sync on engine machine
+ command: sync
+ changed_when: true
+ delegate_to: "{{ groups.engine[0] }}"
+ - name: Final clean
+ import_tasks: final_clean.yml
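full_execution.yml is what runs when the role is applied without tags, so the smallest consumer is a single play applying the role. A minimal sketch (host name, FQDN, storage values and the vaulted password are placeholders, and many more he_* variables from the role defaults apply):

- name: Deploy oVirt hosted engine
  hosts: host.example.com
  become: true
  vars:
    he_fqdn: engine.example.com
    he_admin_password: "{{ vault_he_admin_password }}"
    he_domain_type: nfs
    he_storage_domain_addr: storage.example.com
    he_storage_domain_path: /exports/hosted_engine
  roles:
    - ovirt.ovirt.hosted_engine_setup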
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/get_local_vm_disk_path.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/get_local_vm_disk_path.yml
new file mode 100644
index 00000000..c6470281
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/get_local_vm_disk_path.yml
@@ -0,0 +1,13 @@
+---
+- name: Find the local appliance image
+ find:
+ paths: "{{ he_local_vm_dir }}/images"
+ recurse: true
+ patterns: ^.*.(?<!meta)$
+ use_regex: true
+ register: app_img
+- debug: var=app_img
+- name: Set local_vm_disk_path
+ set_fact:
+ local_vm_disk_path: "{{ app_img.files[0].path }}"
+ when: app_img.files|length > 0
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/initial_clean.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/initial_clean.yml
new file mode 100644
index 00000000..a7cb99c7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/initial_clean.yml
@@ -0,0 +1,123 @@
+---
+- name: Initial clean
+ tags: he_initial_clean
+ block:
+ - name: Stop libvirt service
+ service:
+ name: libvirtd
+ state: stopped
+ enabled: true
+ - name: Drop vdsm config statements
+ command: >-
+ sed -i
+ '/## beginning of configuration section by
+ vdsm-4.[0-9]\+.[0-9]\+/,/## end of configuration section by vdsm-4.[0-9]\+.[0-9]\+/d' {{ item }}
+ environment: "{{ he_cmd_lang }}"
+ args:
+ warn: false
+ with_items:
+ - /etc/libvirt/libvirtd.conf
+ - /etc/libvirt/qemu.conf
+ - /etc/libvirt/qemu-sanlock.conf
+ - /etc/sysconfig/libvirtd
+ tags: ['skip_ansible_lint']
+ - name: Drop VNC encryption config statements
+ command: >-
+ sed -i
+ '/## beginning of configuration section for VNC encryption/,/##
+ end of configuration section for VNC encryption\+/d' /etc/libvirt/qemu.conf
+ args:
+ warn: false
+ environment: "{{ he_cmd_lang }}"
+ - name: Restore initial abrt config files
+ copy:
+ remote_src: true
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ mode: preserve
+ with_items:
+ - {
+ src: /usr/share/abrt/conf.d/abrt-action-save-package-data.conf,
+ dest: /etc/abrt/abrt-action-save-package-data.conf
+ }
+ - {
+ src: /usr/share/abrt/conf.d/abrt.conf,
+ dest: /etc/abrt/abrt.conf
+ }
+ - {
+ src: /usr/share/abrt/conf.d/plugins/CCpp.conf,
+ dest: /etc/abrt/plugins/CCpp.conf
+ }
+ - {
+ src: /usr/share/abrt/conf.d/plugins/vmcore.conf,
+ dest: /etc/abrt/plugins/vmcore.conf
+ }
+ - name: Restart abrtd service
+ service:
+ name: abrtd
+ state: restarted
+ - name: Drop libvirt sasl2 configuration by vdsm
+ command: >-
+ sed -i '/## start vdsm-4.[0-9]\+.[0-9]\+ configuration/,/## end vdsm configuration/d' /etc/sasl2/libvirt.conf
+ environment: "{{ he_cmd_lang }}"
+ args:
+ warn: false
+ tags: ['skip_ansible_lint']
+ - name: Stop and disable services
+ service:
+ name: "{{ item }}"
+ state: stopped
+ enabled: false
+ with_items:
+ - ovirt-ha-agent
+ - ovirt-ha-broker
+ - vdsmd
+ - libvirtd-tls.socket
+ - name: Restore initial libvirt default network configuration
+ copy:
+ remote_src: true
+ src: /usr/share/libvirt/networks/default.xml
+ dest: /etc/libvirt/qemu/networks/default.xml
+ mode: preserve
+ - name: Start libvirt
+ service:
+ name: libvirtd
+ state: started
+ enabled: true
+ - name: Check for leftover local Hosted Engine VM
+ shell: virsh list | grep {{ he_vm_name }}Local | cat
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: local_vm_list
+ - name: Destroy leftover local Hosted Engine VM
+ command: virsh destroy {{ he_vm_name }}Local
+ environment: "{{ he_cmd_lang }}"
+ ignore_errors: true
+ when: local_vm_list.stdout_lines|length >= 1
+ - name: Check for leftover defined local Hosted Engine VM
+ shell: virsh list --all | grep {{ he_vm_name }}Local | cat
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: local_vm_list_all
+ - name: Undefine leftover local engine VM
+ command: virsh undefine --managed-save {{ he_vm_name }}Local
+ environment: "{{ he_cmd_lang }}"
+ when: local_vm_list_all.stdout_lines|length >= 1
+ changed_when: true
+ - name: Check for leftover defined Hosted Engine VM
+ shell: virsh list --all | grep {{ he_vm_name }} | cat
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+ register: target_vm_list_all
+ - name: Undefine leftover engine VM
+ command: virsh undefine --managed-save {{ he_vm_name }}
+ environment: "{{ he_cmd_lang }}"
+ when: target_vm_list_all.stdout_lines|length >= 1
+ changed_when: true
+ - name: Remove any entries for the local VM from the known_hosts file
+ known_hosts:
+ name: "{{ he_fqdn }}"
+ state: absent
+ delegate_to: localhost
+ become: false
+...
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/install_appliance.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/install_appliance.yml
new file mode 100644
index 00000000..733305b4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/install_appliance.yml
@@ -0,0 +1,36 @@
+---
+- name: Install ovirt-engine-appliance rpm
+ yum:
+ name: ovirt-engine-appliance
+ state: present
+ register: task_result
+ until: task_result is success
+ retries: 10
+ delay: 2
+- name: Parse appliance configuration for path
+ shell: set -euo pipefail && grep path /etc/ovirt-hosted-engine/10-appliance.conf | cut -f2 -d'='
+ environment: "{{ he_cmd_lang }}"
+ register: he_appliance_ova_out
+ changed_when: true
+- debug: var=he_appliance_ova_out
+- name: Parse appliance configuration for sha1sum
+ shell: set -euo pipefail && grep sha1sum /etc/ovirt-hosted-engine/10-appliance.conf | cut -f2 -d'='
+ environment: "{{ he_cmd_lang }}"
+ register: he_appliance_ova_sha1
+ changed_when: true
+- debug: var=he_appliance_ova_sha1
+- name: Get OVA path
+ set_fact:
+ he_appliance_ova_path: "{{ he_appliance_ova_out.stdout_lines|first }}"
+ cacheable: true
+- debug: var=he_appliance_ova_path
+- name: Compute sha1sum
+ stat:
+ path: "{{ he_appliance_ova_path }}"
+ checksum_algorithm: sha1
+ register: ova_stats
+- debug: var=ova_stats
+- name: Compare sha1sum
+ fail:
+ msg: "{{ he_appliance_ova_path }} is corrupted (sha1sum)"
+ when: he_appliance_ova_sha1.stdout_lines|first != ova_stats.stat.checksum
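Both grep/cut pipelines above read the same key=value pairs that the appliance rpm drops into /etc/ovirt-hosted-engine/10-appliance.conf. Illustratively (the values below are invented; the real ones depend on the installed appliance build):

path=/usr/share/ovirt-engine-appliance/ovirt-engine-appliance-4.4-20210101.ova
sha1sum=0123456789abcdef0123456789abcdef01234567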
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/install_packages.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/install_packages.yml
new file mode 100644
index 00000000..54b3016a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/install_packages.yml
@@ -0,0 +1,9 @@
+---
+- name: Install oVirt Hosted Engine packages
+ package:
+ name: "ovirt-hosted-engine-setup"
+ state: present
+ register: task_result
+ until: task_result is success
+ retries: 10
+ delay: 2
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/ipv_switch.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/ipv_switch.yml
new file mode 100644
index 00000000..98c286d1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/ipv_switch.yml
@@ -0,0 +1,11 @@
+---
+- name: Choose IPv4, IPv6 or auto
+ block:
+ - name: Fail if he_force_ip4 and he_force_ip6 are set at the same time
+ fail:
+ msg: he_force_ip4 and he_force_ip6 cannot be used at the same time
+ when: he_force_ip4 and he_force_ip6
+ - name: Prepare getent key
+ set_fact:
+ ip_key: "{{ 'ahostsv4' if he_force_ip4 else 'ahostsv6' if he_force_ip6 else 'ahosts' }}"
+ when: ip_key is not defined
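ip_key feeds the getent lookups elsewhere in the role, so the three possible outcomes translate into these resolution commands (hostname is a placeholder):

getent ahostsv4 host.example.com | grep STREAM   # he_force_ip4
getent ahostsv6 host.example.com | grep STREAM   # he_force_ip6
getent ahosts host.example.com | grep STREAM     # neither forced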
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/iscsi_discover.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/iscsi_discover.yml
new file mode 100644
index 00000000..a88a8e23
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/iscsi_discover.yml
@@ -0,0 +1,39 @@
+---
+- include_tasks: auth_sso.yml
+- name: Prepare iSCSI parameters
+ set_fact:
+ iscsid:
+ iscsi:
+ address: "{{ he_iscsi_portal_addr }}"
+ port: "{{ he_iscsi_portal_port }}"
+ username: "{{ he_iscsi_discover_username }}"
+ password: "{{ he_iscsi_discover_password }}"
+ no_log: true
+- name: Fetch host facts
+ ovirt_host_info:
+ pattern: name={{ he_host_name }}
+ auth: "{{ ovirt_auth }}"
+ register: host_result
+ until: host_result is succeeded and host_result.ovirt_hosts|length >= 1
+ retries: 50
+ delay: 10
+- debug: var=host_result
+- name: iSCSI discover with REST API
+ uri:
+ url: https://{{ he_fqdn }}/ovirt-engine/api/hosts/{{ host_result.ovirt_hosts[0].id }}/iscsidiscover
+ validate_certs: false
+ method: POST
+ body: "{{ iscsid | to_json }}"
+ return_content: true
+ body_format: json
+ status_code: 200
+ headers:
+ Content-Type: application/json
+ Accept: application/json
+ Authorization: "Basic {{ ('admin@internal' + ':' + he_admin_password ) | b64encode }}"
+ register: otopi_iscsi_targets
+- debug: var=otopi_iscsi_targets
+# TODO: perform an iSCSI logout when viable, see:
+# https://bugzilla.redhat.com/show_bug.cgi?id=1535951
+# https://github.com/ansible/ansible/issues/35039
+...
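The uri task posts the iscsid fact rendered through to_json, so the request body delivered to the /iscsidiscover endpoint has this shape (all values are placeholders):

{"iscsi": {"address": "10.0.0.1", "port": "3260", "username": "discover_user", "password": "discover_pass"}}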
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/iscsi_getdevices.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/iscsi_getdevices.yml
new file mode 100644
index 00000000..dd909a65
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/iscsi_getdevices.yml
@@ -0,0 +1,34 @@
+---
+- include_tasks: auth_sso.yml
+- name: iSCSI login
+ ovirt_host:
+ name: "{{ he_host_name }}"
+ state: iscsilogin
+ timeout: 30
+ iscsi:
+ username: "{{ he_iscsi_username }}"
+ password: "{{ he_iscsi_password }}"
+ address: "{{ item.0 }}"
+ port: "{{ item.1 }}"
+ target: "{{ he_iscsi_target }}"
+ auth: "{{ ovirt_auth }}"
+ no_log: true
+ ignore_errors: true
+ # TODO: avoid the with_together loop once
+ # https://github.com/ansible/ansible/issues/32640 got properly fixed
+ with_together:
+ - "{{ he_iscsi_portal_addr.split(',') }}"
+ - "{{ he_iscsi_portal_port.split(',') if he_iscsi_portal_port is string else he_iscsi_portal_port }}"
+- name: Get iSCSI LUNs
+ ovirt_host_storage_info:
+ host: "{{ he_host_name }}"
+ iscsi:
+ username: "{{ he_iscsi_username }}"
+ password: "{{ he_iscsi_password }}"
+ address: "{{ he_iscsi_portal_addr.split(',')|first }}"
+ port: "{{ he_iscsi_portal_port.split(',')|first if he_iscsi_portal_port is string else he_iscsi_portal_port }}"
+ target: "{{ he_iscsi_target }}"
+ auth: "{{ ovirt_auth }}"
+ register: otopi_iscsi_devices
+- debug: var=otopi_iscsi_devices
+...
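Note how with_together zips the comma-separated portal lists pairwise: with, say, he_iscsi_portal_addr set to "10.0.0.1,10.0.0.2" and he_iscsi_portal_port to "3260,3261" (invented values), the login task runs twice, once for (10.0.0.1, 3260) and once for (10.0.0.2, 3261), while the subsequent LUN query uses only the first portal of each list.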
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/main.yml
new file mode 100644
index 00000000..b6b92090
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/main.yml
@@ -0,0 +1,19 @@
+---
+- name: Deploy hosted-engine
+ import_tasks: full_execution.yml
+ tags: always
+
+- name: Execute just a specific set of steps
+ include_tasks: partial_execution.yml
+ tags:
+ - initial_clean
+ - final_clean
+ - bootstrap_local_vm
+ - create_storage_domain
+ - create_target_vm
+ - iscsi_discover
+ - iscsi_getdevices
+ - fc_getdevices
+ - get_network_interfaces
+ - validate_hostnames
+ - never
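Because each import in partial_execution.yml also carries the never tag, those steps stay inert on a normal run and execute only when their tag is requested explicitly on the command line, along the lines of (playbook name is a placeholder):

ansible-playbook he_deployment.yml --tags initial_clean
ansible-playbook he_deployment.yml --tags iscsi_discover,iscsi_getdevices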
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/partial_execution.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/partial_execution.yml
new file mode 100644
index 00000000..6d1aaa5b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/partial_execution.yml
@@ -0,0 +1,152 @@
+---
+- name: Force facts gathering
+ setup:
+ tags:
+ - initial_clean
+ - final_clean
+ - bootstrap_local_vm
+ - create_storage_domain
+ - create_target_vm
+ - iscsi_discover
+ - iscsi_getdevices
+ - fc_getdevices
+ - get_network_interfaces
+ - validate_hostnames
+ - never
+
+
+- name: Initial validations and cleanups
+ block:
+ - name: Install required packages for oVirt Hosted Engine deployment
+ import_tasks: install_packages.yml
+
+ - name: System configuration validations
+ include_tasks: "{{ item }}"
+ with_fileglob: "pre_checks/*.yml"
+
+ - name: Clean environment before deployment
+ import_tasks: initial_clean.yml
+ tags: [initial_clean, bootstrap_local_vm, never]
+
+
+- name: Bootstrap local engine VM
+ block:
+ - name: Bootstrap local engine VM
+ block:
+ - name: 01 Bootstrap local VM
+ import_tasks: bootstrap_local_vm/01_prepare_routing_rules.yml
+
+ - name: 02 Bootstrap local VM
+ import_tasks: bootstrap_local_vm/02_create_local_vm.yml
+
+ - name: Local engine VM installation - Pre tasks
+ block:
+ - name: 03 Bootstrap local VM
+ import_tasks: bootstrap_local_vm/03_engine_initial_tasks.yml
+ delegate_to: "{{ groups.engine[0] }}"
+
+ - name: Engine Setup on local VM
+ block:
+ - name: Run engine-setup
+ vars:
+ ovirt_engine_setup_hostname: "{{ he_fqdn.split('.')[0] }}"
+ ovirt_engine_setup_organization: "{{ he_cloud_init_domain_name }}"
+ ovirt_engine_setup_dwh_db_host: "{{ he_fqdn.split('.')[0] }}"
+ ovirt_engine_setup_firewall_manager: null
+ ovirt_engine_setup_answer_file_path: /root/ovirt-engine-answers
+ ovirt_engine_setup_use_remote_answer_file: true
+ ovirt_engine_setup_offline: "{{ he_offline_deployment }}"
+ ovirt_engine_setup_package_list: "{{ he_additional_package_list }}"
+ ovirt_engine_setup_admin_password: "{{ he_admin_password }}"
+ import_role:
+ name: ovirt.ovirt.engine_setup
+ delegate_to: "{{ groups.engine[0] }}"
+ rescue:
+ - name: Sync on engine machine
+ command: sync
+ changed_when: true
+ delegate_to: "{{ groups.engine[0] }}"
+ - name: Fetch logs from the engine VM
+ import_tasks: fetch_engine_logs.yml
+ ignore_errors: true
+ delegate_to: "{{ he_ansible_host_name }}"
+ - name: Notify the user about a failure
+ fail:
+ msg: >
+ There was a failure deploying the engine on the local engine VM.
+ The system may not be provisioned according to the playbook;
+ please check the logs for the issue,
+ fix accordingly, or re-deploy from scratch.
+
+ - name: Local engine VM installation - Post tasks
+ block:
+ - name: 04 Bootstrap local VM
+ import_tasks: bootstrap_local_vm/04_engine_final_tasks.yml
+ delegate_to: "{{ groups.engine[0] }}"
+
+ - name: Add first HE host
+ block:
+ - name: 05 Bootstrap local VM
+ import_tasks: bootstrap_local_vm/05_add_host.yml
+ tags: [bootstrap_local_vm, never]
+
+
+- name: Create hosted-engine storage domain
+ block:
+ - name: Create Storage Domain
+ import_tasks: create_storage_domain.yml
+ tags: [create_storage_domain, never]
+
+
+- name: Create and configure target VM
+ block:
+ - name: Fetch host IP address
+ import_tasks: fetch_host_ip.yml
+
+ - name: Create target hosted engine vm
+ import_tasks: create_target_vm/01_create_target_hosted_engine_vm.yml
+
+ - name: Configure database settings
+ import_tasks: create_target_vm/02_engine_vm_configuration.yml
+ delegate_to: "{{ groups.engine[0] }}"
+ tags: [create_target_vm, never]
+
+
+- name: Hosted engine final tasks
+ import_tasks: create_target_vm/03_hosted_engine_final_tasks.yml
+ tags: [create_target_vm, never]
+
+- name: Sync on engine machine
+ import_tasks: sync_on_engine_machine.yml
+ changed_when: true
+ ignore_errors: true
+ tags: [create_target_vm, final_clean, never]
+
+- name: Final clean
+ import_tasks: final_clean.yml
+ tags: [create_target_vm, final_clean, never]
+
+
+- name: Validate network interface
+ import_tasks: "pre_checks/001_validate_network_interfaces.yml"
+ tags: [get_network_interfaces, never]
+
+
+- name: Validate hostnames
+ import_tasks: "pre_checks/002_validate_hostname_tasks.yml"
+ tags: [validate_hostnames, never]
+
+
+- name: Get FC devices
+ import_tasks: "fc_getdevices.yml"
+ tags: [fc_getdevices, never]
+
+
+- name: iSCSI discover
+ import_tasks: "iscsi_discover.yml"
+ tags: [iscsi_discover, never]
+
+
+- name: Get iSCSI devices
+ import_tasks: "iscsi_getdevices.yml"
+ tags: [iscsi_getdevices, never]
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pause_execution.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pause_execution.yml
new file mode 100644
index 00000000..a33ef2b1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pause_execution.yml
@@ -0,0 +1,13 @@
+---
+- name: Create temporary lock file
+ tempfile:
+ state: file
+ suffix: _he_setup_lock
+ delegate_to: localhost
+ register: he_setup_lock_file
+- name: Pause execution until {{ he_setup_lock_file.path }} is removed; delete it once ready to proceed
+ wait_for:
+ path: "{{ he_setup_lock_file.path }}"
+ state: absent
+ timeout: 86400 # 24 hours
+ delegate_to: localhost
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/001_validate_network_interfaces.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/001_validate_network_interfaces.yml
new file mode 100644
index 00000000..e4f7f543
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/001_validate_network_interfaces.yml
@@ -0,0 +1,79 @@
+---
+- name: Network interfaces
+ block:
+ - name: Detecting interface on existing management bridge
+ set_fact:
+ bridge_interface: "{{ hostvars[inventory_hostname]['ansible_' + bridge_name]['interfaces']|first }}"
+ when: "'ansible_' + bridge_name in hostvars[inventory_hostname]"
+ with_items:
+ - 'ovirtmgmt'
+ - 'rhevm'
+ loop_control:
+ loop_var: bridge_name
+ - debug: var=bridge_interface
+ - name: Get all active network interfaces
+ vars:
+ acceptable_bond_modes: ['active-backup', 'balance-xor', 'broadcast', '802.3ad']
+ set_fact:
+ otopi_net_host: "{{ hostvars[inventory_hostname]['ansible_' + iface_item]['device'] }}"
+ type: "{{ hostvars[inventory_hostname]['ansible_' + iface_item]['type'] }}"
+ bond_valid_name: "{{ iface_item | regex_search('(^bond[0-9]+)') }}"
+ when: (
+ (
+ iface_item != 'lo'
+ ) and (
+ bridge_interface is not defined
+ ) and (
+ 'active' in hostvars[inventory_hostname]['ansible_' + iface_item] and
+ hostvars[inventory_hostname]['ansible_' + iface_item]['active']
+ ) and (
+ hostvars[inventory_hostname]['ansible_' + iface_item]['type'] != 'bridge'
+ ) and (
+ hostvars[inventory_hostname]['ansible_' + iface_item]['ipv4'] is defined or
+ hostvars[inventory_hostname]['ansible_' + iface_item]['ipv6'] is defined
+ ) and (
+ (
+ hostvars[inventory_hostname]['ansible_' + iface_item]['type'] != 'bonding'
+ ) or (
+ (
+ hostvars[inventory_hostname]['ansible_' + iface_item]['type'] == 'bonding'
+ ) and (
+ hostvars[inventory_hostname]['ansible_' + iface_item]['slaves'][0] is defined
+ ) and (
+ hostvars[inventory_hostname]['ansible_' + iface_item]['mode'] in acceptable_bond_modes
+ )
+ )
+ )
+ )
+ with_items:
+ - "{{ ansible_interfaces | map('replace', '-','_') | list }}"
+ loop_control:
+ loop_var: iface_item
+ register: valid_network_interfaces
+ - debug: var=valid_network_interfaces
+ - name: Filter bonds with bad naming
+ set_fact:
+ net_iface: "{{ bond_item }}"
+ when: >-
+ not 'skipped' in bond_item and ((bond_item['ansible_facts']['type'] == 'ether') or
+ ( (bond_item['ansible_facts']['type'] == 'bonding') and bond_item['ansible_facts']['bond_valid_name'] ))
+ with_items:
+ - "{{ valid_network_interfaces['results'] }}"
+ loop_control:
+ loop_var: bond_item
+ register: bb_filtered_list
+ - debug: var=bb_filtered_list
+ - name: Generate output list
+ set_fact:
+ host_net: >-
+ {{ [bridge_interface] if bridge_interface is defined else bb_filtered_list.results |
+ reject('skipped') | map(attribute='bond_item.ansible_facts.otopi_net_host') | list }}
+ - debug: var=host_net
+ - import_tasks: filter_team_devices.yml
+ - name: Validate selected bridge interface if management bridge does not exist
+ fail:
+ msg: The selected network interface is not valid
+ when:
+ he_bridge_if not in otopi_host_net.ansible_facts.otopi_host_net and bridge_interface is not defined and
+ not he_just_collect_network_interfaces
+...
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/002_validate_hostname_tasks.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/002_validate_hostname_tasks.yml
new file mode 100644
index 00000000..cc9e42e5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/002_validate_hostname_tasks.yml
@@ -0,0 +1,118 @@
+---
+- name: Choose IPv4, IPv6 or auto
+ import_tasks: ipv_switch.yml
+- name: Define he_host_address and he_host_ip
+ import_tasks: fetch_host_ip.yml
+ when: he_host_ip is none or he_host_address is none
+
+- name: Validate host hostname
+ block:
+ - name: Avoid localhost
+ fail:
+ msg: >
+ localhost is not a valid address
+ when: he_host_address in ['localhost', 'localhost.localdomain']
+ - name: Ensure host address resolves locally
+ fail:
+ msg: >
+ The address proposed for this host does not resolve locally
+ when: he_host_ip not in ansible_all_ipv4_addresses | union(ansible_all_ipv6_addresses)
+ - name: Ensure the resolved address resolves on the selected interface
+ block:
+ - name: Get target address from selected interface (IPv4)
+ shell: >-
+ ip addr show
+ {{ he_mgmt_network
+ if 'ansible_' + he_mgmt_network.replace('-','_') in hostvars[inventory_hostname]
+ else he_bridge_if }} |
+ grep 'inet ' |
+ cut -d' ' -f6 |
+ cut -d'/' -f1
+ register: target_address_v4
+ changed_when: true
+ - debug: var=target_address_v4
+ - name: Get target address from selected interface (IPv6)
+ shell: >-
+ ip addr show
+ {{ he_mgmt_network
+ if 'ansible_' + he_mgmt_network.replace('-','_') in hostvars[inventory_hostname]
+ else he_bridge_if }} |
+ grep 'inet6 ' |
+ cut -d' ' -f6 |
+ cut -d'/' -f1
+ register: target_address_v6
+ changed_when: true
+ - debug: var=target_address_v6
+ - name: Check the resolved address resolves on the selected interface
+ fail:
+ msg: >
+ The resolved address is not configured
+ on the selected interface
+ when: >-
+ he_host_ip not in target_address_v4.stdout_lines and
+ he_host_ip not in target_address_v6.stdout_lines
+ - name: Check for alias
+ shell: getent {{ ip_key }} {{ he_host_address }} | cut -d' ' -f1 | uniq
+ register: hostname_res_count_output
+ changed_when: true
+ ignore_errors: true
+ - debug: var=hostname_res_count_output
+ - name: Filter resolved address list
+ set_fact:
+ hostname_res_count_output_filtered: >-
+ {{ hostname_res_count_output.stdout_lines |
+ difference(target_address_v6.stdout_lines) |
+ difference(target_address_v4.stdout_lines) }}
+ - name: Ensure the resolved address resolves only on the selected interface
+ fail:
+ msg: >
+ hostname '{{ he_host_address }}' doesn't uniquely match the interface
+ '{{ he_bridge_if }}' selected for the management bridge;
+ it also matches the interface(s) with IP {{ hostname_res_count_output.stdout_lines |
+ difference([he_host_ip,]) }}.
+ Please make sure that the hostname used for
+ the management network resolves only
+ on the selected interface.
+ when: hostname_res_count_output_filtered|length > 0
+ when: he_bridge_if is defined and he_bridge_if is not none and he_mgmt_network is defined
+ when: he_host_address is defined and he_host_address is not none
+- name: Validate engine he_fqdn
+ block:
+ - name: Avoid localhost
+ fail:
+ msg: >
+ localhost is not a valid he_fqdn for the engine VM
+ when: he_fqdn in ['localhost', 'localhost.localdomain']
+ - name: Get engine FQDN resolution
+ shell: getent {{ ip_key }} {{ he_fqdn }} | grep STREAM
+ environment: "{{ he_cmd_lang }}"
+ register: fqdn_resolution_output
+ changed_when: true
+ ignore_errors: true
+ - debug: var=fqdn_resolution_output
+ - name: Check engine he_fqdn resolution
+ fail:
+ msg: >
+ Unable to resolve address
+ when: fqdn_resolution_output.rc != 0
+ - name: Parse engine he_fqdn resolution
+ set_fact:
+ r_fqdn_address: "{{ fqdn_resolution_output.stdout.split()[0] }}"
+ - debug: var=r_fqdn_address
+ - name: Ensure engine he_fqdn doesn't resolve locally
+ fail:
+ msg: >
+ The he_fqdn proposed for the engine VM resolves on this host
+ when: r_fqdn_address in ansible_all_ipv4_addresses | union(ansible_all_ipv6_addresses)
+ - name: Check http/https proxy
+ fail:
+ msg: >
+ Your system is configured to use a proxy; please
+ add an exception for {{ url }} with the no_proxy directive.
+ when: url is ovirt.ovirt.proxied
+ loop_control:
+ loop_var: url
+ with_items:
+ - "http://{{ he_fqdn }}/"
+ - "https://{{ he_fqdn }}/"
+ when: he_fqdn is defined and he_fqdn is not none
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/define_variables.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/define_variables.yml
new file mode 100644
index 00000000..64086ee8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/define_variables.yml
@@ -0,0 +1,63 @@
+---
+- name: Define Variables
+ block:
+ - name: Define he_cloud_init_domain_name
+ block:
+ - name: Get domain name
+ command: hostname -d
+ changed_when: true
+ register: host_domain_name
+ - name: Set he_cloud_init_domain_name
+ set_fact:
+ he_cloud_init_domain_name: "{{ host_domain_name.stdout_lines[0] if host_domain_name.stdout_lines else '' }}"
+ when: he_cloud_init_domain_name is not defined
+ - debug: var=he_cloud_init_domain_name
+
+ - name: Define he_cloud_init_host_name
+ set_fact:
+ he_cloud_init_host_name: "{{ he_fqdn }}"
+ - debug: var=he_cloud_init_host_name
+
+ - name: Define he_vm_uuid
+ block:
+ - name: Get uuid
+ command: uuidgen
+ changed_when: true
+ register: uuid
+ - name: Set he_vm_uuid
+ set_fact:
+ he_vm_uuid: "{{ uuid.stdout }}"
+ - debug: var=he_vm_uuid
+
+ - name: Define he_nic_uuid
+ block:
+ - name: Get uuid
+ command: uuidgen
+ changed_when: true
+ register: uuid
+ - name: Set he_nic_uuid
+ set_fact:
+ he_nic_uuid: "{{ uuid.stdout }}"
+ - debug: var=he_nic_uuid
+
+ - name: Define he_cdrom_uuid
+ block:
+ - name: Get uuid
+ command: uuidgen
+ changed_when: true
+ register: uuid
+ - name: Set he_cdrom_uuid
+ set_fact:
+ he_cdrom_uuid: "{{ uuid.stdout }}"
+ - debug: var=he_cdrom_uuid
+
+ - name: Define Timezone
+ block:
+ - name: Get timezone
+ shell: timedatectl | grep "Time zone" | awk '{print $3}'
+ changed_when: true
+ register: timezone
+ - name: Set he_time_zone
+ set_fact:
+ he_time_zone: "{{ timezone.stdout }}"
+ - debug: var=he_time_zone
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_data_center_name.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_data_center_name.yml
new file mode 100644
index 00000000..9071b2da
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_data_center_name.yml
@@ -0,0 +1,15 @@
+---
+- name: Validate Data Center name format
+ block:
+ - name: Fail if Data Center name format is incorrect
+ fail:
+ msg: >-
+ "Invalid Data Center name format. Data Center name may only contain letters, numbers, '-', or '_'."
+ " Got {{ he_data_center }}"
+ when: not he_data_center | regex_search( "^[a-zA-Z0-9_-]+$" )
+ - name: Validate Cluster name
+ fail:
+ msg: >-
+ "Cluster name cannot be 'Default'. This is a reserved name for the default DataCenter. Please choose"
+ " another name for the cluster"
+ when: he_data_center != "Default" and he_cluster == "Default"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_firewalld.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_firewalld.yml
new file mode 100644
index 00000000..32dab760
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_firewalld.yml
@@ -0,0 +1,14 @@
+---
+- name: Check firewalld status
+ block:
+ - name: Check firewalld status
+ systemd:
+ name: firewalld
+ register: firewalld_s
+ - name: Enforce firewalld status
+ fail:
+ msg: >
+ firewalld is required to be enabled and active in order
+ to correctly deploy hosted-engine.
+ Please check, fix accordingly and re-deploy.
+ when: firewalld_s.status.SubState != 'running' or firewalld_s.status.LoadState == 'masked'
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_gateway.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_gateway.yml
new file mode 100644
index 00000000..85496b97
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_gateway.yml
@@ -0,0 +1,29 @@
+---
+- name: Define default gateway
+ block:
+ - name: Get default gateway IPv4
+ shell: ip r | grep default | awk '{print $3}'
+ changed_when: true
+ register: get_gateway_4
+ when: he_default_gateway_4 is not defined or he_default_gateway_4 is none or not he_default_gateway_4
+ - debug: var=get_gateway_4
+ - name: Get default gateway IPv6
+ shell: ip -6 r | grep default | awk '{print $3}'
+ changed_when: true
+ register: get_gateway_6
+ when: he_default_gateway_6 is not defined or he_default_gateway_6 is none or not he_default_gateway_6
+ - debug: var=get_gateway_6
+ - name: Set he_gateway
+ set_fact:
+ he_gateway: >-
+ {{ get_gateway_4.stdout_lines[0] if get_gateway_4.stdout_lines else
+ get_gateway_6.stdout_lines[0] if get_gateway_6.stdout_lines else
+ ''
+ }}
+ when: he_gateway is not defined or he_gateway is none or not he_gateway|trim
+ - debug: var=he_gateway
+
+- name: Fail if there is no gateway
+ fail:
+ msg: "No default gateway is defined"
+ when: he_gateway is none or not he_gateway|trim
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_mac_address.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_mac_address.yml
new file mode 100644
index 00000000..c09537f9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_mac_address.yml
@@ -0,0 +1,17 @@
+---
+- name: Define Engine VM MAC address
+ block:
+ - name: Generate unicast MAC address
+ shell: od -An -N6 -tx1 /dev/urandom | sed -e 's/^ *//' -e 's/ */:/g' -e 's/:$//' -e 's/^\(.\)[13579bdf]/\10/'
+ changed_when: true
+ register: mac_address
+ - debug: var=mac_address
+ - name: Set he_vm_mac_addr
+ set_fact:
+ he_vm_mac_addr: >-
+ {{ mac_address.stdout if he_vm_mac_addr is not defined or he_vm_mac_addr is none else he_vm_mac_addr }}
+ - name: Fail if MAC address structure is incorrect
+ fail:
+ msg: "Invalid unicast MAC address format. Got {{ he_vm_mac_addr }}"
+ when: not he_vm_mac_addr | regex_search( "^[a-fA-F0-9][02468aAcCeE](:[a-fA-F0-9]{2}){5}$" )
+- debug: var=he_vm_mac_addr
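The regex admits only MAC addresses whose second hex digit is even, i.e. whose first octet has the least significant (multicast) bit clear, which is exactly what the sed expression in the generation step forces. As a worked check: 52:54:00:12:34:56 passes (second digit 2 is even), while 53:54:00:12:34:56 would be rejected (3 is odd, marking a multicast address).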
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_memory_size.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_memory_size.yml
new file mode 100644
index 00000000..4edddd8e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_memory_size.yml
@@ -0,0 +1,54 @@
+---
+- name: Get available memory amount
+ block:
+ - name: Get free memory
+ shell: free -m | grep Mem | awk '{print $4}'
+ changed_when: true
+ register: free_mem
+ - debug: var=free_mem
+ - name: Get cached memory
+ shell: free -m | grep Mem | awk '{print $6}'
+ changed_when: true
+ register: cached_mem
+ - debug: var=cached_mem
+ - name: Set Max memory
+ set_fact:
+ max_mem: "{{ free_mem.stdout|int + cached_mem.stdout|int - he_reserved_memory_MB + he_avail_memory_grace_MB }}"
+ - debug: var=max_mem
+
+- name: Set he_mem_size_MB to the maximum available if not defined
+ set_fact:
+ he_mem_size_MB: "{{ he_mem_size_MB if he_mem_size_MB != 'max' else max_mem }}"
+- debug: var=he_mem_size_MB
+
+- name: Fail if available memory is less than the minimal requirement
+ fail:
+ msg: >-
+ Available memory ({{ max_mem }}MB) is less than the minimal requirement ({{ he_minimal_mem_size_MB }}MB).
+ Be aware that {{ he_reserved_memory_MB }}MB is reserved for the host and cannot be allocated to the
+ engine VM.
+ when: >-
+ he_requirements_check_enabled and he_memory_requirements_check_enabled and max_mem|int < he_minimal_mem_size_MB|int
+
+- name: Fail if the user chose less memory than the minimal requirement
+ fail:
+ msg: "Memory size must be at least {{ he_minimal_mem_size_MB }}MB, while you selected only {{ he_mem_size_MB }}MB"
+ when: >-
+ he_requirements_check_enabled and
+ he_memory_requirements_check_enabled and he_minimal_mem_size_MB|int > he_mem_size_MB|int
+
+- name: Fail if the user chose more memory than is available
+ fail:
+ msg: >-
+ Not enough memory: {{ he_mem_size_MB }}MB requested, while only {{ max_mem }}MB are available on the host.
+ Be aware that {{ he_reserved_memory_MB }}MB is reserved for the host and cannot be allocated to the
+ engine VM.
+
+ when: >-
+ he_requirements_check_enabled and
+ he_memory_requirements_check_enabled and he_mem_size_MB|int > max_mem|int
+
+- name: Fail if he_disk_size_GB is smaller than the minimal requirement
+ fail:
+ msg: "Disk size too small: ({{ he_disk_size_GB }}GB), disk size must be at least {{ he_minimal_disk_size_GB }}GB"
+ when: he_requirements_check_enabled and he_disk_size_GB|int < he_minimal_disk_size_GB|int
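As a worked example of the max_mem formula above (all numbers invented): with 3000MB free, 2000MB cached, he_reserved_memory_MB=512 and he_avail_memory_grace_MB=200, max_mem = 3000 + 2000 - 512 + 200 = 4688MB, and a he_mem_size_MB of 'max' would then resolve to 4688.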
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_network_test.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_network_test.yml
new file mode 100644
index 00000000..6c8354c4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_network_test.yml
@@ -0,0 +1,34 @@
+---
+- name: Validate network connectivity check configuration
+ block:
+ - debug: var=he_network_test
+ - name: Fail if he_network_test is not valid
+ fail:
+ msg: "Invalid he_network_test defined"
+ changed_when: true
+ when: he_network_test not in ['dns', 'ping', 'tcp', 'none']
+ - name: Validate TCP network connectivity check parameters
+ block:
+ - debug: var=he_tcp_t_address
+ - name: Fail if he_tcp_t_address is not defined
+ fail:
+ msg: "No he_tcp_t_address is defined"
+ changed_when: true
+ when:
+ ( he_tcp_t_address is undefined ) or
+ ( he_tcp_t_address is none ) or
+ ( he_tcp_t_address|trim|length == 0 )
+ - debug: var=he_tcp_t_port
+ - name: Fail if he_tcp_t_port is not defined
+ fail:
+ msg: "No he_tcp_t_port is defined"
+ changed_when: true
+ when:
+ ( he_tcp_t_port is undefined ) or
+ ( he_tcp_t_port is none )
+ - name: Fail if he_tcp_t_port is not an integer
+ fail:
+ msg: "he_tcp_t_port has to be an integer"
+ changed_when: true
+ when: not he_tcp_t_port|int
+ when: he_network_test == 'tcp'
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_services_status.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_services_status.yml
new file mode 100644
index 00000000..7b640cf2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_services_status.yml
@@ -0,0 +1,17 @@
+---
+- name: Populate service facts
+ systemd:
+ name: "{{ service_item }}"
+ register: checked_services
+ with_items:
+ - firewalld
+ loop_control:
+ loop_var: service_item
+- name: Fail if the service is masked or not running
+ fail:
+ msg: "{{ service.name }} is masked or not running"
+ when: service.status.SubState != 'running' or service.status.LoadState == 'masked'
+ with_items: "{{ checked_services.results }}"
+ loop_control:
+ label: "{{ service.name }}"
+ loop_var: service
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_vcpus_count.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_vcpus_count.yml
new file mode 100644
index 00000000..a9d79442
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/pre_checks/validate_vcpus_count.yml
@@ -0,0 +1,21 @@
+---
+- name: Define he_maxvcpus
+ block:
+ - name: Get max CPUs
+ command: grep -c ^processor /proc/cpuinfo
+ changed_when: true
+ register: max_cpus
+ - name: Set he_maxvcpus
+ set_fact:
+ he_maxvcpus: "{{ max_cpus.stdout }}"
+ - debug: var=he_maxvcpus
+
+- name: Set he_vcpus to maximum amount if not defined
+ set_fact:
+ he_vcpus: "{{ he_vcpus if he_vcpus != 'max' else he_maxvcpus }}"
+- debug: var=he_vcpus
+
+- name: Check number of chosen CPUs
+ fail:
+ msg: "Invalid number of CPUs specified: {{ he_vcpus }}, while only {{ he_maxvcpus }} are available on the host"
+ when: he_maxvcpus|int < he_vcpus|int
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/restore_backup.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/restore_backup.yml
new file mode 100644
index 00000000..c5231d8c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/restore_backup.yml
@@ -0,0 +1,108 @@
+---
+- name: Copy the backup file to the engine VM for restore
+ copy:
+ src: "{{ he_restore_from_file }}"
+ dest: /root/engine_backup
+ owner: root
+ group: root
+ mode: 0600
+- name: Run engine-backup
+ shell: >-
+ engine-backup --mode=restore
+ --log=/var/log/ovirt-engine/setup/restore-backup-$(date -u +%Y%m%d%H%M%S).log
+ --file=/root/engine_backup --provision-all-databases --restore-permissions
+ environment: "{{ he_cmd_lang }}"
+ register: engine_restore_out
+ changed_when: true
+- debug: var=engine_restore_out
+- name: Remove backup file
+ file:
+ state: absent
+ path: /root/engine_backup
+- name: Remove previous hosted-engine VM
+ command: >-
+ psql -d engine -c "SELECT deletevm(vm_guid) FROM (SELECT vm_guid FROM vms WHERE origin=6) t"
+ environment: "{{ he_cmd_lang }}"
+ become: true
+ become_user: postgres
+ become_method: sudo
+ changed_when: true
+ register: db_remove_old_enginevm
+- name: Update dynamic data for VMs on the host used to redeploy
+ command: >-
+ psql -d engine -c
+ "UPDATE vm_dynamic SET run_on_vds = NULL, status=0 /* Down */ WHERE run_on_vds IN
+ (SELECT vds_id FROM vds
+ WHERE upper(vds_unique_id)=upper('{{ hostvars[he_ansible_host_name]['unique_id_out']['stdout_lines']|first }}'))"
+ environment: "{{ he_cmd_lang }}"
+ become: true
+ become_user: postgres
+ become_method: sudo
+ changed_when: true
+ register: db_update_host_vms
+- debug: var=db_update_host_vms
+- name: Update dynamic data for VMs migrating to the host used to redeploy
+ command: >-
+ psql -d engine -c
+ "UPDATE vm_dynamic SET migrating_to_vds = NULL, status=0 /* Down */ WHERE migrating_to_vds IN
+ (SELECT vds_id FROM vds WHERE
+ upper(vds_unique_id)=upper('{{ hostvars[he_ansible_host_name]['unique_id_out']['stdout_lines']|first }}'))"
+ environment: "{{ he_cmd_lang }}"
+ become: true
+ become_user: postgres
+ become_method: sudo
+ changed_when: true
+ register: db_update_host_migrating_vms
+- debug: var=db_update_host_migrating_vms
+- name: Remove host used to redeploy
+ command: >-
+ psql -d engine -c
+ "SELECT deletevds(vds_id) FROM
+ (SELECT vds_id FROM vds WHERE
+ upper(vds_unique_id)=upper('{{ hostvars[he_ansible_host_name]['unique_id_out']['stdout_lines']|first }}')) t"
+ environment: "{{ he_cmd_lang }}"
+ become: true
+ become_user: postgres
+ become_method: sudo
+ changed_when: true
+ register: db_remove_he_host
+- debug: var=db_remove_he_host
+- name: Rename previous HE storage domain to avoid name conflicts
+ command: >-
+ psql -d engine -c
+ "UPDATE storage_domain_static SET
+ storage_name='{{ he_storage_domain_name }}_old_{{ ansible_date_time.iso8601_basic_short }}' WHERE
+ storage_name='{{ he_storage_domain_name }}'"
+ environment: "{{ he_cmd_lang }}"
+ become: true
+ become_user: postgres
+ become_method: sudo
+ changed_when: true
+ register: db_rename_he_sd
+- debug: var=db_rename_he_sd
+- name: Save original DisableFenceAtStartupInSec
+ shell: >-
+ set -euo pipefail && engine-config -g DisableFenceAtStartupInSec |
+ cut -d' ' -f2 > /root/DisableFenceAtStartupInSec.txt
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+- name: Update DisableFenceAtStartupInSec to prevent host fencing during the recovery
+ command: "engine-config -s DisableFenceAtStartupInSec=86400"
+ environment: "{{ he_cmd_lang }}"
+ changed_when: true
+- name: Add lines to engine-setup answerfile for PKI renewal
+ lineinfile:
+ path: /root/ovirt-engine-answers
+ line: "{{ item }}"
+ with_items:
+ - "OVESETUP_PKI/renew=bool:{{ he_pki_renew_on_restore }}"
+ - "QUESTION/1/OVESETUP_SKIP_RENEW_PKI_CONFIRM=str:yes"
+- name: Remove version lock from the engine
+ file:
+ state: absent
+ path: /etc/yum/pluginconf.d/versionlock.list
+- name: Recreate an empty versionlock file
+ file:
+ state: touch
+ path: /etc/yum/pluginconf.d/versionlock.list
+ mode: 0644
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/search_available_network_subnet.yaml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/search_available_network_subnet.yaml
new file mode 100644
index 00000000..4cb02086
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/search_available_network_subnet.yaml
@@ -0,0 +1,60 @@
+---
+- name: Search for an available IPv4 subnet
+ block:
+ - name: Define 3rd chunk
+ set_fact:
+ chunk: 0
+ when: chunk is not defined
+ - name: Set 3rd chunk
+ set_fact:
+ chunk: "{{ chunk|int + 1 }}"
+ - debug: var=chunk
+ - name: Get ip route
+ shell: ip route get 192.168.{{ chunk }}.1 | grep "via" | cat
+ register: result
+ - debug: var=result
+ - name: Fail if an available subnet cannot be found
+ fail:
+ msg: >-
+ "Cannot find an available subnet for internal Libvirt network"
+ "Please set it to an unused subnet by adding the variable 'he_ipv4_subnet_prefix'"
+ "to the variable-file ( e.g. he_ipv4_subnet_prefix: '123.123.123' )."
+ when: result.stdout.find("via") == -1 and chunk|int > 253
+ - name: Set new IPv4 subnet prefix
+ set_fact:
+ he_ipv4_subnet_prefix: "192.168.{{ chunk }}"
+ when: result.stdout.find("via") != -1
+ - name: Search again with another prefix
+ include_tasks: search_available_network_subnet.yaml
+ when: result.stdout.find("via") == -1
+ when: not ipv6_deployment|bool
+
+- name: Search for an available IPv6 subnet
+ block:
+ - name: Define 3rd chunk
+ set_fact:
+ chunk: 1000
+ when: chunk is not defined
+ - name: Set 3rd chunk
+ set_fact:
+ chunk: "{{ chunk|int + 45 }}" # 200 tries
+ - debug: var=chunk
+ - name: Get ip route
+ shell: ip -6 route get fd00:1234:{{ chunk }}:900::1 | grep "via" | cat
+ register: result
+ - debug: var=result
+ - name: Fail if an available subnet cannot be found
+ fail:
+ msg: >-
+ "Cannot find an available subnet for internal Libvirt network"
+ "Please set it to an unused subnet by adding the variable 'he_ipv6_subnet_prefix'"
+ "to the variable-file ( e.g. he_ipv6_subnet_prefix: 'fd00:9876:5432:900' )."
+ when: result.stdout.find("via") == -1 and chunk|int > 9900
+ - name: Set new IPv6 subnet prefix
+ set_fact:
+ he_ipv6_subnet_prefix: "fd00:1234:{{ chunk }}:900"
+ when: result.stdout.find("via") != -1
+ - name: Search again with another prefix
+ include_tasks: search_available_network_subnet.yaml
+ when: result.stdout.find("via") == -1
+ when: ipv6_deployment|bool
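The probe works because `ip route get` prints a "via <gateway>" hop only when the target is not on a directly connected network, while an address on an already-used local subnet is reported with the device alone. Illustrative outputs (addresses invented):

$ ip route get 192.168.1.1     # subnet already in use locally: no "via", keep searching
192.168.1.1 dev eth0 src 192.168.1.10 uid 0
$ ip route get 192.168.2.1     # reached through the default gateway: subnet is free
192.168.2.1 via 10.0.0.1 dev eth0 src 10.0.0.5 uid 0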
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/sync_on_engine_machine.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/sync_on_engine_machine.yml
new file mode 100644
index 00000000..f4acaea6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/sync_on_engine_machine.yml
@@ -0,0 +1,9 @@
+---
+- name: Register the engine VM as an ansible host
+ import_tasks: add_engine_as_ansible_host.yml
+- name: Sync on engine machine
+ command: sync
+ changed_when: true
+ ignore_errors: true
+ ignore_unreachable: true
+ delegate_to: "{{ groups.engine[0] }}"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/validate_ip_prefix.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/validate_ip_prefix.yml
new file mode 100644
index 00000000..41eb5a13
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/tasks/validate_ip_prefix.yml
@@ -0,0 +1,23 @@
+---
+- name: Validate IP prefix
+ block:
+ - name: IPv4 Validation
+ block:
+ - name: Get IPv4 route
+ command: ip route get {{ he_ipv4_subnet_prefix + ".1" }}
+ register: ip_route_result
+ - debug: var=ip_route_result
+ - name: Check if route exists
+ include_tasks: search_available_network_subnet.yaml
+ when: ip_route_result.stdout.find("via") == -1
+ when: not ipv6_deployment|bool
+ - name: IPv6 Validation
+ block:
+ - name: Get IPv6 route
+ command: ip route get {{ he_ipv6_subnet_prefix + "::1" }}
+ register: ip_route_result
+ - debug: var=ip_route_result
+ - name: Check if route exists
+ include_tasks: search_available_network_subnet.yaml
+ when: ip_route_result.stdout.find("via") == -1
+ when: ipv6_deployment|bool
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/broker.conf.j2 b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/broker.conf.j2
new file mode 100644
index 00000000..e71df8bb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/broker.conf.j2
@@ -0,0 +1,8 @@
+[email]
+smtp-server = {{ he_smtp_server }}
+smtp-port = {{ he_smtp_port }}
+source-email = {{ he_source_email }}
+destination-emails = {{ he_dest_email }}
+
+[notify]
+state_transition = maintenance|start|stop|migrate|up|down
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/fhanswers.conf.j2 b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/fhanswers.conf.j2
new file mode 100644
index 00000000..5b5b1133
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/fhanswers.conf.j2
@@ -0,0 +1,66 @@
+[environment:default]
+OVEHOSTED_CORE/screenProceed=bool:True
+OVEHOSTED_CORE/deployProceed=bool:True
+OVEHOSTED_CORE/confirmSettings=bool:True
+OVEHOSTED_NETWORK/fqdn=str:{{ he_fqdn }}
+OVEHOSTED_NETWORK/bridgeName=str:{{ he_mgmt_network }}
+OVEHOSTED_NETWORK/firewallManager=str:iptables
+OVEHOSTED_NETWORK/gateway=str:{{ he_gateway }}
+OVEHOSTED_ENGINE/clusterName=str:{{ he_cluster }}
+{# TODO: FIX #}
+OVEHOSTED_STORAGE/storageDatacenterName=str:hosted_datacenter
+OVEHOSTED_STORAGE/domainType=str:{{ he_domain_type }}
+{# TODO: FIX #}
+OVEHOSTED_STORAGE/connectionUUID=str:e29cf818-5ee5-46e1-85c1-8aeefa33e95d
+OVEHOSTED_STORAGE/LunID={{ 'str' if he_lun_id else 'none' }}:{{ he_lun_id if he_lun_id else 'None' }}
+OVEHOSTED_STORAGE/imgSizeGB=str:{{ he_disk_size_GB }}
+OVEHOSTED_STORAGE/mntOptions={{ 'str' if he_mount_options else 'none' }}:{{ he_mount_options if he_mount_options else 'None' }}
+OVEHOSTED_STORAGE/iSCSIPortalIPAddress={{ 'str' if he_iscsi_portal_addr else 'none' }}:{{ he_iscsi_portal_addr if he_iscsi_portal_addr else 'None' }}
+OVEHOSTED_STORAGE/metadataVolumeUUID=str:{{ he_metadata_disk_details.disk.image_id }}
+OVEHOSTED_STORAGE/sdUUID=str:{{ storage_domain_details.ovirt_storage_domains[0].id }}
+OVEHOSTED_STORAGE/iSCSITargetName={{ 'str' if he_iscsi_target else 'none' }}:{{ he_iscsi_target if he_iscsi_target else 'None' }}
+OVEHOSTED_STORAGE/metadataImageUUID=str:{{ he_metadata_disk_details.disk.id }}
+OVEHOSTED_STORAGE/lockspaceVolumeUUID=str:{{ he_sanlock_disk_details.disk.image_id }}
+OVEHOSTED_STORAGE/iSCSIPortalPort={{ 'str' if he_iscsi_portal_port else 'none' }}:{{ he_iscsi_portal_port if he_iscsi_portal_port else 'None' }}
+OVEHOSTED_STORAGE/imgUUID=str:{{ he_virtio_disk_details.disk.id }}
+OVEHOSTED_STORAGE/confImageUUID=str:{{ he_conf_disk_details.disk.id }}
+OVEHOSTED_STORAGE/spUUID=str:00000000-0000-0000-0000-000000000000
+OVEHOSTED_STORAGE/lockspaceImageUUID=str:{{ he_sanlock_disk_details.disk.id }}
+{# TODO: FIX #}
+OVEHOSTED_ENGINE/enableHcGlusterService=none:None
+OVEHOSTED_STORAGE/storageDomainName=str:{{ he_storage_domain_name }}
+OVEHOSTED_STORAGE/iSCSIPortal={{ 'str' if he_iscsi_tpgt else 'none' }}:{{ he_iscsi_tpgt if he_iscsi_tpgt else 'None' }}
+OVEHOSTED_STORAGE/volUUID=str:{{ he_virtio_disk_details.disk.image_id }}
+{# TODO: FIX #}
+OVEHOSTED_STORAGE/vgUUID=none:None
+OVEHOSTED_STORAGE/confVolUUID=str:{{ he_conf_disk_details.disk.image_id }}
+{% if he_domain_type=="nfs" or he_domain_type=="glusterfs" %}
+OVEHOSTED_STORAGE/storageDomainConnection=str:{{ he_storage_domain_addr }}:{{ he_storage_domain_path }}
+{% else %}
+OVEHOSTED_STORAGE/storageDomainConnection=str:{{ he_storage_domain_addr }}
+{% endif %}
+OVEHOSTED_STORAGE/iSCSIPortalUser={{ 'str' if he_iscsi_username else 'none' }}:{{ he_iscsi_username if he_iscsi_username else 'None' }}
+{# TODO: fix it #}
+OVEHOSTED_VDSM/consoleType=str:vnc
+OVEHOSTED_VM/vmMemSizeMB=int:{{ he_mem_size_MB }}
+OVEHOSTED_VM/vmUUID=str:{{ he_vm_details.vm.id }}
+OVEHOSTED_VM/vmMACAddr=str:{{ he_vm_mac_addr }}
+OVEHOSTED_VM/emulatedMachine=str:{{ he_emulated_machine }}
+OVEHOSTED_VM/vmVCpus=str:{{ he_vcpus }}
+OVEHOSTED_VM/ovfArchive=str:{{ he_appliance_ova }}
+OVEHOSTED_VM/vmCDRom=none:None
+OVEHOSTED_VM/automateVMShutdown=bool:True
+OVEHOSTED_VM/cloudInitISO=str:generate
+OVEHOSTED_VM/cloudinitInstanceDomainName={{ 'str' if he_cloud_init_domain_name else 'none' }}:{{ he_cloud_init_domain_name if he_cloud_init_domain_name else 'None' }}
+OVEHOSTED_VM/cloudinitInstanceHostName={{ 'str' if he_cloud_init_host_name else 'none' }}:{{ he_cloud_init_host_name if he_cloud_init_host_name else 'None' }}
+OVEHOSTED_VM/rootSshPubkey={{ 'str' if he_root_ssh_pubkey else 'none' }}:{{ he_root_ssh_pubkey if he_root_ssh_pubkey else 'None' }}
+OVEHOSTED_VM/cloudinitExecuteEngineSetup=bool:True
+OVEHOSTED_VM/cloudinitVMStaticCIDR={{ 'str' if he_vm_ip_addr is not none else 'none' }}:{{ he_vm_ip_addr if he_vm_ip_addr is not none else 'None' }}
+OVEHOSTED_VM/cloudinitVMTZ={{ 'str' if he_time_zone else 'none' }}:{{ he_time_zone if he_time_zone else 'None' }}
+OVEHOSTED_VM/rootSshAccess=str:yes
+OVEHOSTED_VM/cloudinitVMETCHOSTS=bool:{{ he_vm_etc_hosts }}
+OVEHOSTED_VM/cloudinitVMDNS={{ 'str' if he_dns_addr else 'none' }}:{{ he_dns_addr if he_dns_addr else 'None' }}
+OVEHOSTED_NOTIF/smtpPort=str:{{ he_smtp_port }}
+OVEHOSTED_NOTIF/smtpServer=str:{{ he_smtp_server }}
+OVEHOSTED_NOTIF/sourceEmail=str:{{ he_source_email }}
+OVEHOSTED_NOTIF/destEmail=str:{{ he_dest_email }}
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/hosted-engine.conf.j2 b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/hosted-engine.conf.j2
new file mode 100644
index 00000000..ee5e58fd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/hosted-engine.conf.j2
@@ -0,0 +1,44 @@
+fqdn={{ he_fqdn }}
+vm_disk_id={{ he_virtio_disk_details.disk.id }}
+vm_disk_vol_id={{ he_virtio_disk_details.disk.image_id }}
+vmid={{ he_vm_details.vm.id }}
+{% if he_domain_type=="nfs" or he_domain_type=="glusterfs" %}
+storage={{ he_storage_domain_addr }}:{{ he_storage_domain_path }}
+{% else %}
+storage={{ he_storage_domain_addr }}
+{% endif %}
+nfs_version={{ he_nfs_version }}
+mnt_options={{ he_mount_options }}
+conf=/var/run/ovirt-hosted-engine-ha/vm.conf
+host_id={{ host_spm_id }}
+console=vnc
+domainType={{ he_domain_type }}
+{# spUUID={{ datacenter_id }} #}
+{# To avoid triggering #}
+{# 3.5 -> 3.6 upgrade code #}
+spUUID=00000000-0000-0000-0000-000000000000
+sdUUID={{ storage_domain_details.ovirt_storage_domains[0].id }}
+{# TODO: fix it #}
+connectionUUID=e29cf818-5ee5-46e1-85c1-8aeefa33e95d
+ca_cert=/etc/pki/vdsm/libvirt-spice/ca-cert.pem
+ca_subject="C=EN, L=Test, O=Test, CN=Test"
+vdsm_use_ssl=true
+gateway={{ he_gateway }}
+bridge={{ he_mgmt_network }}
+network_test={{ he_network_test }}
+tcp_t_address={{ he_tcp_t_address }}
+tcp_t_port={{ he_tcp_t_port }}
+metadata_volume_UUID={{ he_metadata_disk_details.disk.image_id }}
+metadata_image_UUID={{ he_metadata_disk_details.disk.id }}
+lockspace_volume_UUID={{ he_sanlock_disk_details.disk.image_id }}
+lockspace_image_UUID={{ he_sanlock_disk_details.disk.id }}
+conf_volume_UUID={{ he_conf_disk_details.disk.image_id }}
+conf_image_UUID={{ he_conf_disk_details.disk.id }}
+{# TODO: get OVF_STORE volume uid from the engine at deploy time #}
+
+# The following are used only for iSCSI storage
+iqn={{ he_iscsi_target }}
+portal={{ he_iscsi_tpgt }}
+user={{ he_iscsi_username }}
+password={{ he_iscsi_password }}
+port={{ he_iscsi_portal_port }}
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/ifcfg-eth0-dhcp.j2 b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/ifcfg-eth0-dhcp.j2
new file mode 100644
index 00000000..544737ab
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/ifcfg-eth0-dhcp.j2
@@ -0,0 +1,12 @@
+# generated by ovirt-hosted-engine-setup
+BOOTPROTO=dhcp
+DEVICE=eth0
+HWADDR="{{ he_vm_mac_addr }}"
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+ZONE=public
+DEFROUTE=yes
+IPV4_FAILURE_FATAL=no
+IPV6INIT=no
+NM_CONTROLLED=yes
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/ifcfg-eth0-static-ipv6.j2 b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/ifcfg-eth0-static-ipv6.j2
new file mode 100644
index 00000000..ae3ec889
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/ifcfg-eth0-static-ipv6.j2
@@ -0,0 +1,24 @@
+# generated by ovirt-hosted-engine-setup
+BOOTPROTO=none
+DEVICE=eth0
+HWADDR="{{ he_vm_mac_addr }}"
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+ZONE=public
+IPV6INIT=yes
+IPV6ADDR={{ he_vm_ip_addr }}/{{ he_vm_ip_prefix }}
+IPV6_DEFAULTGW={{ he_gateway }}
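+{# he_dns_addr may be a comma-separated string or a list; build DNS_ADDR_LIST either way #}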
+{% if he_dns_addr is string %}
+{% set DNS_ADDR_LIST = he_dns_addr.split(',') %}
+{% elif he_dns_addr is iterable %}
+{% set DNS_ADDR_LIST = he_dns_addr %}
+{% else %}
+{% set DNS_ADDR_LIST = [] %}
+{% endif %}
+{% for d in DNS_ADDR_LIST %}
+DNS{{loop.index}}={{ d }}
+{% endfor %}
+DEFROUTE=yes
+IPV4_FAILURE_FATAL=no
+NM_CONTROLLED=yes
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/ifcfg-eth0-static.j2 b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/ifcfg-eth0-static.j2
new file mode 100644
index 00000000..2f61d262
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/ifcfg-eth0-static.j2
@@ -0,0 +1,25 @@
+# generated by ovirt-hosted-engine-setup
+BOOTPROTO=none
+DEVICE=eth0
+HWADDR="{{ he_vm_mac_addr }}"
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+ZONE=public
+IPADDR={{ he_vm_ip_addr }}
+PREFIX={{ he_vm_ip_prefix }}
+GATEWAY={{ he_gateway }}
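+{# he_dns_addr may be a comma-separated string or a list; build DNS_ADDR_LIST either way #}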
+{% if he_dns_addr is string %}
+{% set DNS_ADDR_LIST = he_dns_addr.split(',') %}
+{% elif he_dns_addr is iterable %}
+{% set DNS_ADDR_LIST = he_dns_addr %}
+{% else %}
+{% set DNS_ADDR_LIST = [] %}
+{% endif %}
+{% for d in DNS_ADDR_LIST %}
+DNS{{loop.index}}={{ d }}
+{% endfor %}
+DEFROUTE=yes
+IPV4_FAILURE_FATAL=no
+IPV6INIT=no
+NM_CONTROLLED=yes
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/meta-data.j2 b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/meta-data.j2
new file mode 100644
index 00000000..3e34dcce
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/meta-data.j2
@@ -0,0 +1,2 @@
+instance-id: {{ he_vm_uuid }}
+local-hostname: {{ he_fqdn }}
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/network-config-dhcp.j2 b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/network-config-dhcp.j2
new file mode 100644
index 00000000..1da9f5d2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/network-config-dhcp.j2
@@ -0,0 +1,11 @@
+version: 1
+config:
+ - type: physical
+ name: eth0
+ mac_address: "{{ he_vm_mac_addr|lower }}"
+ subnets:
+{% if ipv6_deployment %}
+ - type: dhcp6
+{% else %}
+ - type: dhcp
+{% endif %}
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/user-data.j2 b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/user-data.j2
new file mode 100644
index 00000000..36e15313
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/user-data.j2
@@ -0,0 +1,20 @@
+#cloud-config
+# vim: syntax=yaml
+disable_root: false
+{% if he_root_ssh_pubkey is not none and he_root_ssh_pubkey|length > 1 %}
+ssh_authorized_keys:
+ - {{ he_root_ssh_pubkey }}
+{% endif %}
+ssh_pwauth: True
+chpasswd:
+ list: |
+ root:{{ he_hashed_appliance_password }}
+ expire: False
+{% if he_time_zone is defined %}
+timezone: {{ he_time_zone }}
+{% endif %}
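+# The bootcmd entries force PermitRootLogin=yes and UseDNS=no in sshd_config,
+# appending each option when absent and keeping a timestamped backup when editing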
+bootcmd:
+ - if grep -Gq "^\s*PermitRootLogin" /etc/ssh/sshd_config; then sed -re "s/^\s*(PermitRootLogin)\s+(yes|no|without-password)/\1 yes/" -i.$(date -u +%Y%m%d%H%M%S) /etc/ssh/sshd_config; else echo "PermitRootLogin yes" >> /etc/ssh/sshd_config; fi
+ - if grep -Gq "^\s*UseDNS" /etc/ssh/sshd_config; then sed -re "s/^\s*(UseDNS)\s+(yes|no)/\1 no/" -i.$(date -u +%Y%m%d%H%M%S) /etc/ssh/sshd_config; else echo "UseDNS no" >> /etc/ssh/sshd_config; fi
+runcmd:
+ - systemctl restart sshd &
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/version.j2 b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/version.j2
new file mode 100644
index 00000000..8e64b180
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/version.j2
@@ -0,0 +1 @@
+{{ ha_version }}
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/vm.conf.j2 b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/vm.conf.j2
new file mode 100644
index 00000000..f9ca7022
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/hosted_engine_setup/templates/vm.conf.j2
@@ -0,0 +1,17 @@
+vmId={{ he_vm_details.vm.id }}
+memSize={{ he_mem_size_MB }}
+display={{ he_console_type }}
+devices={index:2,iface:ide,address:{ controller:0, target:0,unit:0, bus:1, type:drive},specParams:{},readonly:true,deviceId:{{ he_cdrom_uuid }},path:{{ he_cdrom }},device:cdrom,shared:false,type:disk}
+devices={index:0,iface:virtio,format:raw,poolID:00000000-0000-0000-0000-000000000000,volumeID:{{ he_virtio_disk_details.disk.image_id }},imageID:{{ he_virtio_disk_details.disk.id }},specParams:{},readonly:false,domainID:{{ storage_domain_details.ovirt_storage_domains[0].id }},optional:false,deviceId:{{ he_virtio_disk_details.disk.image_id }},address:{bus:0x00, slot:0x06, domain:0x0000, type:pci, function:0x0},device:disk,shared:exclusive,propagateErrors:off,type:disk,bootOrder:1}
+devices={device:scsi,model:virtio-scsi,type:controller}
+devices={nicModel:pv,macAddr:{{ he_vm_mac_addr }},linkActive:true,network:{{ he_mgmt_network }},specParams:{},deviceId:{{ he_nic_uuid }},address:{bus:0x00, slot:0x03, domain:0x0000, type:pci, function:0x0},device:bridge,type:interface}
+devices={device:console,type:console}
+devices={device:{{ he_video_device }},alias:video0,type:video}
+devices={device:{{ he_graphic_device }},type:graphics}
+vmName={{ he_vm_name }}
+spiceSecureChannels=smain,sdisplay,sinputs,scursor,splayback,srecord,ssmartcard,susbredir
+smp={{ he_vcpus }}
+maxVCpus={{ he_maxvcpus }}
+cpuType={{ cluster_cpu_model }}
+emulatedMachine={{ he_emulated_machine }}
+devices={device:virtio,specParams:{source:urandom},model:virtio,type:rng}
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/README.md
new file mode 100644
index 00000000..fc248e85
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/README.md
@@ -0,0 +1,157 @@
+oVirt Image Template
+====================
+
+The `image_template` role creates a template from an external image. Currently the disk can be either an image in a Glance external provider or a QCOW2 image.
+
+Requirements
+------------
+
+ * oVirt version 4.1 or higher, with [ovirt-imageio] installed and running.
+ * The CA certificate of the oVirt engine. The path to the CA certificate must be specified in the `ovirt_ca` variable.
+ * The `file` utility (used to verify that the downloaded image is a valid QCOW file).
+
+Limitations
+-----------
+
+ * Ansible Check Mode (Dry Run) is not supported, because this role uses a few modules (such as the command module)
+   that do not support it. Once all the modules used by this role support it, the role will support it as well.
+
+Role Variables
+--------------
+
+| Name | Default value | |
+|--------------------|-----------------------|----------------------------|
+| qcow_url | UNDEF (mandatory if glance is not used) | The URL of the QCOW2 image. You can specify a local file with the 'file://' prefix. |
+| qcow_url_client_cert | UNDEF | Path to client certificate if needed for retrieving QCOW from authenticated site. |
+| qcow_url_client_key | UNDEF | Path to client key if needed for retrieving QCOW from authenticated site. |
+| image_path | /tmp/ | Path where the QCOW2 image will be downloaded to. If it is a directory, the base name of the URL on the remote server will be used. |
+| image_checksum | UNDEF | If a checksum is defined, the digest of the destination file will be calculated after it is downloaded to ensure its integrity and verify that the transfer completed successfully. Format: <algorithm>:<checksum>, e.g. checksum="sha256:D98291AC[...]B6DC7B97". |
+| image_cache_download | true | When set to false, image_path will be deleted at the start and end of execution. |
+| template_cluster | Default | Name of the cluster where the template must be created. |
+| template_io_threads| UNDEF | Number of IO threads used by the template. 0 means IO threading is disabled. (Added in Ansible 2.7)|
+| template_name | mytemplate | Name of the template. |
+| template_memory | 2GiB | Amount of memory assigned to the template. |
+| template_memory_guaranteed | UNDEF | Amount of minimal guaranteed memory of the Virtual Machine |
+| template_memory_max | UNDEF | Upper bound of virtual machine memory up to which memory hot-plug can be performed. |
+| template_cpu | 1 | Number of CPUs assigned to the template. |
+| template_disk_storage | UNDEF | Name of the data storage domain where the disk must be created. If not specified, the data storage domain is selected automatically. |
+| template_disks | [] | List of dictionaries specifying the additional template disks. See below for more detailed description. |
+| template_disk_size | 10GiB | The size of the template disk. |
+| template_disk_name | UNDEF | The name of template disk. |
+| template_disk_format | UNDEF | Format of the template disk. |
+| template_disk_interface | virtio | Interface of the template disk. (Choices: virtio, ide, virtio_scsi) |
+| template_seal | true | 'Sealing' erases all machine-specific configurations from a filesystem. Not supported on Windows. Set this to 'false' for Windows. |
+| template_timeout | 600 | Amount of time to wait for the template to be created/imported. |
+| template_type | UNDEF | The type of the template: desktop, server or high_performance (for qcow2 based templates only) |
+| template_nics | {name: nic1, profile_name: ovirtmgmt, interface: virtio} | List of dictionaries that specify the NICs of the template. |
+| template_operating_system | UNDEF | Operating system of the template, e.g. other, rhel_7x64 or debian_7; see the ovirt_template module for other values. |
+| glance_image_provider | UNDEF (mandatory if qcow_url is not used) | Name of the glance image provider. |
+| glance_image | UNDEF (mandatory if qcow_url is not used) | The name of the disk in the glance provider to be imported as a template. |
+| template_prerequisites_tasks | UNDEF | Works only with a qcow image. Specifies a path to an Ansible tasks file to be executed on the virtual machine before creating a template from it. Note that the qcow image must contain a guest agent which reports the IP address. A minimal sketch of such a file follows this table. |
+
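+A `template_prerequisites_tasks` file is a plain list of Ansible tasks that the role runs on the
+temporary virtual machine (over SSH) before turning it into a template. A minimal sketch, assuming a
+hypothetical `prerequisites.yml` file and an illustrative package name:
+
+```yaml
+---
+# prerequisites.yml - runs on the VM before the template is created
+- name: Install cloud-init so clones can be customized on first boot
+  package:
+    name: cloud-init
+    state: present
+```
+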
+Each dictionary in the `template_disks` list can contain the following attributes (a short example follows the table):
+
+| Name | Default value | |
+|--------------------|----------------|----------------------------------------------|
+| name | UNDEF (Required) | The name of the additional disk. |
+| size | UNDEF (Required) | The size of the additional disk. |
+| storage_domain | UNDEF | The name of the storage domain where the disk should be created. If no value is passed, the value is set by <i>template_disk_storage</i>. |
+| interface | UNDEF | The interface of the disk. If no value is passed, the value is set by <i>template_disk_interface</i>. |
+| format | UNDEF | Specify the format of the disk. If no value is passed, the value is set by <i>template_disk_format</i>. <ul><li>cow - If set, the disk will be created as a sparse disk, so space will be allocated for the volume as needed. This format is also known as thin provisioned.</li><li>raw - If set, disk space will be allocated right away. This format is also known as preallocated.</li></ul> |
+| bootable | UNDEF | True if the disk should be bootable. |
+
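+For example, a sketch of two additional disks (the names, sizes and storage domain are illustrative):
+
+```yaml
+template_disks:
+  - name: data_disk
+    size: 20GiB
+    interface: virtio
+    format: cow
+  - name: log_disk
+    size: 5GiB
+    storage_domain: mydata
+```
+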
+Example Playbook
+----------------
+
+```yaml
+---
+- name: Create a template from qcow
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ engine_fqdn: ovirt-engine.example.com
+ engine_user: admin@internal
+ engine_password: 123456
+ engine_cafile: /etc/pki/ovirt-engine/ca.pem
+
+ qcow_url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
+ #qcow_url: file:///tmp/CentOS-7-x86_64-GenericCloud.qcow2
+ template_cluster: production
+ template_name: centos7_template
+ template_memory: 4GiB
+ template_cpu: 2
+ template_disk_size: 10GiB
+ template_disk_storage: mydata
+
+ roles:
+ - image_template
+ collections:
+ - ovirt.ovirt
+
+
+- name: Create a template from a disk stored in glance
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ engine_fqdn: ovirt-engine.example.com
+ engine_user: admin@internal
+ engine_password: 123456
+ engine_cafile: /etc/pki/ovirt-engine/ca.pem
+
+ glance_image_provider: qe-infra-glance
+ glance_image: rhel7.4_ovirt4.2_guest_disk
+ template_cluster: production
+ template_name: centos7_template
+ template_memory: 4GiB
+ template_cpu: 2
+ template_disk_size: 10GiB
+ template_disk_storage: mydata
+
+ roles:
+ - image_template
+ collections:
+ - ovirt.ovirt
+
+- name: Create a template from qcow2.xz
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ pre_tasks:
+ - name: Download qcow2.xz file
+ get_url:
+ url: "https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2.xz"
+ dest: /tmp
+ register: downloaded_file
+
+ - name: Extract downloaded QCOW image
+ command: "unxz --keep --force {{ downloaded_file.dest }}"
+
+ - name: Set qcow_url to extracted file
+ set_fact:
+ qcow_url: "file://{{ (downloaded_file.dest | splitext)[0] }}"
+ vars:
+ engine_fqdn: ovirt-engine.example.com
+ engine_user: admin@internal
+ engine_password: 123456
+ engine_cafile: /etc/pki/ovirt-engine/ca.pem
+
+ template_cluster: production
+ template_name: centos7_template
+ template_memory: 4GiB
+ template_cpu: 2
+ template_disk_size: 10GiB
+ template_disk_storage: mydata
+
+ roles:
+ - image_template
+ collections:
+ - ovirt.ovirt
+```
+
+[![asciicast](https://asciinema.org/a/111478.png)](https://asciinema.org/a/111478)
+
+[ovirt-imageio]: http://www.ovirt.org/develop/release-management/features/storage/image-upload/
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/defaults/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/defaults/main.yml
new file mode 100644
index 00000000..db3f2234
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/defaults/main.yml
@@ -0,0 +1,21 @@
+---
+image_path: /tmp
+image_cache_download: true
+image_download_timeout: 180
+template_cluster: Default
+template_name: mytemplate
+template_memory: 2GiB
+template_cpu: 1
+template_disk_size: 10GiB
+template_operating_system: rhel_7x64
+template_timeout: 600
+template_disks: []
+template_disk_interface: virtio
+template_nics:
+ - name: nic1
+ profile_name: ovirtmgmt
+ interface: virtio
+template_seal: true
+
+disk_resize_timeout: 60
+disk_storage_domain: null
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/examples/ovirt_image_template.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/examples/ovirt_image_template.yml
new file mode 100644
index 00000000..535dce7c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/examples/ovirt_image_template.yml
@@ -0,0 +1,27 @@
+---
+- name: oVirt image template
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars_files:
+  # Contains the `engine_password` variable encrypted using ansible-vault
+ - passwords.yml
+
+ vars:
+ engine_fqdn: ovirt-engine.example.com
+ engine_user: admin@internal
+ engine_cafile: /etc/pki/ovirt-engine/ca.pem
+ qcow_url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
+ #qcow_url: file:///tmp/CentOS-7-x86_64-GenericCloud.qcow2
+ template_cluster: production
+ template_name: centos7_template
+ template_memory: 2GiB
+ template_cpu: 2
+ template_disk_size: 10GiB
+ template_disk_storage: nfs
+
+ roles:
+ - image_template
+ collections:
+ - ovirt.ovirt
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/examples/passwords.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/examples/passwords.yml
new file mode 100644
index 00000000..92c7613c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/examples/passwords.yml
@@ -0,0 +1,12 @@
+---
+# As an example this file is kept in plaintext. If you want to
+# encrypt this file, execute the following command:
+#
+# $ ansible-vault encrypt passwords.yml
+#
+# It will ask you for a password, which you must then pass to
+# ansible interactively when executing the playbook.
+#
+# $ ansible-playbook myplaybook.yml --ask-vault-pass
+#
+engine_password: 123456
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/tasks/empty.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/tasks/empty.yml
new file mode 100644
index 00000000..c0e9c1e2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/tasks/empty.yml
@@ -0,0 +1,8 @@
+---
+# Placeholder Ansible tasks file, to avoid warnings in the play
+# when the user doesn't specify template_prerequisites_tasks.
+# Once Ansible supports delegate_to with include_tasks,
+# this file won't be needed.
+- name: Placeholder
+ debug:
+ msg: ""
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/tasks/glance_image.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/tasks/glance_image.yml
new file mode 100644
index 00000000..31eb8e3c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/tasks/glance_image.yml
@@ -0,0 +1,60 @@
+---
+- block:
+  - name: Check that mandatory parameters are defined
+    fail:
+      msg: "one of the mandatory parameters glance_image_provider or glance_image is not defined"
+ when: "glance_image_provider is undefined or glance_image is undefined"
+
+ - name: Login to oVirt
+ ovirt_auth:
+ url: "{{ engine_url | default(lookup('env','OVIRT_URL')) | default(omit) }}"
+ hostname: "{{ engine_fqdn | default(lookup('env','OVIRT_HOSTNAME')) | default(omit) }}"
+ username: "{{ engine_user | default(lookup('env','OVIRT_USERNAME')) | default(omit) }}"
+ password: "{{ engine_password | default(lookup('env','OVIRT_PASSWORD')) | default(omit) }}"
+ ca_file: "{{ engine_cafile | default(lookup('env','OVIRT_CAFILE')) | default(omit) }}"
+ insecure: "{{ engine_insecure | default(true) }}"
+ when: ovirt_auth is undefined or not ovirt_auth
+ register: loggedin
+ tags:
+ - always
+
+ - name: Fetch storages
+ ovirt_storage_domain_info:
+ auth: "{{ ovirt_auth }}"
+ register: sd_info
+ tags:
+ - ovirt-template-image
+
+ - name: Find data domain
+ set_fact:
+ disk_storage_domain: "{{ sd_info.ovirt_storage_domains|json_query(the_query)|list|first }}"
+ vars:
+ the_query: "[?type=='data']"
+ tags:
+ - ovirt-template-image
+
+ - name: Import templates from glance
+ ovirt_template:
+ auth: "{{ ovirt_auth }}"
+ state: imported
+ name: "{{ template_name }}"
+ template_image_disk_name: "{{ template_disk_name | default(omit) }}"
+ image_provider: "{{ glance_image_provider }}"
+ image_disk: "{{ glance_image }}"
+ io_threads: "{{ template_io_threads | default(omit) }}"
+ storage_domain: "{{ template_disk_storage | default(disk_storage_domain.name) }}"
+ cluster: "{{ template_cluster }}"
+ operating_system: "{{ template_operating_system | default(omit) }}"
+ memory: "{{ template_memory | default(omit) }}"
+ memory_guaranteed: "{{ template_memory_guaranteed | default(omit) }}"
+ memory_max: "{{ template_memory_max | default(omit) }}"
+ timeout: "{{ template_timeout }}"
+
+ always:
+ - name: Logout from oVirt
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
+ when: not loggedin.skipped | default(false)
+ tags:
+ - always
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/tasks/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/tasks/main.yml
new file mode 100644
index 00000000..3f1ccf77
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+- name: Fail when both qcow_url and glance_image are defined
+  fail:
+    msg: "You must specify either qcow_url or glance_image, not both"
+ when: "glance_image is defined and qcow_url is defined"
+
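+# Include glance_image.yml when glance_image is defined, otherwise qcow2_image.yml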
+- name: Image upload
+ include_tasks: "{{ (glance_image is defined) | ternary('glance', 'qcow2') }}_image.yml"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/tasks/qcow2_image.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/tasks/qcow2_image.yml
new file mode 100644
index 00000000..77c579ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/tasks/qcow2_image.yml
@@ -0,0 +1,254 @@
+---
+- name: Check that the mandatory parameter qcow_url is defined
+ fail:
+ msg: "mandatory parameter 'qcow_url' is not defined"
+ when: "qcow_url is undefined"
+
+- name: Check if {{ image_path }} is a directory
+ stat:
+ path: "{{ image_path }}"
+ register: image_path_st
+
+- name: Download the qcow image
+ get_url:
+ url: "{{ qcow_url }}"
+ dest: "{{ image_path_st.stat.isdir is defined and image_path_st.stat.isdir | ternary(image_path~'/'~qcow_url.rpartition('/')[-1], image_path) | regex_replace('//', '/') }}"
+ force: "{{ not image_cache_download }}"
+ checksum: "{{ image_checksum | default(omit) }}"
+ timeout: "{{ image_download_timeout }}"
+ client_cert: "{{ qcow_url_client_cert | default(omit) }}"
+    client_key: "{{ qcow_url_client_key | default(omit) }}"
+ register: downloaded_file
+ tags:
+ - ovirt-template-image
+
+- name: Check file type
+ command: "/usr/bin/file {{ downloaded_file.dest | quote }}"
+ changed_when: false
+ register: filetype
+ tags:
+ - ovirt-template-image
+
+- name: Fail if image is not qcow
+ fail:
+ msg: "The downloaded file is not valid QCOW file."
+ when: '"QCOW" not in filetype.stdout'
+ tags:
+ - ovirt-template-image
+
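+# `file` prints e.g. "<path>: QEMU QCOW2 Image (v3), 8589934592 bytes";
+# the sixth whitespace-separated field is therefore the virtual size in bytes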
+- name: Calculate image size in GiB
+ set_fact:
+ qcow2_size: "{{ (filetype.stdout_lines[0].split()[5] | int / 2**30) | round(0, 'ceil') | int }}GiB"
+
+- block:
+ - name: Login to oVirt
+ ovirt_auth:
+ url: "{{ engine_url | default(lookup('env','OVIRT_URL')) | default(omit) }}"
+ hostname: "{{ engine_fqdn | default(lookup('env','OVIRT_HOSTNAME')) | default(omit) }}"
+ username: "{{ engine_user | default(lookup('env','OVIRT_USERNAME')) | default(omit) }}"
+ password: "{{ engine_password | default(lookup('env','OVIRT_PASSWORD')) | default(omit) }}"
+ ca_file: "{{ engine_cafile | default(lookup('env','OVIRT_CAFILE')) | default(omit) }}"
+ insecure: "{{ engine_insecure | default(true) }}"
+ when: ovirt_auth is undefined or not ovirt_auth
+ register: loggedin
+ tags:
+ - always
+
+ - name: Fetch the datacenter name
+ ovirt_datacenter_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "Clusters.name = {{ template_cluster }}"
+ register: dc_info
+
+ - name: Fetch storages
+ ovirt_storage_domain_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "datacenter={{ dc_info.ovirt_datacenters[0].name }}"
+ register: sd_info
+ when: template_disk_storage is undefined
+ tags:
+ - ovirt-template-image
+
+ - name: Find data domain
+ set_fact:
+ disk_storage_domain: "{{ sd_info.ovirt_storage_domains|json_query(the_query)|list|first }}"
+ when: template_disk_storage is undefined
+ vars:
+ the_query: "[?type=='data']"
+ tags:
+ - ovirt-template-image
+
+ - name: Check if template already exists
+ ovirt_template_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "name={{ template_name }} and datacenter={{ dc_info.ovirt_datacenters[0].name }}"
+ register: template_info
+ tags:
+ - ovirt-template-image
+
+ - name: Deploy the qcow image to oVirt engine
+ ovirt_disk:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ template_disk_name | default(template_name) }}"
+ size: "{{ qcow2_size }}"
+ format: "{{ template_disk_format | default(omit) }}"
+ image_path: "{{ downloaded_file.dest }}"
+ storage_domain: "{{ template_disk_storage | default(disk_storage_domain.name) }}"
+ force: "{{ template_info.ovirt_templates | length == 0 }}"
+ register: ovirt_disk
+ when: template_info.ovirt_templates | length == 0
+ tags:
+ - ovirt-template-image
+
+ - name: Wait until the qcow image is unlocked by the oVirt engine
+ ovirt_disk_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "id={{ ovirt_disk.id }}"
+ register: disk_info
+ until: ((ovirt_disk is defined) and (ovirt_disk.disk.status != "locked")) or ((disk_info is defined) and (disk_info.ovirt_disks[0].status != "locked"))
+ retries: 20
+ delay: 3
+ when: template_info.ovirt_templates | length == 0
+ tags:
+ - ovirt-template-image
+
+ - block:
+ - name: Generate SSH keys
+ command: "ssh-keygen -t rsa -f {{ tmp_private_key_file }} -N ''"
+ args:
+ creates: "{{ tmp_private_key_file }}"
+ when: template_prerequisites_tasks is defined
+ delegate_to: localhost
+
+ - name: Create vm
+ ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ vm_name }}"
+ state: "{{ 'running' if template_prerequisites_tasks is defined else 'stopped' }}"
+ cluster: "{{ template_cluster }}"
+ io_threads: "{{ template_io_threads | default(omit) }}"
+ memory: "{{ template_memory | default(omit) }}"
+ memory_guaranteed: "{{ template_memory_guaranteed | default(omit) }}"
+ memory_max: "{{ template_memory_max | default(omit) }}"
+ cpu_cores: "{{ template_cpu }}"
+ operating_system: "{{ template_operating_system }}"
+ type: "{{ template_type | default(omit) }}"
+ cloud_init: "{{ {'user_name': 'root', 'authorized_ssh_keys': lookup('file', tmp_private_key_file~'.pub') } if template_prerequisites_tasks is defined else omit }}"
+ disks:
+ - id: "{{ disk_info.ovirt_disks[0].id }}"
+ bootable: true
+ interface: "{{ template_disk_interface }}"
+ nics: "{{ template_nics }}"
+
+ - name: Manage disks
+ ovirt_disk:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ item.name | default(omit) }}"
+ size: "{{ item.size | default(omit) }}"
+ interface: "{{ item.interface | default(template_disk_interface) | default(omit) }}"
+ vm_name: "{{ vm_name }}"
+ format: "{{ item.format | default(template_disk_format) | default(omit) }}"
+ storage_domain: "{{ item.storage_domain | default(template_disk_storage) | default(omit) }}"
+ bootable: "{{ item.bootable | default(omit) }}"
+ wait: true
+ loop: "{{ template_disks }}"
+
+ - block:
+ - name: Wait for VMs IPv4
+ ovirt_vm_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "name={{ vm_name }}"
+ fetch_nested: true
+ nested_attributes: ips
+ register: vm_info
+ until: "vm_info.ovirt_vms | ovirt.ovirt.ovirtvmipv4 | length > 0"
+ retries: 10
+ delay: 5
+
+      - name: Set IP of the VM
+ set_fact:
+ vm_ip: "{{ vm_info.ovirt_vms | ovirt.ovirt.ovirtvmipv4 }}"
+
+ - name: Include prerequisites tasks for VM
+ import_tasks: "{{ template_prerequisites_tasks if template_prerequisites_tasks is defined else 'empty.yml' }}"
+ static: no
+ delegate_to: "{{ vm_ip }}"
+ vars:
+ ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+ ansible_user: root
+ ansible_ssh_private_key_file: "{{ tmp_private_key_file | default(omit) }}"
+
+ - name: Remove SSH keys
+ file:
+ state: absent
+ path: "{{ item }}"
+ delegate_to: localhost
+ with_items:
+ - "{{ tmp_private_key_file }}"
+ - "{{ tmp_private_key_file }}.pub"
+
+ - name: Stop vm
+ ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ vm_name }}"
+ state: stopped
+
+ when: template_prerequisites_tasks is defined
+
+ when: template_info.ovirt_templates | length == 0
+
+ - block:
+ - name: Resize disk if smaller than template_disk_size
+ ovirt_disk:
+ id: "{{ disk_info.ovirt_disks[0].id }}"
+ vm_name: "{{ vm_name }}"
+ auth: "{{ ovirt_auth }}"
+ size: "{{ template_disk_size }}"
+
+ - name: Wait for resize
+ ovirt_disk:
+ id: "{{ disk_info.ovirt_disks[0].id }}"
+ auth: "{{ ovirt_auth }}"
+ register: resized_disk
+ until: resized_disk.disk.provisioned_size != disk_info.ovirt_disks[0].provisioned_size
+ retries: "{{ (disk_resize_timeout / 3) | int }}"
+ delay: 3
+ when:
+ - (template_disk_size | regex_replace('GiB') | int) > (qcow2_size | regex_replace('GiB') | int)
+ - template_info.ovirt_templates | length == 0
+
+ - name: Create template
+ ovirt_template:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ template_name }}"
+ vm: "{{ vm_name }}"
+ cluster: "{{ template_cluster }}"
+ timeout: "{{ template_timeout }}"
+ seal: "{{ template_seal }}"
+ when: template_info.ovirt_templates | length == 0
+ tags:
+ - ovirt-template-image
+
+ always:
+ - name: Remove downloaded image
+ file:
+ path: "{{ downloaded_file.dest }}"
+ state: absent
+ when: not image_cache_download
+
+ - name: Remove vm
+ ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ state: absent
+ name: "{{ vm_name }}"
+ when: template_info.ovirt_templates | length == 0
+ tags:
+ - ovirt-template-image
+
+ - name: Logout from oVirt
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
+ when: not loggedin.skipped | default(false)
+ tags:
+ - always
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/vars/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/vars/main.yml
new file mode 100644
index 00000000..3ceedc03
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/image_template/vars/main.yml
@@ -0,0 +1,3 @@
+---
+vm_name: temporary_vm_name_12367123
+tmp_private_key_file: /tmp/.image_template.key
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/README.md
new file mode 100644
index 00000000..1a2adfac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/README.md
@@ -0,0 +1,436 @@
+oVirt Infra
+===========
+
+The `infra` role enables you to set up oVirt infrastructure including MAC pools, data centers, clusters, networks, hosts, users, and groups.
+
+Target machine
+--------------
+If you use this role for user management, it will use `ovirt-aaa-jdbc-tool`, which is located on the engine machine,
+so you must execute the role on the engine machine.
+
+
+Role Variables
+--------------
+
+### Datacenter
+To set up or clean up a data center, you can use the following variables (an example follows the table):
+
+| Name | Default value | Description |
+|--------------------------|-----------------------|--------------------------------------|
+| data_center_name | UNDEF | Name of the data center. |
+| data_center_description | UNDEF | Description of the data center. |
+| data_center_local | false | Specify whether the data center is shared or local. |
+| compatibility_version | UNDEF | Compatibility version of data center. |
+| data_center_state | present | Specify whether the data center should be present or absent. |
+| recursive_cleanup | false | Specify whether to recursively remove all entities inside the data center. Valid only when data_center_state == absent. |
+| format_storages | false | Specify whether to format ALL the storages that are going to be removed as part of the DC. Valid only when data_center_state == absent and recursive_cleanup == true. |
+
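+For example, a sketch of the variables for a full recursive cleanup (the data center name is illustrative):
+
+```yaml
+data_center_name: mydatacenter
+data_center_state: absent
+recursive_cleanup: true
+format_storages: true
+```
+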
+### MAC pools
+To set up MAC pools, you can define a list variable called `mac_pools`.
+The items in the `mac_pools` list variable can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------------------|-----------------------|-------------------------------------------------------------------|
+| mac_pool_name | UNDEF | Name of the MAC pool to manage. |
+| mac_pool_ranges | UNDEF | List of MAC ranges. The 'from' and 'to' values should be separated by a comma. For example: 00:1a:4a:16:01:51,00:1a:4a:16:01:61 |
+| mac_pool_allow_duplicates | UNDEF | If true, allows a MAC address to be used multiple times in a pool. The default value is set by the oVirt engine to false. |
+
+### Clusters
+To set up clusters, you can define a list variable called `clusters`.
+The items in the `clusters` list variable can contain the following parameters:
+
+| Name | Default value | Description |
+|-----------------------------------|---------------------|------------------------------------------|
+| name | UNDEF (Required) | Name of the cluster. |
+| state | present | State of the cluster. |
+| cpu_type | Intel Conroe Family | CPU type of the cluster. |
+| profile | UNDEF | The cluster profile. You can choose a predefined cluster profile; see the tables below. |
+| ballooning | UNDEF | If True, enables memory balloon optimization. Memory ballooning is used to re-distribute or reclaim host memory based on VM needs in a dynamic way. |
+| description | UNDEF | Description of the cluster. |
+| ksm | UNDEF | If True, MoM runs Kernel Same-page Merging (KSM) when necessary and when it can yield a memory-saving benefit that outweighs its CPU cost. |
+| ksm_numa | UNDEF | If True, enables KSM for best performance inside NUMA nodes. |
+| vm_reason | UNDEF | If True, enables an optional reason field when a virtual machine is shut down from the Manager, allowing the administrator to provide an explanation for the maintenance. |
+| host_reason | UNDEF | If True, enables an optional reason field when a host is placed into maintenance mode from the Manager, allowing the administrator to provide an explanation for the maintenance. |
+| memory_policy<br/>alias: <i>performance_preset</i> | UNDEF | <ul><li>disabled - Disables memory page sharing.</li><li>server - Sets the memory page sharing threshold to 150% of the system memory on each host.</li><li>desktop - Sets the memory page sharing threshold to 200% of the system memory on each host.</li></ul> |
+| migration_policy | UNDEF | A migration policy defines the conditions for live migrating virtual machines in the event of host failure. Following policies are supported:<ul><li>legacy - Legacy behavior of 3.6 version.</li><li>minimal_downtime - Virtual machines should not experience any significant downtime.</li><li>suspend_workload - Virtual machines may experience a more significant downtime.</li><li>post_copy - Virtual machines should not experience any significant downtime. If the VM migration is not converging for a long time, the migration will be switched to post-copy</li></ul> |
+| scheduling_policy | UNDEF | The scheduling policy used by the cluster. |
+| ha_reservation | UNDEF | If True, enables oVirt/RHV to monitor cluster capacity for highly available virtual machines. |
+| fence_enabled | UNDEF | If True, enables fencing on the cluster. |
+| fence_connectivity_threshold | UNDEF | The threshold used by <i>fence_skip_if_connectivity_broken</i>. |
+| fence_skip_if_connectivity_broken | UNDEF | If True, fencing will be temporarily disabled if the percentage of hosts in the cluster that are experiencing connectivity issues is greater than or equal to the defined threshold. |
+| fence_skip_if_sd_active | UNDEF | If True, any hosts in the cluster that are Non Responsive and still connected to storage will not be fenced. |
+| mac_pool | UNDEF | Mac pool name. |
+| comment | UNDEF | Comment of the cluster. |
+| migration_bandwidth | UNDEF | The bandwidth settings define the maximum bandwidth of both outgoing and incoming migrations per host.<br/>Following bandwidth options are supported:<br/><ul><li>auto - Bandwidth is copied from the rate limit [Mbps] setting in the data center host network QoS.</li><li>hypervisor_default - Bandwidth is controlled by local VDSM setting on sending host.</li><li>custom - Defined by user (in Mbps).</li></ul> |
+| migration_bandwidth_limit | UNDEF | Set the custom migration bandwidth limit. |
+| network | UNDEF | Management network of cluster to access cluster hosts. |
+| resilience_policy | UNDEF | The resilience policy defines how the virtual machines are prioritized in the migration.<br/>Following values are supported:<br/><ul><li>do_not_migrate - Prevents virtual machines from being migrated.</li><li>migrate - Migrates all virtual machines in order of their defined priority.</li><li>migrate_highly_available - Migrates only highly available virtual machines to prevent overloading other hosts.</li></ul> |
+| rng_sources | UNDEF | List specifying the random number generator devices that all hosts in the cluster will use. Supported generators are: <i>hwrng</i> and <i>random</i>. |
+| serial_policy | UNDEF | Specify a serial number policy for the virtual machines in the cluster.<br/>Following options are supported:<br/><ul><li>vm - Sets the virtual machine's UUID as its serial number.</li><li>host - Sets the host's UUID as the virtual machine's serial number.</li><li>custom - Allows you to specify a custom serial number in serial_policy_value.</li></ul> |
+| serial_policy_value | UNDEF | Allows you to specify a custom serial number. This parameter is used only when <i>serial_policy</i> is custom. |
+| spice_proxy | UNDEF | The proxy by which the SPICE client will connect to virtual machines. The address must be in the following format: protocol://[host]:[port] |
+| switch_type | UNDEF | Type of switch to be used by all networks in a given cluster. Either legacy, which uses a Linux bridge, or ovs, which uses Open vSwitch. |
+| threads_as_cores | UNDEF | If True, exposed host threads are treated as cores which can be utilized by virtual machines. |
+| trusted_service | UNDEF | If True, enables integration with an OpenAttestation server.|
+| virt | UNDEF | If True, hosts in this cluster will be used to run virtual machines. Default is true. |
+| gluster | UNDEF | If True, hosts in this cluster will be used as Gluster Storage server nodes, and not for running virtual machines. |
+| external_network_providers | UNDEF | List specifying the external network providers available in the cluster. |
+
+More information about the parameters can be found in the [ovirt_cluster](http://docs.ansible.com/ansible/ovirt_cluster_module.html) module documentation.
+
+#### Cluster Profile
+Possible `profile` options of a cluster are `development` and `production`; their default values are described below, followed by a short example:
+
+##### Development
+The `development` cluster profile has the following predefined values:
+
+| Parameter | Value |
+|------------------|---------------|
+| ballooning | true |
+| ksm | true |
+| host_reason | false |
+| vm_reason | false |
+| memory_policy | server |
+| migration_policy | post_copy |
+
+##### Production
+The `production` cluster profile has the following predefined values:
+
+| Parameter | Value |
+|-----------------------------------|--------------------|
+| ballooning | false |
+| ksm | false |
+| host_reason | true |
+| vm_reason | true |
+| memory_policy | disabled |
+| migration_policy | suspend_workload |
+| scheduling_policy | evenly_distributed |
+| ha_reservation | true |
+| fence_enabled | true |
+| fence_skip_if_connectivity_broken | true |
+| fence_skip_if_sd_active | true |
+
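+For example, a sketch of a cluster that uses the `development` profile (the cluster name and CPU type are illustrative):
+
+```yaml
+clusters:
+  - name: devel
+    cpu_type: Intel Conroe Family
+    profile: development
+```
+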
+### Hosts
+To set up hosts, you can define a list variable called `hosts`.
+The items in the `hosts` list variable can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------|------------------|---------------------------------------|
+| name | UNDEF (Required) | Name of the host. |
+| state | present | Specifies whether the host is `present` or `absent`. |
+| address | UNDEF | IP address or FQDN of the host. |
+| password | UNDEF | The host's root password. Required if <i>public_key</i> is false. |
+| public_key | UNDEF | If <i>true</i> the public key should be used to authenticate to host. |
+| cluster | UNDEF (Required) | The cluster that the host must connect to. |
+| timeout | 1800 | Maximum wait time for the host to be in an UP state. |
+| poll_interval | 20 | Polling interval to check the host status. |
+| hosted_engine | UNDEF | Specifies whether to 'deploy' the host as a hosted-engine host. |
+| power_management | UNDEF | Power management configuration of the host. See the table below for its parameters. |
+
+If you cannot use the `hosts` variable in your playbook for whatever reason, you can change the variable's name
+by overriding the `hosts_var_name` variable. Example:
+```yaml
+- name: Set up oVirt infrastructure
+ hosts: engine
+
+ roles:
+ - role: ovirt.ovirt.infra
+ vars:
+ hosts_var_name: ovirt_hosts
+ ovirt_hosts:
+ - name: host_0
+ state: present
+ address: 1.2.3.4
+ password: 123456
+ cluster: Default
+```
+
+##### Host power management
+The `power_management` dictionary can contain the following values:
+
+| Name | Default value | Description |
+|---------------|------------------|---------------------------------------|
+| address | UNDEF | Address of the power management interface. |
+| state | present | Specifies whether the host power management should be `present` or `absent`. |
+| username | UNDEF | Username to be used to connect to power management interface. |
+| password | UNDEF | Password of the user specified in C(username) parameter. |
+| type | UNDEF | Type of the power management. oVirt/RHV predefined values are drac5, ipmilan, rsa, bladecenter, alom, apc, apc_snmp, eps, wti, rsb, cisco_ucs, drac7, hpblade, ilo, ilo2, ilo3, ilo4, ilo_ssh, but user can have defined custom type. |
+| options | UNDEF | Dictionary of additional fence agent options (including Power Management slot). Additional information about options can be found at https://github.com/ClusterLabs/fence-agents/blob/master/doc/FenceAgentAPI.md. |
+| port | UNDEF | Power management interface port. |
+
+### Networks
+
+##### Logical networks
+To set up logical networks, you can define a list variable called `logical_networks`.
+The `logical_networks` list can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------|----------------|---------------------------------------|
+| name | UNDEF | Name of the network. |
+| state | present | Specifies whether the network state is `present` or `absent`. |
+| vlan_tag | UNDEF | Specify VLAN tag. |
+| vm_network | True | If True, the network will be marked as a VM network. |
+| mtu | UNDEF | Maximum transmission unit (MTU) of the network. |
+| description | UNDEF | Description of the network. |
+| clusters | UNDEF | List of dictionaries describing how the network is managed in a specific cluster. |
+| label | UNDEF | Name of the label to assign to the network. |
+
+More information about the parameters can be found in the [ovirt_network](http://docs.ansible.com/ansible/ovirt_network_module.html) module documentation.
+
+##### Host networks
+To set up host networks, you can define a list variable called `host_networks`.
+The `host_networks` list can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------|----------------|---------------------------------------|
+| name | UNDEF | Name of the host. |
+| state | UNDEF | Specifies whether the network state is `present` or `absent`. |
+| check | UNDEF | If true, verifies the connection between the host and engine. |
+| save | UNDEF | If true, the network configuration will be persistent; by default it is temporary. |
+| bond | UNDEF | Dictionary describing the network bond. |
+| networks | UNDEF | Dictionary describing the networks to be attached to the interface or bond. |
+| labels | UNDEF | List of names of the network label to be assigned to the bond or interface. |
+| interface | UNDEF | Name of the network interface where the logical network should be attached. |
+
+More information about the parameters can be found in the [ovirt_host_network](http://docs.ansible.com/ansible/ovirt_host_network_module.html) module documentation.
+
+### Storages
+To set up storage domains, you can define a dictionary variable called `storages`.
+Each value in the `storages` dictionary can contain the following parameters (the key is always the name of the storage):
+
+| Name | Default value | Description |
+|-----------------|----------------|---------------------------------------|
+| master | false | If true, the storage will be added as the first storage, meaning it will be the master storage. |
+| domain_function | data | The function of the storage domain. Possible values are: <ul><li>iso</li><li>export</li><li>data</li></ul>. |
+| localfs | UNDEF | Dictionary defining local storage. |
+| nfs | UNDEF | Dictionary defining NFS storage. |
+| iscsi | UNDEF | Dictionary defining iSCSI storage. |
+| posixfs | UNDEF | Dictionary defining PosixFS storage. |
+| fcp | UNDEF | Dictionary defining FCP storage. |
+| glusterfs | UNDEF | Dictionary defining glusterFS storage. |
+| discard_after_delete | UNDEF | If True, storage domain blocks will be discarded upon deletion. Enabled by default. This parameter is relevant only for block-based storage domains. |
+
+More information about the parameters can be found in the [ovirt_storage_domain](http://docs.ansible.com/ansible/ovirt_storage_domain_module.html) module documentation.
+
+### AAA JDBC
+##### Users
+To set up users in the AAA JDBC provider, you can define a list variable called `users`.
+The items in the `users` list can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------|----------------|---------------------------------------|
+| state | present | Specifies whether the user is `present` or `absent`. |
+| name | UNDEF | Name of the user. |
+| authz_name | UNDEF | Authorization provider of the user. |
+| password | UNDEF | Password of the user. |
+| valid_to | UNDEF | Specifies the date until which the account remains valid. |
+| attributes | UNDEF | A dict of attributes related to the user. Available attributes: <ul><li>department</li><li>description</li><li>displayName</li><li>email</li><li>firstName</li><li>lastName</li><li>title</li></ul>|
+
+##### User groups
+To set up user groups in the AAA JDBC provider, you can define a list variable called `user_groups`.
+The items in the `user_groups` list can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------|----------------|---------------------------------------|
+| state | present | Specifies whether the group is `present` or `absent`. |
+| name | UNDEF | Name of the group. |
+| authz_name | UNDEF | Authorization provider of the group. |
+| users | UNDEF | List of users that belong to this group. |
+
+### Permissions
+To set up permissions of users or groups, you can define a list variable called `permissions`.
+The items in the `permissions` list variable can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------|----------------|----------------------------|
+| state | present | Specifies whether the state of the permission is `present` or `absent`. |
+| user_name | UNDEF | The user to manage the permission for. |
+| group_name | UNDEF | Name of the group to manage the permission for. |
+| authz_name | UNDEF | Name of the authorization provider of the group or user. |
+| role | UNDEF | The role to be assigned to the user or group. |
+| object_type | UNDEF | The object type which should be used to assign the permission. Possible object types are:<ul><li>data_center</li><li>cluster</li><li>host</li><li>storage_domain</li><li>network</li><li>disk</li><li>vm</li><li>vm_pool</li><li>template</li><li>cpu_profile</li><li>disk_profile</li><li>vnic_profile</li><li>system</li></ul> |
+| object_name | UNDEF | Name of the object where the permission should be assigned. |
+
+### External providers
+To set up external providers, you can define a list variable called `external_providers`.
+The items in the `external_providers` list variable can contain the following parameters:
+
+| Name | Default value | Description |
+|------------------------|---------------------|----------------------------------------------------------------------------------|
+| name | UNDEF (Required) | Name of the external provider. |
+| state | present | State of the external provider. Values can be: <ul><li>present</li><li>absent</li></ul>|
+| type | UNDEF (Required) | Type of the external provider. Values can be: <ul><li>os_image</li><li>network</li><li>os_volume</li><li>foreman</li></ul>|
+| url | UNDEF | URL where external provider is hosted. Required if state is present. |
+| username | UNDEF | Username to be used for login to external provider. Applicable for all types. |
+| password | UNDEF | Password of the user specified in username parameter. Applicable for all types. |
+| tenant | UNDEF | Name of the tenant. |
+| auth_url | UNDEF | Keystone authentication URL of the OpenStack provider. Required for: <ul><li>os_image</li><li>network</li><li>os_volume</li></ul>|
+| data_center | UNDEF | Name of the data center where the provider should be attached. Applicable for type <i>os_volume</i>. |
+| authentication_keys | UNDEF | List of authentication keys. Each key is represented by a dict like {'uuid': 'my-uuid', 'value': 'secret value'}. Added in Ansible 2.6. Applicable for type <i>os_volume</i>. |
+
+More information about the parameters can be found in the [ovirt_external_provider](http://docs.ansible.com/ansible/ovirt_external_provider_module.html) module documentation.
+
+Example Playbook
+----------------
+
+```yaml
+---
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ engine_fqdn: ovirt-engine.example.com
+ engine_user: admin@internal
+ engine_password: 123456
+ engine_cafile: /etc/pki/ovirt-engine/ca.pem
+ data_center_name: mydatacenter
+ compatibility_version: 4.4
+
+ mac_pools:
+ - mac_pool_name: "Default"
+ mac_pool_ranges:
+ - "00:1a:4a:16:01:51,00:1a:4a:16:01:61"
+
+ clusters:
+ - name: production
+ cpu_type: Intel Conroe Family
+ profile: production
+
+ hosts:
+ - name: myhost
+ address: 1.2.3.4
+ cluster: production
+ password: 123456
+ - name: myhost1
+ address: 5.6.7.8
+ cluster: production
+ password: 123456
+ power_management:
+ address: 9.8.7.6
+ username: root
+ password: password
+ type: ipmilan
+ options:
+ myoption1: x
+ myoption2: y
+ slot: myslot
+
+ storages:
+ mynfsstorage:
+ master: true
+ state: present
+ nfs:
+ address: 10.11.12.13
+ path: /the_path
+ myiscsistorage:
+ state: present
+ iscsi:
+ target: iqn.2014-07.org.ovirt:storage
+ port: 3260
+ address: 100.101.102.103
+ username: username
+ password: password
+ lun_id: 3600140551fcc8348ea74a99b6760fbb4
+ mytemplates:
+ domain_function: export
+ nfs:
+ address: 100.101.102.104
+ path: /exports/nfs/exported
+ myisostorage:
+ domain_function: iso
+ nfs:
+ address: 100.101.102.105
+ path: /exports/nfs/iso
+
+ logical_networks:
+ - name: mynetwork
+ clusters:
+ - name: production
+ assigned: yes
+ required: no
+ display: no
+ migration: yes
+ gluster: no
+
+ host_networks:
+ - name: myhost1
+ check: true
+ save: true
+ bond:
+ name: bond0
+ mode: 2
+ interfaces:
+ - eth2
+ - eth3
+ networks:
+ - name: mynetwork
+ boot_protocol: dhcp
+
+ users:
+ - name: john.doe
+ authz_name: internal-authz
+ password: 123456
+ valid_to: "2018-01-01 00:00:00Z"
+ - name: joe.doe
+ authz_name: internal-authz
+ password: 123456
+ valid_to: "2018-01-01 00:00:00Z"
+
+ user_groups:
+ - name: admins
+ authz_name: internal-authz
+ users:
+ - john.doe
+ - joe.doe
+
+ permissions:
+ - state: present
+ user_name: john.doe
+ authz_name: internal-authz
+      role: UserRole
+ object_type: cluster
+ object_name: production
+
+ - state: present
+ group_name: admins
+ authz_name: internal-authz
+ role: UserVmManager
+ object_type: cluster
+ object_name: production
+
+ external_providers:
+ - name: myglance
+ type: os_image
+ state: present
+ url: http://externalprovider.example.com:9292
+ username: admin
+ password: secret
+ tenant: admin
+ auth_url: http://externalprovider.example.com:35357/v2.0/
+
+ pre_tasks:
+ - name: Login to oVirt
+ ovirt_auth:
+ hostname: "{{ engine_fqdn }}"
+ username: "{{ engine_user }}"
+ password: "{{ engine_password }}"
+ ca_file: "{{ engine_cafile | default(omit) }}"
+ insecure: "{{ engine_insecure | default(true) }}"
+ tags:
+ - always
+
+ roles:
+ - ovirt.ovirt.infra
+
+ post_tasks:
+ - name: Logout from oVirt
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
+ tags:
+ - always
+```
+
+[![asciicast](https://asciinema.org/a/112415.png)](https://asciinema.org/a/112415)
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/defaults/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/defaults/main.yml
new file mode 100644
index 00000000..b7462989
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+data_center_state: 'present'
+hosts_var_name: 'hosts'
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/examples/ovirt_infra.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/examples/ovirt_infra.yml
new file mode 100644
index 00000000..d4cb5826
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/examples/ovirt_infra.yml
@@ -0,0 +1,15 @@
+---
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars_files:
+ - vars/ovirt_infra_vars.yml
+    # Contains the `engine_password` variable, encrypted with ansible-vault
+ - vars/passwords.yml
+
+ roles:
+ - infra
+ collections:
+ - ovirt.ovirt
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/examples/ovirt_infra_destroy.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/examples/ovirt_infra_destroy.yml
new file mode 100644
index 00000000..bc45dff9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/examples/ovirt_infra_destroy.yml
@@ -0,0 +1,43 @@
+---
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ recursive_cleanup: true
+ format_storages: true
+ data_center_name: Default
+ data_center_state: absent
+
+ engine_fqdn: ovirt-engine.example.com
+ engine_user: admin@internal
+ engine_cafile: /etc/pki/ovirt-engine/ca.pem
+
+ vars_files:
+    # Contains the `engine_password` variable, encrypted with ansible-vault
+ - vars/passwords.yml
+
+ pre_tasks:
+ - name: Login to oVirt
+ ovirt_auth:
+ hostname: "{{ engine_fqdn }}"
+ username: "{{ engine_user }}"
+ password: "{{ engine_password }}"
+ ca_file: "{{ engine_cafile | default(omit) }}"
+ insecure: "{{ engine_insecure | default(true) }}"
+ tags:
+ - always
+
+ roles:
+ - infra
+
+ post_tasks:
+ - name: Logout from oVirt
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
+ tags:
+ - always
+ collections:
+ - ovirt.ovirt
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/examples/vars/ovirt_infra_vars.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/examples/vars/ovirt_infra_vars.yml
new file mode 100644
index 00000000..81f01c15
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/examples/vars/ovirt_infra_vars.yml
@@ -0,0 +1,108 @@
+---
+###########################
+# REST API variables
+###########################
+engine_fqdn: ovirt-engine.example.com
+engine_user: admin@internal
+engine_cafile: /etc/pki/ovirt-engine/ca.pem
+
+###########################
+# Common
+###########################
+compatibility_version: 4.4
+
+# Data center
+data_center_name: Default
+
+###########################
+# Clusters
+###########################
+clusters:
+ - name: Default
+ cpu_type: Intel Conroe Family
+ profile: production
+
+###########################
+# Hosts
+###########################
+hosts:
+ - name: host1
+ address: 1.2.3.5
+ cluster: Default
+ password: 123456
+ - name: host2
+ address: 1.2.3.6
+ cluster: Default
+ password: 123456
+
+###########################
+# Storage
+###########################
+storages:
+ data:
+ master: true
+ state: present
+ nfs:
+ address: 1.2.3.4
+ path: /om02
+
+###########################
+# Networks
+###########################
+logical_networks:
+ - name: int_network_002
+ clusters:
+ - name: Default
+ assigned: true
+ required: true
+ display: false
+ migration: true
+ gluster: false
+
+host_networks:
+ - name: host2
+ check: true
+ save: true
+ bond:
+ name: bond0
+ mode: 2
+ interfaces:
+ - eth1
+ - eth2
+ networks:
+ - name: int_network_002
+ boot_protocol: dhcp
+
+###########################
+# Users & Groups
+###########################
+users:
+ - name: user1
+ authz_name: internal-authz
+ password: 1234568
+ valid_to: "2018-01-01 00:00:00Z"
+ - name: user2
+ authz_name: internal-authz
+ password: 1234568
+ valid_to: "2018-01-01 00:00:00Z"
+
+user_groups:
+ - name: group1
+ authz_name: internal-authz
+ users:
+ - user1
+
+permissions:
+ - state: present
+ user_name: user1
+ authz_name: internal-authz
+ role: UserRole
+ object_type: cluster
+ object_name: Default
+
+ - state: present
+ group_name: group1
+ authz_name: internal-authz
+ role: UserRole
+ object_type: cluster
+ object_name: Default
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/examples/vars/passwords.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/examples/vars/passwords.yml
new file mode 100644
index 00000000..92c7613c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/examples/vars/passwords.yml
@@ -0,0 +1,12 @@
+---
+# As an example this file is kept in plaintext; if you want to
+# encrypt it, execute the following command:
+#
+# $ ansible-vault encrypt passwords.yml
+#
+# It will ask you for a password, which you must then pass to
+# ansible interactively when executing the playbook.
+#
+# $ ansible-playbook myplaybook.yml --ask-vault-pass
+#
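+# Alternatively, store the vault password in a file and pass it with:
+#
+# $ ansible-playbook myplaybook.yml --vault-password-file .vault_pass
+#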
+engine_password: 123456
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/aaa_jdbc/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/aaa_jdbc/README.md
new file mode 100644
index 00000000..b20ca528
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/aaa_jdbc/README.md
@@ -0,0 +1,60 @@
+oVirt AAA JDBC
+==============
+
+The `aaa_jdbc` role manages users and groups in an AAA JDBC extension.
+
+Role Variables
+--------------
+
+The items in `users` list can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------|----------------|---------------------------------------|
+| state | present | Specifies whether the user is `present` or `absent`. |
+| name | UNDEF | Name of the user. |
+| authz_name | UNDEF | Authorization provider of the user. |
+| password | UNDEF | Password of the user. |
+| valid_to | UNDEF | Specifies the date until which the account remains valid. |
+| attributes | UNDEF | A dict of attributes related to the user. Available attributes: <ul><li>department</li><li>description</li><li>displayName</li><li>email</li><li>firstName</li><li>lastName</li><li>title</li></ul>|
+
+The items in `user_groups` list can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------|----------------|---------------------------------------|
+| state | present | Specifies whether the group is `present` or `absent`. |
+| name | UNDEF | Name of the group. |
+| authz_name | UNDEF | Authorization provider of the group. |
+| users | UNDEF | List of users that belong to this group. |
+
+Example Playbook
+----------------
+
+```yaml
+- name: oVirt AAA jdbc
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ users:
+ - name: user1
+ authz_name: internal-authz
+ password: 1234568
+ valid_to: "2018-01-01 00:00:00Z"
+ - name: user2
+ authz_name: internal-authz
+ password: 1234568
+ valid_to: "2018-01-01 00:00:00Z"
+ attributes:
+ firstName: 'alice'
+ department: 'Quality Engineering'
+
+ user_groups:
+ - name: group1
+ authz_name: internal-authz
+ users:
+ - user1
+
+ roles:
+ - ovirt.ovirt.infra.roles.aaa_jdbc
+```
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/aaa_jdbc/defaults/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/aaa_jdbc/defaults/main.yml
new file mode 100644
index 00000000..260312db
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/aaa_jdbc/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+aaa_jdbc_prefix: /usr/bin
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/aaa_jdbc/tasks/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/aaa_jdbc/tasks/main.yml
new file mode 100644
index 00000000..d4ac4a71
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/aaa_jdbc/tasks/main.yml
@@ -0,0 +1,88 @@
+---
+################################
+## User & groups internal
+################################
+- name: Check if ovirt-aaa-jdbc-tool exists
+ stat:
+ path: "{{ aaa_jdbc_prefix }}/ovirt-aaa-jdbc-tool"
+ register: aaa_jdbc_path_stat
+ tags:
+ - ovirt-aaa-jdbc
+ - users
+
+- name: Fail the role if the aaa-jdbc-tool doesn't exist
+  fail:
+    msg: "{{ aaa_jdbc_prefix }}/ovirt-aaa-jdbc-tool doesn't exist, are you on the engine machine?"
+ when: not aaa_jdbc_path_stat.stat.exists
+ tags:
+ - ovirt-aaa-jdbc
+ - users
+
+- name: Manage internal users
+ no_log: true
+ command: "{{ aaa_jdbc_prefix }}/ovirt-aaa-jdbc-tool user {{ (item.state is undefined or item.state == 'present') | ternary('add','delete') }} {{ item.name }}"
+ with_items:
+ - "{{ users | default([]) }}"
+ register: out_users
+ changed_when: "out_users.rc != 5 and out_users.rc != 4"
+ # 5 == user already exists
+ # 4 == no user to be removed
+ # 0 == all OK
+ failed_when: "out_users.rc != 5 and out_users.rc != 0 and out_users.rc != 4"
+ tags:
+ - ovirt-aaa-jdbc
+ - users
+
+- name: Update users according to attributes
+ command: "{{ aaa_jdbc_prefix }}/ovirt-aaa-jdbc-tool user edit {{ item.name }} {% for attr, value in item['attributes'].items() %} --attribute={{ attr }}='{{ value }}' {% endfor %}"
+ with_items:
+ - "{{ users | default([]) }}"
+ register: out_users
+ when: "item.attributes is defined"
+ changed_when: "out_users.rc != 5 and out_users.rc != 4"
+ # 5 == user already exists
+ # 4 == no user to be removed
+ # 0 == all OK
+ failed_when: "out_users.rc != 5 and out_users.rc != 0 and out_users.rc != 4"
+ tags:
+ - ovirt-aaa-jdbc
+ - users
+
+# FIXME: when user try to change the password which was already set in history
+# but is not current password we continue with changed=false:
+- name: Manage internal users passwords
+ no_log: true
+ command: "{{ aaa_jdbc_prefix }}/ovirt-aaa-jdbc-tool user password-reset {{ item.name }} --password=\"pass:{{ item.password }}\" --password-valid-to=\"{{ item.valid_to }}\""
+ with_items:
+ - "{{ users | default([]) }}"
+ register: out_users
+ when: "item.password is defined"
+ changed_when: "out_users.rc != 1"
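+  # 1 == password not updated (e.g. rejected because it was used before, see FIXME above)
+  # 0 == all OK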
+ failed_when: "out_users.rc != 1 and out_users.rc != 0"
+ tags:
+ - ovirt-aaa-jdbc
+ - users
+
+- name: Manage internal groups
+ command: "{{ aaa_jdbc_prefix }}/ovirt-aaa-jdbc-tool group {{ (item.state is undefined or item.state == 'present') | ternary('add','delete') }} {{ item.name }}"
+ with_items:
+ - "{{ user_groups | default([]) }}"
+ register: out_groups
+ changed_when: "out_groups.rc != 5 and out_groups.rc != 4"
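+  # 5 == group already exists
+  # 4 == no group to be removed
+  # 0 == all OK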
+ failed_when: "out_groups.rc != 5 and out_groups.rc != 0 and out_groups.rc != 4"
+ tags:
+ - ovirt-aaa-jdbc
+ - user_groups
+
+# FIXME: Support only add, if the user is removed from list, it's not removed from the group:
+- name: Manage groups members
+ command: "{{ aaa_jdbc_prefix }}/ovirt-aaa-jdbc-tool group-manage {{ (item.state is undefined or item.state == 'present') | ternary('useradd','userdel') }} {{ item.0.name }} --user {{ item.1 }}"
+ with_subelements:
+ - "{{ user_groups | default([]) }}"
+ - users
+ register: out_group_member
+  changed_when: "out_group_member.rc != 3"
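+  # 3 == membership already in the desired state, treated as unchanged
+  # 0 == all OK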
+ failed_when: "out_group_member.rc != 3 and out_group_member.rc != 0"
+ tags:
+ - ovirt-aaa-jdbc
+ - user_groups
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/clusters/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/clusters/README.md
new file mode 100644
index 00000000..c05c940f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/clusters/README.md
@@ -0,0 +1,106 @@
+oVirt Clusters
+==============
+
+The `clusters` role is used to set up oVirt clusters.
+
+Role Variables
+--------------
+
+| Name | Default value | Description |
+|-----------------------|-----------------------|-----------------------------------------|
+| clusters | UNDEF | List of dictionaries that describe the cluster. |
+| data_center_name | UNDEF (Required) | Name of the data center. |
+| compatibility_version | UNDEF (Required) | Compatibility version of data center. |
+
+The items in `clusters` list can contain the following parameters:
+
+| Name | Default value | Description |
+|-----------------------------------|---------------------|-----------------------------------------|
+| name | UNDEF (Required) | Name of the cluster. |
+| state | present | State of the cluster. |
+| cpu_type | Intel Conroe Family | CPU type of the cluster. |
+| profile | UNDEF | The cluster profile. You can choose a predefined cluster profile, see the tables below. |
+| ballooning | UNDEF | If True, enables memory balloon optimization. Memory ballooning is used to dynamically re-distribute or reclaim host memory based on VM needs. |
+| description | UNDEF | Description of the cluster. |
+| ksm | UNDEF | If True, MoM runs Kernel Same-page Merging (KSM) when necessary and when it can yield a memory saving benefit that outweighs its CPU cost. |
+| ksm_numa | UNDEF | If True, enables KSM for best performance inside NUMA nodes. |
+| vm_reason | UNDEF | If True, enables an optional reason field when a virtual machine is shut down from the Manager, allowing the administrator to provide an explanation for the shutdown. |
+| host_reason | UNDEF | If True, enables an optional reason field when a host is placed into maintenance mode from the Manager, allowing the administrator to provide an explanation for the maintenance. |
+| memory_policy<br/>alias: <i>performance_preset</i> | UNDEF | <ul><li>disabled - Disables memory page sharing.</li><li>server - Sets the memory page sharing threshold to 150% of the system memory on each host.</li><li>desktop - Sets the memory page sharing threshold to 200% of the system memory on each host.</li></ul> |
+| migration_policy | UNDEF | A migration policy defines the conditions for live migrating virtual machines in the event of host failure. Following policies are supported:<ul><li>legacy - Legacy behavior of 3.6 version.</li><li>minimal_downtime - Virtual machines should not experience any significant downtime.</li><li>suspend_workload - Virtual machines may experience a more significant downtime.</li><li>post_copy - Virtual machines should not experience any significant downtime. If the VM migration is not converging for a long time, the migration will be switched to post-copy</li></ul> |
+| scheduling_policy | UNDEF | The scheduling policy used by the cluster. |
+| ha_reservation | UNDEF | If True, enables oVirt/RHV to monitor cluster capacity for highly available virtual machines. |
+| fence_enabled | UNDEF | If True, enables fencing on the cluster. |
+| fence_connectivity_threshold | UNDEF | The threshold used by <i>fence_skip_if_connectivity_broken</i>. |
+| fence_skip_if_connectivity_broken | UNDEF | If True, fencing will be temporarily disabled if the percentage of hosts in the cluster that are experiencing connectivity issues is greater than or equal to the defined threshold. |
+| fence_skip_if_sd_active | UNDEF | If True, any hosts in the cluster that are Non Responsive and still connected to storage will not be fenced. |
+| mac_pool | UNDEF | Mac pool name. |
+| comment | UNDEF | Comment of the cluster. |
+| migration_bandwidth | UNDEF | The bandwidth settings define the maximum bandwidth of both outgoing and incoming migrations per host.<br/>Following bandwidth options are supported:<br/><ul><li>auto - Bandwidth is copied from the rate limit [Mbps] setting in the data center host network QoS.</li><li>hypervisor_default - Bandwidth is controlled by local VDSM setting on sending host.</li><li>custom - Defined by user (in Mbps).</li></ul> |
+| migration_bandwidth_limit | UNDEF | Set the custom migration bandwidth limit. |
+| network | UNDEF | Management network of cluster to access cluster hosts. |
+| resilience_policy | UNDEF | The resilience policy defines how the virtual machines are prioritized in the migration.<br/>Following values are supported:<br/><ul><li>do_not_migrate - Prevents virtual machines from being migrated.</li><li>migrate - Migrates all virtual machines in order of their defined priority.</li><li>migrate_highly_available - Migrates only highly available virtual machines to prevent overloading other hosts.</li></ul> |
+| rng_sources | UNDEF | List specifying the random number generator devices that all hosts in the cluster will use. Supported generators are: <i>hwrng</i> and <i>random</i>. |
+| serial_policy | UNDEF | Specify a serial number policy for the virtual machines in the cluster.<br/>Following options are supported:<br/><ul><li>vm - Sets the virtual machine's UUID as its serial number.</li><li>host - Sets the host's UUID as the virtual machine's serial number.</li><li>custom - Allows you to specify a custom serial number in serial_policy_value.</li></ul> |
+| serial_policy_value | UNDEF | Allows you to specify a custom serial number. This parameter is used only when <i>serial_policy</i> is custom. |
+| spice_proxy | UNDEF | The proxy by which the SPICE client will connect to virtual machines. The address must be in the following format: protocol://[host]:[port] |
+| switch_type | UNDEF | Type of switch to be used by all networks in the given cluster. Either <i>legacy</i>, which uses a Linux bridge, or <i>ovs</i>, which uses Open vSwitch. |
+| threads_as_cores | UNDEF | If True, exposed host threads are treated as cores that can be utilized by virtual machines. |
+| trusted_service | UNDEF | If True, enables integration with an OpenAttestation server. |
+| virt | UNDEF | If True, hosts in this cluster will be used to run virtual machines. Default is true. |
+| gluster | UNDEF | If True, hosts in this cluster will be used as Gluster Storage server nodes, and not for running virtual machines. |
+| external_network_providers | UNDEF | List specifying the external network providers available in the cluster. |
+
+More information about the parameters can be found in the [Ansible documentation](http://docs.ansible.com/ansible/ovirt_cluster_module.html).
+
+Possible `profile` options are `development` and `production`; their default values are described below:
+
+`Development`:
+
+| Parameter | Value |
+|------------------|---------------|
+| ballooning | true |
+| ksm | true |
+| host_reason | false |
+| vm_reason | false |
+| memory_policy | server |
+| migration_policy | post_copy |
+
+`Production`:
+
+| Parameter | Value |
+|-----------------------------------|--------------------|
+| ballooning | false |
+| ksm | false |
+| host_reason | true |
+| vm_reason | true |
+| memory_policy | disabled |
+| migration_policy | suspend_workload |
+| scheduling_policy | evenly_distributed |
+| ha_reservation | true |
+| fence_enabled | true |
+| fence_skip_if_connectivity_broken | true |
+| fence_skip_if_sd_active | true |
+
+Example Playbook
+----------------
+
+```yaml
+- name: oVirt clusters
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ data_center_name: mydatacenter
+ compatibility_version: 4.4
+
+ clusters:
+ - name: production
+ cpu_type: Intel Conroe Family
+ profile: production
+ mac_pool: production_mac_pools
+
+ roles:
+ - ovirt.ovirt.infra.roles.clusters
+```
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/clusters/tasks/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/clusters/tasks/main.yml
new file mode 100644
index 00000000..587c7593
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/clusters/tasks/main.yml
@@ -0,0 +1,45 @@
+---
+- name: Add clusters
+ ovirt_cluster:
+ auth: "{{ ovirt_auth }}"
+ state: "{{ item.state | default(omit) }}"
+ name: "{{ item.name }}"
+ data_center: "{{ data_center_name }}"
+ cpu_type: "{{ item.cpu_type | default('Intel Conroe Family') }}"
+ compatibility_version: "{{ compatibility_version }}"
+ mac_pool: "{{ item.mac_pool | default(omit) }}"
+ comment: "{{ item.comment | default(omit) }}"
+ external_network_providers: "{{ item.external_network_providers | default(omit) }}"
+ fence_connectivity_threshold: "{{ item.fence_connectivity_threshold | default(omit) }}"
+ gluster: "{{ item.gluster | default(omit) }}"
+ migration_bandwidth: "{{ item.migration_bandwidth | default(omit) }}"
+ migration_bandwidth_limit: "{{ item.migration_bandwidth_limit | default(omit) }}"
+ network: "{{ item.network | default(omit) }}"
+ resilience_policy: "{{ item.resilience_policy | default(omit) }}"
+ rng_sources: "{{ item.rng_sources | default(omit) }}"
+ serial_policy: "{{ item.serial_policy | default(omit) }}"
+ serial_policy_value: "{{ item.serial_policy_value | default(omit) }}"
+ spice_proxy: "{{ item.spice_proxy | default(omit) }}"
+ switch_type: "{{ item.switch_type | default(omit) }}"
+ threads_as_cores: "{{ item.threads_as_cores | default(omit) }}"
+ trusted_service: "{{ item.trusted_service | default(omit) }}"
+ virt: "{{ item.virt | default(omit) }}"
+ # Parameters part of profile:
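+    # Each parameter below resolves in order: the explicit item value, then the
+    # value from the selected profile (see vars/main.yml), then omit
+    # (memory_policy ultimately falls back to 'disabled'):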
+ ballooning: "{{ item.ballooning | default(profiles[item.profile | default('_')].ballooning) | default(omit) }}"
+ description: "{{ item.description| default(profiles[item.profile | default('_')].description) | default(omit) }}"
+ ksm: "{{ item.ksm | default(profiles[item.profile | default('_')].ksm) | default(omit) }}"
+ ksm_numa: "{{ item.ksm_numa | default(profiles[item.profile | default('_')].ksm_numa) | default(omit) }}"
+ host_reason: "{{ item.host_reason | default(profiles[item.profile | default('_')].host_reason) | default(omit) }}"
+ vm_reason: "{{ item.vm_reason | default(profiles[item.profile | default('_')].vm_reason) | default(omit) }}"
+ memory_policy: "{{ item.memory_policy | default(item.performance_preset) | default(profiles[item.profile | default('_')].memory_policy) | default(profiles[item.profile | default('_')].performance_preset) | default('disabled') }}"
+ migration_policy: "{{ item.migration_policy | default(profiles[item.profile | default('_')].migration_policy) | default(omit) }}"
+ scheduling_policy: "{{ item.scheduling_policy | default(profiles[item.profile | default('_')].scheduling_policy) | default(omit) }}"
+ ha_reservation: "{{ item.ha_reservation | default(profiles[item.profile | default('_')].ha_reservation) | default(omit) }}"
+ fence_enabled: "{{ item.fence_enabled | default(profiles[item.profile | default('_')].fence_enabled) | default(omit) }}"
+ fence_skip_if_connectivity_broken: "{{ item.fence_skip_if_connectivity_broken | default(profiles[item.profile | default('_')].fence_skip_if_connectivity_broken) | default(omit) }}"
+ fence_skip_if_sd_active: "{{ item.fence_skip_if_sd_active | default(profiles[item.profile | default('_')].fence_skip_if_sd_active) | default(omit) }}"
+
+ with_items:
+ - "{{ clusters | default([]) }}"
+ tags:
+ - clusters
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/clusters/vars/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/clusters/vars/main.yml
new file mode 100644
index 00000000..9a82754b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/clusters/vars/main.yml
@@ -0,0 +1,27 @@
+---
+# Define profiles for clusters
+profiles:
+ development:
+ description: Development cluster
+ ballooning: true
+ ksm: true
+ host_reason: false
+ vm_reason: false
+ memory_policy: server
+ migration_policy: post_copy
+
+ production:
+ description: Production cluster
+ ballooning: false
+ ksm: false
+ host_reason: true
+ vm_reason: true
+ memory_policy: disabled
+ migration_policy: suspend_workload
+ scheduling_policy: evenly_distributed
+ ha_reservation: true
+ fence_enabled: true
+ fence_skip_if_connectivity_broken: true
+ fence_skip_if_sd_active: true
+
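+  # Fallback entry used when a cluster item specifies no profile; attribute
+  # lookups on it are undefined, so every parameter falls through to its
+  # next default.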
+ _: false
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/README.md
new file mode 100644
index 00000000..215b39ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/README.md
@@ -0,0 +1,30 @@
+oVirt Datacenter Cleanup
+========================
+
+The `datacenter_cleanup` role is used to clean up all entities inside
+oVirt datacenters and finally to remove the datacenters themselves.
+
+Role Variables
+--------------
+
+| Name | Default value | Description |
+|--------------------------|-----------------------|--------------------------------------|
+| data_center_name | UNDEF | Name of the data center. |
+| format_storages          | false                 | Whether the role should format storages when removing them. |
+
+Example Playbook
+----------------
+
+```yaml
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ data_center_name: mydatacenter
+ format_storages: true
+
+ roles:
+ - ovirt.ovirt.infra.roles.datacenter_cleanup
+```
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/defaults/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/defaults/main.yml
new file mode 100644
index 00000000..336a45af
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+format_storages: false
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/cluster_and_hosts.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/cluster_and_hosts.yml
new file mode 100644
index 00000000..64ec4a88
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/cluster_and_hosts.yml
@@ -0,0 +1,21 @@
+---
+- name: Find existing Hosts
+ ovirt_host_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "cluster={{ cluster_item.name }}"
+ register: host_info
+
+- name: Remove Hosts
+ ovirt_host:
+ state: absent
+ name: "{{ host_item.name }}"
+ auth: "{{ ovirt_auth }}"
+ with_items: "{{ host_info.ovirt_hosts }}"
+ loop_control:
+ loop_var: host_item
+
+- name: Remove Cluster
+ ovirt_cluster:
+ state: absent
+ name: "{{ cluster_item.name }}"
+ auth: "{{ ovirt_auth }}"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/datacenter.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/datacenter.yml
new file mode 100644
index 00000000..28b1b78a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/datacenter.yml
@@ -0,0 +1,6 @@
+---
+- name: Remove Datacenter
+ ovirt_datacenter:
+ state: absent
+ name: "{{ data_center_name }}"
+ auth: "{{ ovirt_auth }}"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/disks.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/disks.yml
new file mode 100644
index 00000000..5cbf9dd3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/disks.yml
@@ -0,0 +1,16 @@
+---
+- name: Find existing Disks
+ ovirt_disk_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "datacenter={{ data_center_name }}"
+ register: disk_info
+
+- name: Remove Disks
+ ovirt_disk:
+ state: absent
+ id: "{{ ovirt_item.id }}"
+ auth: "{{ ovirt_auth }}"
+ with_items: "{{ disk_info.ovirt_disks }}"
+ when: ovirt_item.name != 'OVF_STORE'
+ loop_control:
+ loop_var: ovirt_item
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/main.yml
new file mode 100644
index 00000000..972e23f6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/main.yml
@@ -0,0 +1,39 @@
+---
+- name: Remove VMPools
+ include: vm_pools.yml
+
+- name: Remove VMs
+ include: vms.yml
+
+- name: Remove Templates
+ include: templates.yml
+
+- name: Remove Disks
+ include: disks.yml
+
+- name: Find existing Storage Domains in Datacenter
+ ovirt_storage_domain_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "datacenter={{ data_center_name }}"
+ register: sd_info
+
+- name: Remove all Storage Domains except master
+ include: storages_pre.yml
+
+- name: Find existing clusters in Datacenter
+ ovirt_cluster_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "datacenter={{ data_center_name }}"
+ register: cluster_info
+
+- name: Remove Datacenter
+ include: datacenter.yml
+
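+# The master storage domain was put into maintenance in storages_pre.yml and
+# can be removed only after the data center itself is gone: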
+- name: Remove master Storage Domain
+ include: storages_last.yml
+
+- name: Remove Clusters and Hosts
+ include: cluster_and_hosts.yml
+ with_items: "{{ cluster_info.ovirt_clusters }}"
+ loop_control:
+ loop_var: cluster_item
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/storages_last.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/storages_last.yml
new file mode 100644
index 00000000..9b7f0958
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/storages_last.yml
@@ -0,0 +1,11 @@
+---
+- name: Remove master Storage Domain
+ ovirt_storage_domain:
+ state: absent
+ id: "{{ ovirt_item.id }}"
+ auth: "{{ ovirt_auth }}"
+ format: "{{ format_storages }}"
+ with_items: "{{ ovirt_storage_domains }}"
+ when: ovirt_item.master
+ loop_control:
+ loop_var: ovirt_item
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/storages_pre.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/storages_pre.yml
new file mode 100644
index 00000000..26c70710
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/storages_pre.yml
@@ -0,0 +1,28 @@
+---
+- name: Find existing Storage Domains
+ ovirt_storage_domain_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "datacenter={{ data_center_name }}"
+ register: sd_info
+
+- name: Remove Storage Domains apart from master
+ ovirt_storage_domain:
+ state: absent
+ id: "{{ ovirt_item.id }}"
+ auth: "{{ ovirt_auth }}"
+ format: "{{ format_storages }}"
+ with_items: "{{ sd_info.ovirt_storage_domains }}"
+ when: not ovirt_item.master
+ loop_control:
+ loop_var: ovirt_item
+
+- name: Put master Storage Domain in maintenance
+ ovirt_storage_domain:
+ state: maintenance
+ id: "{{ ovirt_item.id }}"
+ data_center: "{{ data_center_name }}"
+ auth: "{{ ovirt_auth }}"
+ with_items: "{{ sd_info.ovirt_storage_domains }}"
+ when: ovirt_item.master
+ loop_control:
+ loop_var: ovirt_item
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/templates.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/templates.yml
new file mode 100644
index 00000000..c26d26dd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/templates.yml
@@ -0,0 +1,16 @@
+---
+- name: Find existing Templates
+ ovirt_template_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "datacenter={{ data_center_name }}"
+ register: template_info
+
+- name: Remove Templates
+ ovirt_template:
+ state: absent
+ auth: "{{ ovirt_auth }}"
+ id: "{{ ovirt_item.id }}"
+ with_items: "{{ template_info.ovirt_templates }}"
+ when: ovirt_item.name != 'Blank'
+ loop_control:
+ loop_var: ovirt_item
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/vm_pools.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/vm_pools.yml
new file mode 100644
index 00000000..90eae12f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/vm_pools.yml
@@ -0,0 +1,15 @@
+---
+- name: Find existing VMPools
+ ovirt_vmpool_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "datacenter={{ data_center_name }}"
+ register: vmpool_info
+
+- name: Remove VMPools
+ ovirt_vmpool:
+ state: absent
+ name: "{{ ovirt_item.name }}"
+ auth: "{{ ovirt_auth }}"
+ with_items: "{{ vmpool_info.ovirt_vm_pools }}"
+ loop_control:
+ loop_var: ovirt_item
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/vms.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/vms.yml
new file mode 100644
index 00000000..9e86960d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenter_cleanup/tasks/vms.yml
@@ -0,0 +1,15 @@
+---
+- name: Find existing VMs
+ ovirt_vm_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "datacenter={{ data_center_name }}"
+ register: vm_info
+
+- name: Remove VMs
+ ovirt_vm:
+ state: absent
+ id: "{{ ovirt_item.id }}"
+ auth: "{{ ovirt_auth }}"
+ with_items: "{{ vm_info.ovirt_vms }}"
+ loop_control:
+ loop_var: ovirt_item
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenters/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenters/README.md
new file mode 100644
index 00000000..8baf33ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenters/README.md
@@ -0,0 +1,56 @@
+oVirt Datacenters
+=================
+
+The `datacenters` role is used to set up or cleanup oVirt datacenters.
+
+Role Variables
+--------------
+
+| Name | Default value | Description |
+|--------------------------|-----------------------|--------------------------------------|
+| data_center_name | UNDEF | Name of the data center. |
+| data_center_description | UNDEF | Description of the data center. |
+| data_center_local | false | Specify whether the data center is shared or local. |
+| compatibility_version | UNDEF | Compatibility version of data center. |
+| data_center_state | present | Specify whether the datacenter should be present or absent. |
+| recursive_cleanup        | false                 | Specify whether to recursively remove all entities inside the DC. Valid only when data_center_state == absent. |
+| format_storages | false | Specify whether to format ALL the storages that are going to be removed as part of the DC. Valid only when data_center_state == absent and recursive_cleanup == true. |
+
+Example Playbooks
+----------------
+
+```yaml
+# Example 1
+
+- name: Add oVirt datacenter
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ data_center_name: mydatacenter
+ data_center_description: mydatacenter
+ data_center_local: false
+ compatibility_version: 4.4
+
+ roles:
+ - ovirt.ovirt.infra.roles.datacenters
+```
+
+```yaml
+# Example 2
+
+- name: Recursively remove oVirt datacenter
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ data_center_name: mydatacenter
+ data_center_state: absent
+ recursive_cleanup: true
+ format_storages: true
+
+ roles:
+ - ovirt.ovirt.infra.roles.datacenters
+```
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenters/defaults/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenters/defaults/main.yml
new file mode 100644
index 00000000..aa6ffe11
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenters/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+data_center_state: 'present'
+recursive_cleanup: false
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenters/tasks/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenters/tasks/main.yml
new file mode 100644
index 00000000..45656d4f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/datacenters/tasks/main.yml
@@ -0,0 +1,28 @@
+---
+- name: Add data center
+ ovirt_datacenter:
+ auth: "{{ ovirt_auth }}"
+ state: present
+ name: "{{ data_center_name }}"
+ description: "{{ data_center_description | default(omit) }}"
+ local: "{{ data_center_local | default(false) }}"
+ compatibility_version: "{{ compatibility_version }}"
+ when: data_center_state=='present'
+ tags:
+ - datacenters
+
+- name: Remove data center
+ ovirt_datacenter:
+ auth: "{{ ovirt_auth }}"
+ state: absent
+ name: "{{ data_center_name }}"
+ when: data_center_state=='absent' and not recursive_cleanup
+ tags:
+ - datacenters
+
+- name: Recursively remove data center
+ import_role:
+ name: ovirt.ovirt.infra.roles.datacenter_cleanup
+ when: data_center_state=='absent' and recursive_cleanup
+ tags:
+ - datacenters
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/external_providers/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/external_providers/README.md
new file mode 100644
index 00000000..11b16fef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/external_providers/README.md
@@ -0,0 +1,73 @@
+oVirt External Providers
+========================
+
+The `external_providers` role is used to set up oVirt external providers.
+
+Role Variables
+--------------
+
+| Name | Default value | Description |
+|-----------------------|-----------------------|-----------------------------------------------------------|
+| external_providers | UNDEF | List of dictionaries that describe the external provider. |
+
+The items in `external_providers` list can contain the following parameters:
+
+| Name | Default value | Description |
+|------------------------|---------------------|----------------------------------------------------------------------------------|
+| name | UNDEF (Required) | Name of the external provider. |
+| state | present | State of the external provider. Values can be: <ul><li>present</li><li>absent</li></ul>|
+| type | UNDEF (Required) | Type of the external provider. Values can be: <ul><li>os_image</li><li>network</li><li>os_volume</li><li>foreman</li></ul>|
+| url | UNDEF | URL where external provider is hosted. Required if state is present. |
+| username | UNDEF | Username to be used for login to external provider. Applicable for all types. |
+| password | UNDEF | Password of the user specified in username parameter. Applicable for all types. |
+| tenant | UNDEF | Name of the tenant. |
+| auth_url | UNDEF | Keystone authentication URL of the OpenStack provider. Required for: <ul><li>os_image</li><li>network</li><li>os_volume</li></ul>|
+| data_center | UNDEF | Name of the data center where the provider should be attached. Applicable for type <i>os_volume</i>. |
+| authentication_keys | UNDEF | List of authentication keys. Each key is represented by a dict like {'uuid': 'my-uuid', 'value': 'secret value'}. Added in Ansible 2.6. Applicable for type <i>os_volume</i>. |
+
+More information about the parameters can be found in the [Ansible documentation](http://docs.ansible.com/ansible/latest/ovirt_external_provider_module.html).
+
+
+Example Playbook
+----------------
+
+```yaml
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+
+ external_providers:
+ - name: myglance
+ type: os_image
+ state: present
+ url: http://externalprovider.example.com:9292
+ username: admin
+ password: secret
+ tenant: admin
+ auth_url: http://externalprovider.example.com:35357/v2.0
+ - name: mycinder
+ type: os_volume
+ state: present
+ url: http://externalprovider.example.com:9292
+ username: admin
+ password: secret
+ tenant: admin
+ auth_url: http://externalprovider.example.com:5000/v2.0
+ authentication_keys:
+ -
+ uuid: "1234567-a1234-12a3-a234-123abc45678"
+ value: "ABCD00000000111111222333445w=="
+ - name: public-glance
+ type: os_image
+ state: present
+ url: http://glance.public.com:9292
+ - name: external-provider-to-be-removed
+ type: os_image
+ state: absent
+
+ roles:
+ - ovirt.ovirt.infra.roles.external_providers
+```
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/external_providers/tasks/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/external_providers/tasks/main.yml
new file mode 100644
index 00000000..60fc5511
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/external_providers/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+- name: Add external providers
+ ovirt_external_provider:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ item.name }}"
+ state: "{{ item.state | default('present') }}"
+ type: "{{ item.type }}"
+ url: "{{ item.url }}"
+ password: "{{ item.password | default(omit) }}"
+ tenant: "{{ item.tenant | default(omit) }}"
+ auth_url: "{{ item.auth_url | default(omit) }}"
+ data_center: "{{ item.data_center | default(omit) }}"
+ username: "{{ item.username | default(omit) }}"
+ authentication_keys: "{{ item.authentication_keys | default(omit) }}"
+ with_items:
+ - "{{ external_providers | default([]) }}"
+ tags:
+ - external_providers
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/hosts/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/hosts/README.md
new file mode 100644
index 00000000..ea20ef74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/hosts/README.md
@@ -0,0 +1,41 @@
+oVirt Hosts
+===========
+
+The `hosts` role is used to set up oVirt hosts.
+
+Role Variables
+--------------
+
+The `hosts` list can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------|------------------|---------------------------------------|
+| name | UNDEF (Required) | Name of the host. |
+| state | present | Specifies whether the host is `present` or `absent`. |
+| address | UNDEF (Required) | IP address or FQDN of the host. |
+| password | UNDEF | The host's root password. Required if <i>public_key</i> is false. |
+| public_key | UNDEF | If <i>true</i>, the public key is used to authenticate to the host. |
+| cluster | UNDEF (Required) | The cluster that the host must connect to. |
+| timeout | 1200 | Maximum wait time for the host to be in an UP state. |
+| poll_interval | 20 | Polling interval to check the host status. |
+| hosted_engine | UNDEF | Specifies whether to 'deploy' or 'undeploy' the hosted engine on the host. |
+
+Example Playbook
+----------------
+
+```yaml
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ hosts:
+ - name: myhost
+ address: 1.2.3.4
+ cluster: production
+ password: 123456
+
+ roles:
+ - ovirt.ovirt.infra.roles.hosts
+```
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/hosts/defaults/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/hosts/defaults/main.yml
new file mode 100644
index 00000000..51f58307
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/hosts/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+ovirt_hosts_max_timeout: 2100
+ovirt_hosts_add_timeout: 1800
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/hosts/tasks/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/hosts/tasks/main.yml
new file mode 100644
index 00000000..dac7da40
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/hosts/tasks/main.yml
@@ -0,0 +1,86 @@
+---
+- name: Get hosts
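+  # Search only for managed hosts that are in the 'installfailed' state,
+  # so the next task can reinstall them.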
+ ovirt_host_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "{{ ovirt_infra_hosts | map(attribute='name') | map('regex_replace', '(.*)', 'name=\\1') | list | join(' or ') }} status=installfailed"
+ register: host_info
+ when: ovirt_infra_hosts | length > 0
+ tags:
+ - hosts
+ - reinstall
+
+- name: Reinstall hosts
+ ovirt_host:
+ auth: "{{ ovirt_auth }}"
+ state: reinstalled
+ name: "{{ item.name }}"
+ public_key: "{{ item.password is undefined }}"
+ password: "{{ item.password | default(omit) }}"
+ with_items:
+ - "{{ host_info.ovirt_hosts | default([]) }}"
+ loop_control:
+ label: "{{ item.name }}"
+ tags:
+ - hosts
+ - reinstall
+
+- name: Add hosts
+ ovirt_host:
+ auth: "{{ ovirt_auth }}"
+ state: "{{ item.state | default(omit) }}"
+ name: "{{ item.name }}"
+ address: "{{ item.address | default(omit) }}"
+ cluster: "{{ item.cluster }}"
+ password: "{{ item.password | default(omit) }}"
+ public_key: "{{ item.public_key | default(omit) }}"
+ override_iptables: true
+ timeout: "{{ item.timeout | default(ovirt_hosts_add_timeout) }}"
+ poll_interval: "{{ item.poll_interval | default(20) }}"
+ hosted_engine: "{{ item.hosted_engine | default(omit) }}"
+ with_items: "{{ ovirt_infra_hosts }}"
+ loop_control:
+ label: "{{ item.name }}"
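+  # Launch each host addition as a fire-and-forget async job (poll: 0) so all
+  # hosts deploy in parallel; completion is awaited below via async_status.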
+ async: "{{ ovirt_hosts_max_timeout }}"
+ poll: 0
+ register: add_hosts
+ tags:
+ - hosts
+
+- name: Wait for hosts to be added
+ async_status: "jid={{ item.ansible_job_id }}"
+ register: job_result
+ with_items:
+ - "{{ add_hosts.results | ovirt.ovirt.removesensitivevmdata }}"
+ loop_control:
+ label: "{{ item.item.name }}"
+ tags:
+ - hosts
+ ignore_errors: yes
+ until: job_result.finished
+ retries: "{{ ovirt_hosts_max_timeout // 20 }}"
+ delay: 20
+
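+# Job failures are tolerated above (ignore_errors) so that every host is
+# waited for; this task then re-raises any failure except the known
+# "missing networks" case: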
+- name: Fail the play on an unexpected error
+ fail:
+ msg: The host deploy failed with message '{{ item["exception"] }}'.
+ when: item.failed and "the following networks are missing" not in item["exception"]
+ with_items:
+ - "{{ job_result.results }}"
+
+- name: Set Power Management
+ ovirt_host_pm:
+ auth: "{{ ovirt_auth }}"
+ address: "{{ item.power_management.address | default(omit) }}"
+ state: "{{ item.power_management.state | default(omit) }}"
+ username: "{{ item.power_management.username | default(omit) }}"
+ password: "{{ item.power_management.password | default(omit) }}"
+ type: "{{ item.power_management.type | default(omit) }}"
+ options: "{{ item.power_management.options | default(omit) }}"
+ port: "{{ item.power_management.port | default(omit) }}"
+ name: "{{ item.name }}"
+ when: item.power_management is defined and not (item.state is defined and item.state == 'absent')
+ with_items: "{{ ovirt_infra_hosts }}"
+ loop_control:
+ label: "{{ item.name }}"
+ tags:
+ - hosts
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/mac_pools/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/mac_pools/README.md
new file mode 100644
index 00000000..a5e5b3d2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/mac_pools/README.md
@@ -0,0 +1,39 @@
+oVirt Mac Pools
+=================
+
+The `mac_pools` role is used to set up oVirt MAC pools.
+
+Role Variables
+--------------
+
+| Name | Default value | Description |
+|-----------------------|-----------------------|-----------------------------------------------------------|
+| mac_pools | UNDEF | List of dictionaries that describe the MAC pool. |
+
+The items in `mac_pools` list can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------------------|-----------------------|-------------------------------------------------------------------|
+| mac_pool_name             | UNDEF                 | Name of the MAC pool to manage. |
+| mac_pool_ranges           | UNDEF                 | List of MAC ranges. The from and to values should be separated by a comma. For example: 00:1a:4a:16:01:51,00:1a:4a:16:01:61 |
+| mac_pool_allow_duplicates | UNDEF                 | If true, allows a MAC address to be used multiple times in a pool. The oVirt engine default is false. |
+
+Example Playbook
+----------------
+
+```yaml
+- name: oVirt set mac pool
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ mac_pools:
+ - mac_pool_name: my_mac_pool
+ mac_pool_allow_duplicates: false
+ mac_pool_ranges:
+ - 00:1a:4a:16:01:51,00:1a:4a:16:01:61
+
+ roles:
+ - ovirt.ovirt.infra.roles.mac_pools
+```
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/mac_pools/tasks/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/mac_pools/tasks/main.yml
new file mode 100644
index 00000000..417b53ed
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/mac_pools/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+- name: Set MAC pools
+ ovirt_mac_pool:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ item.mac_pool_name }}"
+ allow_duplicates: "{{ item.mac_pool_allow_duplicates | default(omit) }}"
+ ranges: "{{ item.mac_pool_ranges }}"
+ with_items:
+ - "{{ mac_pools | default([]) }}"
+ tags:
+ - mac_pools
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/networks/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/networks/README.md
new file mode 100644
index 00000000..bfd453ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/networks/README.md
@@ -0,0 +1,77 @@
+oVirt Networks
+==============
+
+The `networks` role sets up oVirt networks.
+
+Role Variables
+--------------
+
+The `data_center_name` variable specifies the data center name of the network.
+
+The `logical_networks` list can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------|----------------|---------------------------------------|
+| name | UNDEF | Name of the network. |
+| state | present | Specifies whether the network state is `present` or `absent`. |
+| vlan_tag | UNDEF | Specify VLAN tag. |
+| vm_network | True | If True, the network will be marked as a VM network. |
+| mtu | UNDEF | Maximum transmission unit (MTU) of the network. |
+| description | UNDEF | Description of the network. |
+| clusters | UNDEF | List of dictionaries describing how the network is managed in a specific cluster. |
+| label | UNDEF | Name of the label to assign to the network. |
+
+More information about the parameters can be found in the [ovirt_network](http://docs.ansible.com/ansible/ovirt_network_module.html) module documentation.
+
+The `host_networks` list can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------|----------------|---------------------------------------|
+| name | UNDEF | Name of the host. |
+| state | UNDEF | Specifies whether the network state is `present` or `absent`. |
+| check | UNDEF | If true, verifies the connection between the host and engine. |
+| save | UNDEF | If true, the network configuration will be persistent; by default it is temporary. |
+| bond | UNDEF | Dictionary describing the network bond. |
+| networks | UNDEF | Dictionary describing the networks to be attached to the interface or bond. |
+| labels | UNDEF | List of names of network labels to be assigned to the bond or interface. |
+| interface | UNDEF | Name of the network interface where the logical network should be attached. |
+
+More information about the parameters can be found in the [ovirt_host_network](http://docs.ansible.com/ansible/ovirt_host_network_module.html) module documentation.
+
+Example Playbook
+----------------
+
+```yaml
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ logical_networks:
+ - name: mynetwork
+ clusters:
+ - name: development
+ assigned: yes
+ required: no
+ display: no
+ migration: yes
+ gluster: no
+
+ host_networks:
+ - name: myhost1
+ check: true
+ save: true
+ bond:
+ name: bond0
+ mode: 2
+ interfaces:
+ - eth2
+ - eth3
+ networks:
+ - name: mynetwork
+ boot_protocol: dhcp
+
+ roles:
+ - ovirt.ovirt.infra.roles.networks
+```
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/networks/tasks/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/networks/tasks/main.yml
new file mode 100644
index 00000000..ff8243b3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/networks/tasks/main.yml
@@ -0,0 +1,41 @@
+---
+#########################################
+# Logical networks
+#########################################
+- name: Add networks
+ ovirt_network:
+ auth: "{{ ovirt_auth }}"
+ state: "{{ item.state | default(omit) }}"
+ name: "{{ item.name }}"
+ data_center: "{{ data_center_name }}"
+ vlan_tag: "{{ item.vlan_tag | default(omit) }}"
+ vm_network: "{{ item.vm_network | default(omit) }}"
+ mtu: "{{ item.mtu | default(omit) }}"
+ description: "{{ item.description | default(omit) }}"
+ clusters: "{{ item.clusters | default(omit) }}"
+ label: "{{ item.label | default(omit) }}"
+ with_items:
+ - "{{ logical_networks | default([]) }}"
+ tags:
+ - logical_networks
+ - networks
+
+#########################################
+# Host networks
+#########################################
+- name: Setup host networks
+ ovirt_host_network:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ item.name }}"
+ state: "{{ item.state | default(omit) }}"
+ check: "{{ item.check | default(omit) }}"
+ save: "{{ item.save | default(omit) }}"
+ bond: "{{ item.bond | default(omit) }}"
+ networks: "{{ item.networks | default(omit) }}"
+ labels: "{{ item.labels | default(omit) }}"
+ interface: "{{ item.interface | default(omit) }}"
+ with_items:
+ - "{{ host_networks | default([]) }}"
+ tags:
+ - host_networks
+ - networks
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/permissions/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/permissions/README.md
new file mode 100644
index 00000000..55970ee8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/permissions/README.md
@@ -0,0 +1,49 @@
+oVirt Permissions
+=================
+
+The `permissions` role is used to set up oVirt permissions.
+
+Role Variables
+--------------
+
+The `permissions` list can contain the following parameters:
+
+| Name | Default value | Description |
+|---------------|----------------|----------------------------|
+| state | present | Specifies whether the state of the permission is `present` or `absent`. |
+| user_name | UNDEF | The user to manage the permission for. |
+| group_name | UNDEF | Name of the group to manage the permission for. |
+| authz_name | UNDEF | Name of the authorization provider of the group or user. |
+| role | UNDEF | The role to be assigned to the user or group. |
+| object_type | UNDEF | The object type which should be used to assign the permission. Possible object types are:<ul><li>data_center</li><li>cluster</li><li>host</li><li>storage_domain</li><li>network</li><li>disk</li><li>vm</li><li>vm_pool</li><li>template</li><li>cpu_profile</li><li>disk_profile</li><li>vnic_profile</li><li>system</li></ul> |
+| object_name | UNDEF | Name of the object where the permission should be assigned. |
+
+
+Example Playbook
+----------------
+
+```yaml
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ permissions:
+ - state: present
+ user_name: user1
+ authz_name: internal-authz
+ role: UserRole
+ object_type: cluster
+ object_name: production
+
+ - state: present
+ group_name: group1
+ authz_name: internal-authz
+ role: UserRole
+ object_type: cluster
+ object_name: production
+
+ roles:
+ - ovirt.ovirt.infra.roles.permissions
+```
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/permissions/tasks/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/permissions/tasks/main.yml
new file mode 100644
index 00000000..a8d662af
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/permissions/tasks/main.yml
@@ -0,0 +1,40 @@
+---
+################################
+## User & group management
+################################
+- name: Manage users
+ ovirt_user:
+ auth: "{{ ovirt_auth }}"
+ state: "{{ item.state | default(omit) }}"
+ name: "{{ item.name }}"
+ authz_name: "{{ item.authz_name | default(omit) }}"
+ with_items:
+ - "{{ users | default([]) }}"
+ tags:
+ - permissions
+
+- name: Manage groups
+ ovirt_group:
+ auth: "{{ ovirt_auth }}"
+ state: "{{ item.state | default(omit) }}"
+ name: "{{ item.name }}"
+ authz_name: "{{ item.authz_name | default(omit) }}"
+ with_items:
+ - "{{ user_groups | default([]) }}"
+ tags:
+ - permissions
+
+- name: Manage permissions
+ ovirt_permission:
+ auth: "{{ ovirt_auth }}"
+ state: "{{ item.state | default(omit) }}"
+ group_name: "{{ item.group_name | default(omit) }}"
+ user_name: "{{ item.user_name | default(omit) }}"
+ authz_name: "{{ item.authz_name }}"
+ object_type: "{{ item.object_type }}"
+ object_name: "{{ item.object_name }}"
+ role: "{{ item.role }}"
+ with_items:
+ - "{{ permissions | default([]) }}"
+ tags:
+ - permissions
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/storages/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/storages/README.md
new file mode 100644
index 00000000..0b716bf5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/storages/README.md
@@ -0,0 +1,65 @@
+oVirt Storages
+==============
+
+The `storages` role is used to set up oVirt storages.
+
+Role Variables
+--------------
+
+Each item in the `storages` dictionary can contain the following parameters (the key is always the name of the storage):
+
+| Name | Default value | Description |
+|-----------------|----------------|---------------------------------------|
+| master | false | If true, the storage will be added as the first storage, meaning it will be the master storage. |
+| domain_function | data | The function of the storage domain. Possible values are: <ul><li>iso</li><li>export</li><li>data</li></ul>. |
+| localfs | UNDEF | Dictionary defining local storage. |
+| nfs | UNDEF | Dictionary defining NFS storage. |
+| iscsi | UNDEF | Dictionary defining iSCSI storage. |
+| posixfs | UNDEF | Dictionary defining PosixFS storage. |
+| fcp | UNDEF | Dictionary defining FCP storage. |
+| glusterfs | UNDEF | Dictionary defining glusterFS storage. |
+| discard_after_delete | UNDEF | If true, storage domain blocks will be discarded upon deletion (the engine enables this by default). This parameter is relevant only for block-based storage domains. |
+
+More information about the storages parameters can be found in the [Ansible documentation](http://docs.ansible.com/ansible/ovirt_storage_domains_module.html).
+
+Example Playbook
+----------------
+
+```yaml
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ storages:
+ mynfsstorage:
+ master: true
+ state: present
+ nfs:
+ address: 1.2.3.4
+ path: /path
+ myiscsistorage:
+ state: present
+ iscsi:
+ target: iqn.2014-07.org.ovirt:storage
+ port: 3260
+ address: 10.11.12.13
+ username: username
+ password: password
+ lun_id: 3600140551fcc8348ea74a99b6760fbb4
+ discard_after_delete: false
+ myexporttemplates:
+ domain_function: export
+ nfs:
+ address: 100.101.102.103
+ path: /templates
+ myisostorage:
+ domain_function: iso
+ nfs:
+ address: 111.222.111.222
+ path: /iso
+
+ roles:
+ - ovirt.ovirt.infra.roles.storages
+```
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/storages/tasks/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/storages/tasks/main.yml
new file mode 100644
index 00000000..20fbdc7c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/roles/storages/tasks/main.yml
@@ -0,0 +1,103 @@
+---
+#################################################
+# Storages
+#################################################
+# First add master storage
+- name: Add master storage
+ ovirt_storage_domain:
+ auth: "{{ ovirt_auth }}"
+ state: "{{ item.value.state | default(omit) }}"
+ name: "{{ item.key }}"
+ domain_function: "{{ item.value.domain_function | default(omit) }}"
+ host: "{{ ovirt_infra_hosts[0].name }}"
+ data_center: "{{ data_center_name }}"
+ localfs: "{{ item.value.localfs | default(omit) }}"
+ nfs: "{{ item.value.nfs | default(omit) }}"
+ iscsi: "{{ item.value.iscsi | default(omit) }}"
+ posixfs: "{{ item.value.posixfs | default(omit) }}"
+ glusterfs: "{{ item.value.glusterfs | default(omit) }}"
+ fcp: "{{ item.value.fcp | default(omit) }}"
+ discard_after_delete: "{{ item.value.discard_after_delete | default(omit) }}"
+ with_dict: "{{ storages | default({}) }}"
+ when: item.value.master is defined and item.value.master
+ tags:
+ - storages
+
+# Next add rest of data storages
+- name: Add storages
+ ovirt_storage_domain:
+ auth: "{{ ovirt_auth }}"
+ state: "{{ item.value.state | default(omit) }}"
+ name: "{{ item.key }}"
+ domain_function: "{{ item.value.domain_function | default(omit) }}"
+ host: "{{ ovirt_infra_hosts[0].name }}"
+ data_center: "{{ data_center_name }}"
+ localfs: "{{ item.value.localfs | default(omit) }}"
+ nfs: "{{ item.value.nfs | default(omit) }}"
+ iscsi: "{{ item.value.iscsi | default(omit) }}"
+ posixfs: "{{ item.value.posixfs | default(omit) }}"
+ glusterfs: "{{ item.value.glusterfs | default(omit) }}"
+ fcp: "{{ item.value.fcp | default(omit) }}"
+ discard_after_delete: "{{ item.value.discard_after_delete | default(omit) }}"
+ with_dict: "{{ storages | default({}) }}"
+ when: item.value.domain_function is not defined
+ tags:
+ - storages
+
+# Next add export/iso storages
+- name: Add export/iso storages
+ ovirt_storage_domain:
+ auth: "{{ ovirt_auth }}"
+ state: "{{ item.value.state | default(omit) }}"
+ name: "{{ item.key }}"
+ domain_function: "{{ item.value.domain_function | default(omit) }}"
+ host: "{{ ovirt_infra_hosts[0].name }}"
+ data_center: "{{ data_center_name }}"
+ localfs: "{{ item.value.localfs | default(omit) }}"
+ nfs: "{{ item.value.nfs | default(omit) }}"
+ iscsi: "{{ item.value.iscsi | default(omit) }}"
+ posixfs: "{{ item.value.posixfs | default(omit) }}"
+ glusterfs: "{{ item.value.glusterfs | default(omit) }}"
+ fcp: "{{ item.value.fcp | default(omit) }}"
+ discard_after_delete: "{{ item.value.discard_after_delete | default(omit) }}"
+ with_dict: "{{ storages | default({}) }}"
+ when: item.value.domain_function is defined
+ tags:
+ - storages
+
+- name: Fetch storages
+ ovirt_storage_domain_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "type=nfs or type=iscsi or type=posixfs or type=glusterfs or type=fcp"
+ # pattern: "type=nfs or type=posixfs or type=glusterfs"
+ # pattern: "type=iscsi or type=fcp"
+ fetch_nested: true
+ register: sd_info
+ tags:
+ - storages
+ - storage_connections
+
+- name: Validate connections of storages
+ ovirt_storage_connection:
+ auth: "{{ ovirt_auth }}"
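+ # On Ansible < 2.6 the nested connection id is returned wrapped in a list, hence the version ternary below.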
+ id: "{{ ansible_version.full is version('2.6.0', '>=') | ternary(item.1.id, item.1.id[0]) }}"
+ storage: "{{ item.0.name }}"
+ address: "{{ storages[item.0.name][item.0.storage.type].address | default(omit) }}"
+ path: "{{ storages[item.0.name][item.0.storage.type].path | default(omit) }}"
+ nfs_timeout: "{{ storages[item.0.name][item.0.storage.type].timeout | default(omit) }}"
+ nfs_version: "{{ storages[item.0.name][item.0.storage.type].version | default(omit) }}"
+ nfs_retrans: "{{ storages[item.0.name][item.0.storage.type].retrans | default(omit) }}"
+ mount_options: "{{ storages[item.0.name][item.0.storage.type].mount_options | default(omit) }}"
+ username: "{{ storages[item.0.name][item.0.storage.type].username | default(omit) }}"
+ password: "{{ storages[item.0.name][item.0.storage.type].password | default(omit) }}"
+ port: "{{ storages[item.0.name][item.0.storage.type].port | default(omit) }}"
+ target: "{{ storages[item.0.name][item.0.storage.type].target | default(omit) }}"
+ force: true
+ with_subelements:
+ - "{{ sd_info.ovirt_storage_domains | default([]) }}"
+ - storage_connections
+ - skip_missing: yes
+ when: storages is defined and item.0.name in storages
+ tags:
+ - storages
+ - storage_connections
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/tasks/create_infra.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/tasks/create_infra.yml
new file mode 100644
index 00000000..152498ae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/tasks/create_infra.yml
@@ -0,0 +1,37 @@
+---
+- name: Run mac-pools sub-role
+ import_role:
+ name: ovirt.ovirt.infra.roles.mac_pools
+
+- name: Run datacenters sub-role
+ import_role:
+ name: ovirt.ovirt.infra.roles.datacenters
+
+- name: Run clusters sub-role
+ import_role:
+ name: ovirt.ovirt.infra.roles.clusters
+
+- name: Run hosts sub-role
+ import_role:
+ name: ovirt.ovirt.infra.roles.hosts
+
+- name: Run networks sub-role
+ import_role:
+ name: ovirt.ovirt.infra.roles.networks
+
+- name: Run storages sub-role
+ import_role:
+ name: ovirt.ovirt.infra.roles.storages
+
+- name: Run aaa-jdbc sub-role
+ import_role:
+ name: ovirt.ovirt.infra.roles.aaa_jdbc
+ when: users is defined or user_groups is defined
+
+- name: Run external-providers sub-role
+ import_role:
+ name: ovirt.ovirt.infra.roles.external_providers
+
+- name: Run permissions sub-role
+ import_role:
+ name: ovirt.ovirt.infra.roles.permissions
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/tasks/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/tasks/main.yml
new file mode 100644
index 00000000..68f3e496
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/tasks/main.yml
@@ -0,0 +1,48 @@
+---
+- block:
+ - name: Check if mandatory parameters are correct
+ fail:
+ msg: "one of the mandatory parameters data_center_name or compatibility_version is undefined"
+ when: "data_center_name is undefined or compatibility_version is undefined"
+
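+ # hosts_var_name holds the *name* of the variable containing the host list, so the role can be pointed at any inventory variable.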
+ - name: Get list of oVirt infra hosts we can use throughout the role
+ set_fact:
+ ovirt_infra_hosts: "{{ lookup('vars', hosts_var_name, default=[]) }}"
+
+ - name: Check if hosts are correct
+ fail:
+ msg: "'{{ lookup('vars', hosts_var_name) }}' variable does not contain mandatory parameter '{{ item[1] }}'"
+ when: item[1] not in item[0]
+ with_nested:
+ - "{{ ovirt_infra_hosts }}"
+ - ['name']
+
+ - name: Login to oVirt
+ ovirt_auth:
+ url: "{{ engine_url | default(lookup('env','OVIRT_URL')) | default(omit) }}"
+ username: "{{ engine_user | default(lookup('env','OVIRT_USERNAME')) | default(omit) }}"
+ hostname: "{{ engine_fqdn | default(lookup('env','OVIRT_HOSTNAME')) | default(omit) }}"
+ password: "{{ engine_password | default(lookup('env','OVIRT_PASSWORD')) | default(omit) }}"
+ ca_file: "{{ engine_cafile | default(lookup('env','OVIRT_CAFILE')) | default(omit) }}"
+ insecure: "{{ engine_insecure | default(true) }}"
+ when: ovirt_auth is undefined or not ovirt_auth
+ register: loggedin
+ tags:
+ - always
+
+ - name: Create infrastructure
+ import_tasks: create_infra.yml
+ when: data_center_state == 'present'
+
+ - name: Remove infrastructure
+ import_tasks: remove_infra.yml
+ when: data_center_state == 'absent'
+
+ always:
+ - name: Logout from oVirt
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
+ when: not loggedin.skipped | default(false)
+ tags:
+ - always
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/tasks/remove_infra.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/tasks/remove_infra.yml
new file mode 100644
index 00000000..d134788b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/infra/tasks/remove_infra.yml
@@ -0,0 +1,4 @@
+---
+- name: Run datacenters sub-role
+ import_role:
+ name: ovirt.ovirt.infra.roles.datacenters
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/README.md
new file mode 100644
index 00000000..3f8b767a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/README.md
@@ -0,0 +1,242 @@
+Deploy ManageIQ in oVirt
+==================================================
+
+The `manageiq` role downloads a ManageIQ/CloudForms QCOW image and deploys it into oVirt/Red Hat Virtualization (RHV).
+
+The role also creates a virtual machine, attaches the ManageIQ disk, waits for the ManageIQ system to initialize, and registers oVirt as an infrastructure provider.
+
+Requirements
+------------
+
+* [ovirt-imageio](http://www.ovirt.org/develop/release-management/features/storage/image-upload/) must be installed and running.
+
+Additionally, perform the following checks to ensure the required processes are running.
+* Check whether `ovirt-imageio-proxy` is running on the engine:
+
+ ```
+ systemctl status ovirt-imageio-proxy
+ ```
+
+* Check whether `ovirt-imageio-daemon` is running on the hosts:
+
+ ```
+ systemctl status ovirt-imageio-daemon
+ ```
+
+You will also need the CA certificate of the engine; set the `engine_cafile` variable to the path of the certificate.
+
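+For example (a minimal sketch matching the deployment example later in this README):
+
+```yaml
+engine_cafile: /etc/pki/ovirt-engine/ca.pem
+```
+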
+Limitations
+-----------
+
+ * We do not support Ansible check mode (dry run), because this role uses a few modules (such as the `command` module)
+ that do not support it. Once all the modules used by this role support check mode, the role will support it as well.
+
+Role Variables
+--------------
+
+QCOW variables:
+
+| Name | Default value | Description |
+|---------------|----------------------------------------------------------|--------------------------------------------------------------|
+| miq_qcow_url | http://releases.manageiq.org/manageiq-ovirt-hammer-6.qc2 | The URL of the ManageIQ QCOW image. |
+| miq_image_path | /tmp/ | Path the QCOW2 image will be downloaded to. If it is a directory, the base name of the URL on the remote server will be used. |
+| miq_image_checksum | UNDEF | If a checksum is defined, the digest of the destination file will be calculated after it is downloaded to ensure its integrity and verify that the transfer completed successfully. Format: `<algorithm>:<checksum>`, e.g. `checksum="sha256:D98291AC[...]B6DC7B97"`. |
+
+Engine login variables:
+
+| Name | Default value | Description |
+|---------------------|-------------------|-----------------------------------------|
+| engine_user | UNDEF | The user to access the engine. |
+| engine_password | UNDEF | The password of the 'engine_user'. |
+| engine_fqdn | UNDEF | The FQDN of the engine. |
+| engine_cafile | UNDEF | The path to the engine's CA certificate.|
+
+Virtual machine variables:
+
+| Name | Default value | Description |
+|-----------------------|---------------------|----------------------------------------------------------------|
+| miq_vm_name | manageiq_gaprindashvili-5 | The name of the ManageIQ virtual machine. |
+| miq_vm_cluster | Default | The cluster of the virtual machine. |
+| miq_vm_memory | 16GiB | The virtual machine's system memory. |
+| miq_vm_memory_guaranteed | UNDEF | Amount of minimal guaranteed memory of the Virtual Machine. miq_vm_memory_guaranteed parameter can't be lower than miq_vm_memory parameter. |
+| miq_vm_memory_max | UNDEF | Upper bound of virtual machine memory up to which memory hot-plug can be performed. |
+| miq_vm_cpu | 4 | The number of virtual machine CPU cores. |
+| miq_vm_cpu_shares | UNDEF | Set a CPU shares for this Virtual Machine. |
+| miq_vm_cpu_sockets | UNDEF | Number of virtual CPUs sockets of the Virtual Machine. |
+| miq_vm_cpu_threads | UNDEF | Number of virtual CPUs threads of the Virtual Machine. |
+| miq_vm_os | rhel_7x64 | The virtual machine operating system. |
+| miq_vm_root_password | `miq_app_password` | The root password for the virtual machine. |
+| miq_vm_cloud_init | UNDEF | The cloud init dictionary to be passed to the virtual machine. |
+| miq_vm_high_availability | true | If true, the ManageIQ virtual machine will be set as highly available. |
+| miq_vm_high_availability_priority | 50 | Indicates the priority of the virtual machine inside the run and migration queues. The value is an integer between 0 and 100. The higher the value, the higher the priority. |
+| miq_vm_delete_protected | true | If true, the ManageIQ virtual machine will be delete-protected. |
+| miq_debug_create | false | If true, sensitive data is logged; useful for debugging. |
+| miq_wait_for_ip_version | v4 | Specifies which IP version to wait for; either `v4` or `v6`. |
+| miq_wait_for_ip_timeout | 240 | Maximum amount of time the playbook should wait for the IP to be reported. |
+
+Virtual machine main disks variables (e.g. operating system):
+
+| Name | Default value | Description |
+|---------------------|----------------------|-----------------------------------------|
+| miq_vm_disk_name | `miq_vm_name` | The name of the virtual machine disk. |
+| miq_vm_disk_storage | UNDEF | The target storage domain of the disk. |
+| miq_vm_disk_size | Size of qcow disk | The virtual machine disk size. |
+| miq_vm_disk_interface | virtio_scsi | The virtual machine disk interface type.|
+| miq_vm_disk_format | cow | The format of the virtual machine disk. |
+
+Virtual machine extra disks (e.g. database, log, tmp): a dict named
+`miq_vm_disks` lets you describe each of the extra disks (see the example
+playbook). Note that this works only with CFME.
+For each disk, the following attributes can be set:
+
+| Name | Default value | Description |
+|-----------|---------------|----------------------------------------------------------------------|
+| name | `miq_vm_name`_`type` | The name of the virtual machine disk. |
+| size | UNDEF | The virtual machine disk size (`XXGiB`). |
+| interface | virtio_scsi | The virtual machine disk interface type (`virtio` or `virtio_scsi`). `virtio_scsi` is recommended, as `virtio` supports only a small number of disks. |
+| format | UNDEF | The format of the virtual machine disk (`raw` or `cow`). |
+| timeout | UNDEF | Timeout of disk creation. |
+
+Virtual machine NICs variables:
+
+| Name | Default value | Description |
+|---------------------|-------------------|------------------------------------------------------|
+| miq_vm_nics | {'name': 'nic1', 'profile_name': 'ovirtmgmt', 'interface': 'virtio'} | List of dictionaries that defines the virtual machine network interfaces. |
+
+Each item in the `miq_vm_nics` list can contain the following attributes (the default expands to the sketch shown after the table):
+
+| Name | Default value | Description |
+|--------------------|----------------|----------------------------------------------|
+| name | UNDEF | The name of the network interface. |
+| interface | UNDEF | Type of the network interface. |
+| mac_address | UNDEF | Custom MAC address of the network interface; by default it is obtained from the MAC pool. |
+| profile_name | UNDEF | Virtual network interface profile to be attached to VM network interface. |
+
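+For instance, the default value shown above expands to the following list (as set in the role defaults):
+
+```yaml
+miq_vm_nics:
+ - name: nic1
+ profile_name: ovirtmgmt
+ interface: virtio
+```
+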
+ManageIQ variables:
+
+| Name | Default value | Description |
+|--------------------|---------------------|----------------------------------------------------------------------------|
+| miq_app_username | admin | The username used to login to ManageIQ. |
+| miq_app_password | smartvm | The password of the user specified in `miq_app_username`, used to log in to ManageIQ. |
+| miq_username | admin | Alias of `miq_app_username` for backward compatibility. |
+| miq_password | smartvm | Alias of `miq_app_password` for backward compatibility. |
+| miq_db_username | root | The username to connect to the database. |
+| miq_db_password | `miq_app_password` | The password of the user specified in `miq_db_username`, used to connect to the database. |
+| miq_region | 0 | The ManageIQ region created in the database. Note: Works only with CFME. |
+| miq_company | My Company | The company name of the appliance. |
+| miq_disabled_roles | [] | List of ManageIQ server roles to disable on the appliance. |
+| miq_enabled_roles | [] | List of ManageIQ server roles to enable on the appliance. |
+
+On both ManageIQ and CloudForms, the default enabled server roles are:
+ - `automate` - Automation Engine
+ - `database_operations` - Database Operations
+ - `event` - Event Monitor
+ - `ems_inventory` - Provider Inventory
+ - `ems_operations` - Provider Operations
+ - `reporting` - Reporting
+ - `scheduler` - Scheduler
+ - `smartstate` - SmartState Analysis
+ - `user_interface` - User Interface
+ - `websocket` - Websocket
+ - `web_services` - Web Services
+
+RHV provider and RHV metrics variables:
+
+| Name | Default value | Description |
+|-----------------------|----------------|--------------------------------------------------------|
+| miq_rhv_provider_name | RHV provider | Name of the RHV provider to be displayed in ManageIQ. |
+| metrics_fqdn | UNDEF | FQDN of the oVirt/RHV metrics. |
+| metrics_user | engine_history | The user used to connect to the metrics server. |
+| metrics_password | "" | The password of the `metrics_user`. |
+| metrics_port | 5432 | Port to connect to oVirt/RHV metrics. |
+| metrics_db_name | ovirt_engine_history | Database name of the oVirt engine metrics database. |
+
+Example Playbook
+----------------
+
+Note that for passwords you should use Ansible Vault.
+
+Here is an example of how to deploy CFME:
+
+```yaml
+ - name: Deploy CFME to oVirt engine
+ hosts: localhost
+ gather_facts: no
+
+ vars_files:
+ # Contains encrypted `engine_password` variable using ansible-vault
+ - passwords.yml
+
+ vars:
+ engine_fqdn: ovirt-engine.example.com
+ engine_user: admin@internal
+
+ miq_qcow_url: https://cdn.example.com/cfme-rhevm-5.9.1.2-1.x86_64.qcow2
+ miq_vm_name: cfme_59
+ miq_vm_cluster: mycluster
+ miq_vm_cloud_init:
+ host_name: "{{ miq_vm_name }}"
+ miq_vm_disks:
+ database:
+ name: "{{ miq_vm_name }}_database"
+ size: 10GiB
+ interface: virtio_scsi
+ format: raw
+ log:
+ name: "{{ miq_vm_name }}_log"
+ size: 10GiB
+ interface: virtio_scsi
+ format: cow
+ tmp:
+ name: "{{ miq_vm_name }}_tmp"
+ size: 10GiB
+ interface: virtio_scsi
+ format: cow
+ miq_disabled_roles:
+ - smartstate
+ miq_enabled_roles:
+ - notifier
+ - ems_metrics_coordinator
+ - ems_metrics_collector
+ - ems_metrics_processor
+ - embedded_ansible
+
+ roles:
+ - manageiq
+ collections:
+ - ovirt.ovirt
+```
+
+Here is an example of how to deploy ManageIQ:
+
+```yaml
+---
+- name: oVirt ManageIQ deployment
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars_files:
+ # Contains encrypted `engine_password` and `metrics_password`
+ # variables using ansible-vault
+ - passwords.yml
+
+ vars:
+ engine_fqdn: ovirt.example.com
+ engine_user: admin@internal
+ engine_cafile: /etc/pki/ovirt-engine/ca.pem
+
+ miq_qcow_url: http://releases.manageiq.org/manageiq-ovirt-hammer-6.qc2
+ miq_vm_name: manageiq_hammer6
+ miq_vm_cluster: mycluster
+
+ metrics_fqdn: metrics.example.com
+ metrics_port: 8443
+ metrics_user: admin
+
+
+ roles:
+ - manageiq
+ collections:
+ - ovirt.ovirt
+```
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/defaults/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/defaults/main.yml
new file mode 100644
index 00000000..6e0b7bd9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/defaults/main.yml
@@ -0,0 +1,81 @@
+---
+### This option disables no_log of tasks using sensitive data:
+miq_debug_create: false
+
+### Wait for IP version (v4 or v6):
+miq_wait_for_ip_version: v4
+miq_wait_for_ip_timeout: 240
+
+### ManageIQ/CloudForms ###
+miq_image_path: /tmp
+
+# QCOW2 ManageIQ/CloudForms image URL:
+miq_qcow_url: http://releases.manageiq.org/manageiq-ovirt-hammer-6.qc2
+
+# ManageIQ/CloudForms application credentials
+# We keep miq_{username,password} for backward compatibility
+miq_username: admin
+miq_password: smartvm
+miq_app_username: "{{ miq_username }}"
+miq_app_password: "{{ miq_password }}"
+
+# ManageIQ/CloudForms database credentials
+miq_db_username: root
+miq_db_password: "{{ miq_app_password }}"
+
+# ManageIQ/CloudForms region
+miq_region: 0
+miq_region_id: 1
+
+# ManageIQ/CloudForms company name
+miq_company: My Company
+
+# Providers:
+miq_rhv_provider_name: RHV provider
+miq_initialize: true
+
+### oVirt/RHV ###
+# VM variables:
+miq_vm_name: manageiq_gaprindashvili-5
+miq_vm_cluster: Default
+miq_vm_memory: 16GiB
+miq_vm_cpu: 4
+miq_vm_os: rhel_7x64
+miq_vm_root_password: "{{ miq_app_password }}"
+miq_vm_high_availability: true
+miq_vm_high_availability_priority: 50
+miq_vm_delete_protected: true
+
+# Vm disks
+miq_vm_disk_interface: virtio
+miq_vm_disk_format: cow
+miq_disk_deploy_failed: false
+
+# Additional disks.
+# Default one is database disk.
+miq_vm_disks:
+ database:
+ name: "{{ miq_vm_name }}_database"
+ size: 50GiB
+ interface: virtio_scsi
+ format: raw
+ timeout: 900
+
+# Vm NICS:
+miq_vm_nics:
+ - name: nic1
+ profile_name: ovirtmgmt
+ interface: virtio
+
+# Metrics DB name
+metrics_db_name: ovirt_engine_history
+metrics_port: 5432
+metrics_user: engine_history
+metrics_password: ''
+
+# ManageIQ/CloudForms roles
+miq_disabled_roles: []
+miq_enabled_roles: []
+
+# Command to initialize cloudforms
+miq_init_cmd: "appliance_console_cli -i -r {{ miq_region }} -U {{ miq_db_username }} -p '{{ miq_db_password }}' -k -f"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/examples/cfme.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/examples/cfme.yml
new file mode 100644
index 00000000..15d0d46c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/examples/cfme.yml
@@ -0,0 +1,50 @@
+---
+- name: RHV CFME deployment
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars_files:
+ # Contains encrypted `engine_password` and `metrics_password`
+ # variables using ansible-vault
+ - passwords.yml
+
+ vars:
+ engine_fqdn: ovirt-engine.example.com
+ engine_user: admin@internal
+
+ miq_vm_name: cfme_59
+ miq_qcow_url: https://cdn.example.com/cfme-rhevm-5.9.1.2-1.x86_64.qcow2
+ miq_vm_cluster: mycluster
+ miq_vm_root_password: securepassword
+ miq_vm_cloud_init:
+ host_name: "{{ miq_vm_name }}"
+ miq_vm_disks:
+ database:
+ name: "{{ miq_vm_name }}_database"
+ size: 10GiB
+ interface: virtio_scsi
+ format: raw
+ log:
+ name: "{{ miq_vm_name }}_log"
+ size: 10GiB
+ interface: virtio_scsi
+ format: cow
+ tmp:
+ name: "{{ miq_vm_name }}_tmp"
+ size: 10GiB
+ interface: virtio_scsi
+ format: raw
+ miq_disabled_roles:
+ - smartstate
+ miq_enabled_roles:
+ - notifier
+ - ems_metrics_coordinator
+ - ems_metrics_collector
+ - ems_metrics_processor
+ - embedded_ansible
+
+ roles:
+ - manageiq
+ collections:
+ - ovirt.ovirt
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/examples/manageiq.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/examples/manageiq.yml
new file mode 100644
index 00000000..21f13740
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/examples/manageiq.yml
@@ -0,0 +1,29 @@
+---
+- name: oVirt ManageIQ deployment
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars_files:
+ # Contains encrypted `engine_password` and `metrics_password`
+ # variables using ansible-vault
+ - passwords.yml
+
+ vars:
+ engine_fqdn: ovirt.example.com
+ engine_user: admin@internal
+ engine_cafile: /etc/pki/ovirt-engine/ca.pem
+
+ miq_qcow_url: http://releases.manageiq.org/manageiq-ovirt-hammer-6.qc2
+ miq_vm_name: manageiq_hammer6
+ miq_vm_cluster: mycluster
+
+ metrics_fqdn: metrics.example.com
+ metrics_port: 8443
+ metrics_user: admin
+
+
+ roles:
+ - manageiq
+ collections:
+ - ovirt.ovirt
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/examples/passwords.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/examples/passwords.yml
new file mode 100644
index 00000000..92c7613c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/examples/passwords.yml
@@ -0,0 +1,12 @@
+---
+# As an example this file is kept in plaintext; if you want to
+# encrypt this file, execute the following command:
+#
+# $ ansible-vault encrypt passwords.yml
+#
+# It will ask you for a password, which you must then pass to
+# ansible interactively when executing the playbook.
+#
+# $ ansible-playbook myplaybook.yml --ask-vault-pass
+#
+engine_password: 123456
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/cfme_add_disk.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/cfme_add_disk.yml
new file mode 100644
index 00000000..1277089e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/cfme_add_disk.yml
@@ -0,0 +1,16 @@
+- name: Add {{ item }} disk for CFME
+ ovirt_disk:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ miq_vm_disks[item].name | default(miq_vm_name ~ '_' ~ item) }}"
+ vm_name: "{{ miq_vm_name }}"
+ interface: "{{ miq_vm_disks[item].interface | default('virtio_scsi') }}"
+ size: "{{ miq_vm_disks[item].size | default(omit) }}"
+ format: "{{ miq_vm_disks[item].format | default(omit) }}"
+ timeout: "{{ miq_vm_disks[item].timeout | default(omit) }}"
+ storage_domain: "{{ miq_vm_disks[item].storage | default(disk_storage_domain.name if disk_storage_domain is defined else miq_vm_disk_storage) }}"
+ activate: yes
+
+- name: Add {{ item }} disk to CloudForms initialization command
+ no_log: "{{ not miq_debug_create }}"
+ set_fact:
+ miq_init_cmd2: "{{ miq_init_cmd2 }} {{ miq_init_cmd_options.disks[item] }} {{ miq_vm_disks_devices[item] }}"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/cfme_identify_disk_device.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/cfme_identify_disk_device.yml
new file mode 100644
index 00000000..16cffcec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/cfme_identify_disk_device.yml
@@ -0,0 +1,78 @@
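+# Device names inside the guest depend on the disk interface: virtio disks
+# show up as /dev/vdX, virtio_scsi disks as /dev/sdX. The database disk is
+# always the second device (vdb/sdb); log and tmp take the next free letters.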
+- name: Set database disk to /dev/vdb if disk interface is virtio
+ set_fact:
+ miq_vm_disks_db_device: /dev/vdb
+ when: "miq_vm_disks.database.interface == 'virtio'"
+
+- name: Set database disk to /dev/sdb if disk interface is virtio_scsi
+ set_fact:
+ miq_vm_disks_db_device: /dev/sdb
+ when: "miq_vm_disks.database.interface == 'virtio_scsi'"
+
+- name: Set miq_vm_disks_devices database
+ set_fact:
+ miq_vm_disks_devices:
+ database: "{{ miq_vm_disks_db_device }}"
+
+- block:
+
+ - name: Set log disk to /dev/vdc if disk interface is virtio
+ set_fact:
+ miq_vm_disks_log_device: /dev/vdc
+ when: "miq_vm_disks.log.interface == 'virtio'"
+
+ - name: Set log disk to /dev/sdc if disk interface is virtio_scsi
+ set_fact:
+ miq_vm_disks_log_device: /dev/sdc
+ when: "miq_vm_disks.log.interface == 'virtio_scsi'"
+
+ - name: Set fact miq_vm_disks_log_device_dict
+ set_fact:
+ miq_vm_disks_log_device_dict:
+ log: "{{ miq_vm_disks_log_device }}"
+
+ - name: Combine miq_vm_disks_devices with miq_vm_disks_log_device_dict
+ set_fact:
+ miq_vm_disks_devices: "{{ miq_vm_disks_devices | combine(miq_vm_disks_log_device_dict) }}"
+
+ when: "'log' in miq_vm_disks"
+
+- block:
+
+ - block:
+
+ - name: Set tmp disk to /dev/vdc if disk interface is virtio
+ set_fact:
+ miq_vm_disks_tmp_device: /dev/vdc
+ when: "miq_vm_disks.tmp.interface == 'virtio'"
+
+ - name: Set tmp disk to /dev/sdc if disk interface is virtio_scsi
+ set_fact:
+ miq_vm_disks_tmp_device: /dev/sdc
+ when: "miq_vm_disks.tmp.interface == 'virtio_scsi'"
+
+ when: "'log' not in miq_vm_disks"
+
+ - block:
+
+ - name: Set tmp disk to /dev/vdd if disk interface is virtio
+ set_fact:
+ miq_vm_disks_tmp_device: /dev/vdd
+ when: "miq_vm_disks.tmp.interface == 'virtio'"
+
+ - name: Set tmp disk to /dev/sdd if disk interface is virtio_scsi
+ set_fact:
+ miq_vm_disks_tmp_device: /dev/sdd
+ when: "miq_vm_disks.tmp.interface == 'virtio_scsi'"
+
+ when: "'log' in miq_vm_disks"
+
+ - name: Set fact miq_vm_disks_tmp_device_dict with tmp miq_vm_disks_tmp_device
+ set_fact:
+ miq_vm_disks_tmp_device_dict:
+ tmp: "{{ miq_vm_disks_tmp_device }}"
+
+ - name: Combine miq_vm_disks_devices with miq_vm_disks_tmp_device_dict
+ set_fact:
+ miq_vm_disks_devices: "{{ miq_vm_disks_devices | combine(miq_vm_disks_tmp_device_dict) }}"
+
+ when: "'tmp' in miq_vm_disks"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/deploy_qcow2.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/deploy_qcow2.yml
new file mode 100644
index 00000000..cca5258f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/deploy_qcow2.yml
@@ -0,0 +1,86 @@
+- name: Check if {{ miq_image_path }} is a directory
+ stat:
+ path: "{{ miq_image_path }}"
+ register: image_path_st
+
+- name: Download the qcow image
+ get_url:
+ url: "{{ miq_qcow_url }}"
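+ # If miq_image_path is a directory, append the basename of miq_qcow_url; otherwise treat it as the full destination file (double slashes are collapsed).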
+ dest: "{{ image_path_st.stat.isdir is defined and image_path_st.stat.isdir | ternary( miq_image_path~'/'~miq_qcow_url.rpartition('/')[-1], miq_image_path) | regex_replace('//', '/') }}"
+ checksum: "{{ miq_image_checksum | default(omit) }}"
+ register: downloaded_file
+
+- name: Check file type
+ command: "/usr/bin/file {{ downloaded_file.dest | quote }}"
+ changed_when: false
+ register: filetype
+
+- name: Fail if image is not qcow
+ fail:
+ msg: "The downloaded file is not a valid QCOW file."
+ when: '"QCOW" not in filetype.stdout'
+
+- name: Calculate image size in GiB
+ set_fact:
+ miq_image_size_gib: "{{ filetype.stdout_lines[0].split()[5] | int // 2**30 }}"
+
+#
+# Find default disk size for miq disk:
+#
+- block:
+ - name: Extract integer from miq_vm_disk_size
+ set_fact:
+ miq_vm_disk_size_gib: "{{ miq_vm_disk_size | regex_replace('GiB$') }}"
+
+ - name: Fail if miq_vm_disk_size is less than qcow size
+ fail:
+ msg: "Setting a disk size ({{ miq_vm_disk_size }}) lower than the image size ({{ miq_image_size_gib }}GiB) may result in disk corruption."
+ when: "miq_vm_disk_size_gib | int < miq_image_size_gib | int"
+ when: "miq_vm_disk_size is defined"
+
+#
+# Find default data storage domain for Miq disk:
+#
+- block:
+ - name: Fetch storages
+ ovirt_storage_domain_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "Clusters.name={{ miq_vm_cluster }} and status=active"
+ register: sd_info
+
+ - name: Find data domain
+ set_fact:
+ disk_storage_domain: "{{ sd_info.ovirt_storage_domains | json_query(the_query) | list | first }}"
+ vars:
+ the_query: "[?type=='data']"
+ when: miq_vm_disk_storage is undefined
+
+- name: Check if VM already exists
+ ovirt_vm_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "name={{ miq_vm_name }}"
+ register: vm_info
+
+- block:
+ - name: Deploy the qcow image to oVirt engine
+ ovirt_disk:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ miq_vm_disk_name | default(miq_vm_name) }}"
+ interface: "{{ miq_vm_disk_interface }}"
+ size: "{{ miq_vm_disk_size | default(miq_image_size_gib + 'GiB') }}"
+ format: "{{ miq_vm_disk_format }}"
+ image_path: "{{ downloaded_file.dest }}"
+ storage_domain: "{{ disk_storage_domain.name if disk_storage_domain is defined else miq_vm_disk_storage }}"
+ force: "{{ vm_info.ovirt_vms | length == 0 }}"
+ register: ovirt_disk
+
+ rescue:
+ - name: Remove failed disk
+ ovirt_disk:
+ auth: "{{ ovirt_auth }}"
+ state: absent
+ name: "{{ miq_vm_disk_name | default(miq_vm_name) }}"
+
+ - name: Set miq_disk_deploy_failed
+ set_fact:
+ miq_disk_deploy_failed: true
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/init_cfme.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/init_cfme.yml
new file mode 100644
index 00000000..a44caa68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/init_cfme.yml
@@ -0,0 +1,66 @@
+- name: Add host alias of appliance
+ no_log: "{{ not miq_debug_create }}"
+ add_host:
+ hostname: "{{ miq_ip_addr }}"
+ ansible_host: "{{ miq_ip_addr }}"
+ ansible_user: root
+ ansible_password: smartvm
+ ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+ changed_when: false
+
+- name: Wait for SSH port of appliance
+ wait_for:
+ host: "{{ miq_ip_addr }}"
+ port: 22
+ delay: 10
+
+- name: Fetch info about appliance
+ command: "rpm -qi cfme"
+ args:
+ warn: no
+ register: cfme_rpm
+ ignore_errors: yes
+ changed_when: false
+ delegate_to: "{{ miq_ip_addr }}"
+
+- name: Start ManageIQ server
+ systemd:
+ name: evmserverd
+ state: started
+ enabled: yes
+ delegate_to: "{{ miq_ip_addr }}"
+ when: "not cfme_rpm.rc == 0"
+
+- name: Check if ManageIQ/CloudForms was initialized
+ no_log: "{{ not miq_debug_create }}"
+ uri:
+ url: "https://{{ miq_ip_addr }}/api/"
+ validate_certs: no
+ user: "{{ miq_app_username }}"
+ password: smartvm
+ register: init_miq_vm
+ ignore_errors: yes
+
+- block:
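+ # ManageIQ/CloudForms namespaces database IDs by region: region N owns IDs starting at N * 10^12 + 1, which is what this computes.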
+ - name: Set region id
+ set_fact:
+ miq_region_id: "{{ miq_region|int * 1000000000000 + 1 }}"
+
+ - name: Initialize CloudForms
+ command: "{{ miq_init_cmd2 }}"
+ delegate_to: "{{ miq_ip_addr }}"
+ when: "init_miq_vm.failed"
+
+ when: "cfme_rpm.rc == 0 and init_miq_vm.failed"
+
+- name: Set root password of appliance
+ no_log: "{{ not miq_debug_create }}"
+ shell: set -o pipefail && echo '{{ miq_vm_root_password }}' | passwd --stdin root
+ delegate_to: "{{ miq_ip_addr }}"
+ changed_when: false
+
+- name: Disable cloud-init service
+ service:
+ enabled: no
+ name: cloud-init
+ delegate_to: "{{ miq_ip_addr }}"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/main.yml
new file mode 100644
index 00000000..d9716797
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/main.yml
@@ -0,0 +1,111 @@
+---
+- block:
+ ## Initialize authentication parameters:
+ - name: Set engine_url from engine_fqdn
+ set_fact:
+ engine_url: "{{ 'https://' ~ engine_fqdn | default(lookup('env', 'OVIRT_HOSTNAME')) ~ '/ovirt-engine/api' }}"
+ when: engine_fqdn is defined or lookup('env', 'OVIRT_HOSTNAME')
+
+ - name: Set engine credentials
+ set_fact:
+ engine_user: "{{ engine_user | default(lookup('env', 'OVIRT_USERNAME')) }}"
+ engine_password: "{{ engine_password | default(lookup('env', 'OVIRT_PASSWORD')) }}"
+ engine_url: "{{ engine_url | default(lookup('env', 'OVIRT_URL')) }}"
+ engine_cafile: "{{ engine_cafile | default(lookup('env', 'OVIRT_CAFILE')) }}"
+
+ - name: Login to oVirt engine
+ ovirt_auth:
+ username: "{{ engine_user }}"
+ password: "{{ engine_password }}"
+ url: "{{ engine_url }}"
+ ca_file: "{{ engine_cafile }}"
+ insecure: "{{ engine_cafile | length == 0 }}"
+ when: ovirt_auth is undefined or not ovirt_auth
+ register: loggedin
+ tags:
+ - always
+
+ - name: Deploy qcow disk
+ include_tasks: deploy_qcow2.yml
+
+ - block:
+ - name: Create ManageIQ virtual machine
+ ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ state: present
+ name: "{{ miq_vm_name }}"
+ cluster: "{{ miq_vm_cluster }}"
+ memory: "{{ miq_vm_memory }}"
+ memory_max: "{{ miq_vm_memory_max | default(omit) }}"
+ memory_guaranteed: "{{ miq_vm_memory_guaranteed | default(omit) }}"
+ cpu_cores: "{{ miq_vm_cpu }}"
+ cpu_shares: "{{ miq_vm_cpu_shares | default(omit) }}"
+ cpu_sockets: "{{ miq_vm_cpu_sockets | default(omit) }}"
+ cpu_threads: "{{ miq_vm_cpu_threads | default(omit) }}"
+ operating_system: "{{ miq_vm_os }}"
+ high_availability: "{{ miq_vm_high_availability }}"
+ high_availability_priority: "{{ miq_vm_high_availability_priority }}"
+ delete_protected: "{{ miq_vm_delete_protected }}"
+ type: server
+ disks:
+ - id: "{{ ovirt_disk.id }}"
+ bootable: true
+ nics: "{{ miq_vm_nics }}"
+ register: create_vm
+
+ - name: Duplicate miq_init_cmd variable to override it
+ set_fact:
+ miq_init_cmd2: "{{ miq_init_cmd }}"
+
+ - include_tasks: cfme_identify_disk_device.yml
+
+ - include_tasks: cfme_add_disk.yml
+ when: "item in miq_vm_disks"
+ with_items: "{{ miq_vm_disks_types }}"
+
+ - name: Ensure virtual machine is running
+ ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ state: running
+ name: "{{ miq_vm_name }}"
+ cloud_init: "{{ miq_vm_cloud_init | default(omit) }}"
+
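+ # The until-condition is built as a string so the matching IP filter (ovirtvmipv4 or ovirtvmipv6) is selected at runtime.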
+ - set_fact:
+ ip_cond: "vm_info.ovirt_vms | ovirt.ovirt.ovirtvmip{{ miq_wait_for_ip_version }} | length > 0"
+
+ - name: Wait for VM IP
+ ovirt_vm_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "name={{ miq_vm_name }}"
+ fetch_nested: true
+ nested_attributes: ips
+ until: "ip_cond"
+ retries: "{{ miq_wait_for_ip_timeout // 10 }}"
+ delay: 10
+ register: vm_info
+
+ - name: ManageIQ host IPv4 address
+ set_fact:
+ miq_ip_addr: "{{ vm_info.ovirt_vms | ovirt.ovirt.ovirtvmipv4 }}"
+ when: miq_wait_for_ip_version == 'v4'
+
+ - name: ManageIQ host IPv6 address
+ set_fact:
+ miq_ip_addr: "{{ vm_info.ovirt_vms | ovirt.ovirt.ovirtvmipv6 }}"
+ when: miq_wait_for_ip_version == 'v6'
+
+ - block:
+ - include_tasks: init_cfme.yml
+ - include_tasks: wait_for_api.yml
+
+ when: "miq_initialize"
+ when: "not miq_disk_deploy_failed"
+
+ always:
+ - name: Logout from oVirt engine
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
+ when: not loggedin.skipped | default(false)
+ tags:
+ - always
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/manage_appliance_roles.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/manage_appliance_roles.yml
new file mode 100644
index 00000000..d0e14eb4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/manage_appliance_roles.yml
@@ -0,0 +1,33 @@
+---
+- name: Get the list of enabled roles
+ uri:
+ url: "https://{{ miq_ip_addr }}/api/servers/{{ miq_region_id }}/settings"
+ user: "{{ miq_app_username }}"
+ password: "{{ miq_app_password }}"
+ method: GET
+ validate_certs: no
+ register: miq_active_roles_json
+
+- name: Extract the roles from the JSON output
+ set_fact:
+ miq_active_roles: "{{ miq_active_roles_json.json.server.role.split(',') }}"
+
+- name: Remove roles from the list of active roles
+ set_fact:
+ miq_active_roles: "{{ miq_active_roles | difference(miq_disabled_roles) }}"
+
+- name: Add extra roles to list of active roles
+ set_fact:
+ miq_active_roles: "{{ miq_active_roles | union(miq_enabled_roles) }}"
+
+- name: Update list of active roles
+ uri:
+ url: https://{{ miq_ip_addr }}/api/servers/{{ miq_region_id }}/settings
+ user: "{{ miq_app_username }}"
+ password: "{{ miq_app_password }}"
+ method: PATCH
+ validate_certs: no
+ body_format: json
+ body:
+ server:
+ role: "{{ miq_active_roles | join(',') }}"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/wait_for_api.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/wait_for_api.yml
new file mode 100644
index 00000000..abb1be34
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/tasks/wait_for_api.yml
@@ -0,0 +1,68 @@
+- name: Wait for ManageIQ/CloudForms API
+ no_log: "{{ not miq_debug_create }}"
+ uri:
+ url: "https://{{ miq_ip_addr }}/api/"
+ validate_certs: no
+ user: "{{ miq_app_username }}"
+ password: smartvm
+ register: miq_vm
+ until: "miq_vm.status == 200"
+ retries: 50
+ delay: 20
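+ # 50 retries x 20 seconds: wait up to ~17 minutes for the API to come up.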
+
+- name: Set application admin password
+ no_log: "{{ not miq_debug_create }}"
+ uri:
+ url: "https://{{ miq_ip_addr }}/api/users/{{ miq_region_id }}"
+ validate_certs: no
+ method: POST
+ user: "{{ miq_app_username }}"
+ password: smartvm
+ force_basic_auth: yes
+ body_format: json
+ body:
+ action: "edit"
+ resource:
+ password: "{{ miq_app_password | string }}"
+ register: miq_admin_password
+ changed_when: "miq_admin_password.status == 201 or miq_admin_password.status == 200"
+ failed_when:
+ - "miq_admin_password.json is defined and 'error' in miq_admin_password.json"
+
+- name: Update ManageIQ company name
+ uri:
+ url: "https://{{ miq_ip_addr }}/api/servers/{{ miq_region_id }}/settings"
+ user: "{{ miq_app_username }}"
+ password: "{{ miq_app_password }}"
+ method: PATCH
+ validate_certs: no
+ body_format: json
+ body:
+ server:
+ company: "{{ miq_company }}"
+ register: miq_update_company
+ changed_when: "miq_update_company.status == 201 or miq_update_company.status == 200"
+ failed_when:
+ - "miq_update_company.json is defined and 'error' in miq_update_company.json"
+
+- include_tasks: manage_appliance_roles.yml
+
+- name: Add oVirt/RHV provider to ManageIQ/CloudForms
+ no_log: "{{ not miq_debug_create }}"
+ uri:
+ url: "https://{{ miq_ip_addr }}/api/providers"
+ validate_certs: no
+ method: POST
+ user: "{{ miq_app_username }}"
+ password: "{{ miq_app_password }}"
+ body: "{{ lookup('template', 'add_rhv_provider.j2') }}"
+ force_basic_auth: yes
+ body_format: json
+ register: miq_rhv_provider
+ changed_when: "miq_rhv_provider.status == 201 or miq_rhv_provider.status == 200"
+ failed_when:
+ - "miq_rhv_provider.json is defined and 'error' in miq_rhv_provider.json"
+ - "miq_rhv_provider.json.error.message is defined and 'has already been taken' not in miq_rhv_provider.json.error.message"
+ # FIXME: If the provider already exists under a different name, don't fail; the name should be
+ # changed once an Ansible module for managing providers exists:
+ - "miq_rhv_provider.json.error.message is defined and 'Host Name has to be unique per provider type' not in miq_rhv_provider.json.error.message"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/templates/add_rhv_provider.j2 b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/templates/add_rhv_provider.j2
new file mode 100644
index 00000000..52f2ad53
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/templates/add_rhv_provider.j2
@@ -0,0 +1,43 @@
+{
+ "type": "ManageIQ::Providers::Redhat::InfraManager",
+ "name": "{{ miq_rhv_provider_name }}",
+ "connection_configurations": [{
+ "endpoint": {
+ "role": "default",
+ {% if engine_fqdn is defined %}
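+ {# engine_fqdn may be given as "host" or "host:port"; emit an explicit port only when one is present. #}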
+ "hostname": "{{ engine_fqdn.split(':')[0] }}",
+ {% if engine_fqdn.split(':') | length > 1 %}
+ "port": "{{ engine_fqdn.split(':')[1] }}",
+ {% endif %}
+ {% else %}
+ "hostname": "{{ engine_url | urlsplit('hostname') }}",
+ {% if engine_url | urlsplit('port') != "" %}
+ "port": "{{ engine_url | urlsplit('port') }}",
+ {% endif %}
+ {% endif %}
+ "verify_ssl": {{ engine_cafile != '' }},
+ {% if engine_cafile != '' %}
+ "certificate_authority": {{ lookup('file', engine_cafile) | to_json }}
+ {% endif %}
+ },
+ "authentication": {
+ "userid": "{{ engine_user }}",
+ "password": "{{ engine_password }}"
+ }
+ }{% if metrics_fqdn is defined %},{% endif %}
+ {% if metrics_fqdn is defined %}
+ {
+ "endpoint": {
+ "role": "metrics",
+ "path": "{{ metrics_db_name }}",
+ "hostname": "{{ metrics_fqdn }}",
+ "port": "{{ metrics_port }}"
+ },
+ "authentication": {
+ "userid": "{{ metrics_user }}",
+ "password": "{{ metrics_password }}"
+ }
+ }
+ {% endif %}
+ ]
+}
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/vars/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/vars/main.yml
new file mode 100644
index 00000000..9f9efcce
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/manageiq/vars/main.yml
@@ -0,0 +1,10 @@
+---
+miq_vm_disks_types:
+ - database
+ - log
+ - tmp
+miq_init_cmd_options:
+ disks:
+ database: "-b"
+ log: "-l"
+ tmp: "-t"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/README.md
new file mode 100644
index 00000000..1809916d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/README.md
@@ -0,0 +1,84 @@
+oVirt Repositories
+==================
+
+The `repositories` role is used to set up the repositories required for
+oVirt engine or host installation. By default it copies the content of
+/etc/yum.repos.d/ to /tmp/repo-backup-{{timestamp}}, so the operation is easy to undo.
+
+Role Variables
+--------------
+
+| Name | Default value | Description |
+|--------------------------------------------|-----------------------|-------------------------------------------|
+| ovirt_repositories_ovirt_release_rpm | UNDEF | URL of oVirt release package, which contains required repositories configuration. |
+| ovirt_repositories_ovirt_release_rpm_gpg | https://plain.resources.ovirt.org/pub/keys/RPM-GPG-ovirt-v2 | Address of the rpm GPG key. |
+| ovirt_repositories_disable_gpg_check | False | Disable the GPG check for <i>ovirt_repositories_ovirt_release_rpm</i>. Defaults to False unless 'master.rpm' appears in <i>ovirt_repositories_ovirt_release_rpm</i>. |
+| ovirt_repositories_use_subscription_manager| False | If true, repositories from Subscription Manager are used and the value of <i>ovirt_repositories_ovirt_release_rpm</i> is ignored. |
+| ovirt_repositories_ovirt_version | 4.4 | oVirt release version (Supported versions [4.1, 4.2, 4.3, 4.4]). Will be used to enable the required repositories and enable modules. |
+| ovirt_repositories_target_host | engine | Type of the target machine, which should be one of [engine, host, rhvh]. This parameter takes effect only when <i>ovirt_repositories_use_subscription_manager</i> is set to True. If an incorrect version or target is specified, no repositories are enabled. |
+| ovirt_repositories_rh_username | UNDEF | Username to use for subscription manager. |
+| ovirt_repositories_rh_password | UNDEF | Password to use for subscription manager. |
+| ovirt_repositories_pool_ids | UNDEF | List of pools ids to subscribe to. |
+| ovirt_repositories_pools | UNDEF | Specify a list of subscription pool names. Use <i>ovirt_repositories_pool_ids</i> instead if possible, as it is much faster. |
+| ovirt_repositories_subscription_manager_repos| [] | List of repositories to enable via subscription-manager. By default a list of repositories for each {{ovirt_repositories_target_host}}_{{ovirt_repositories_ovirt_version}} combination ships in the vars folder. |
+| ovirt_repositories_repos_backup | True | When set to `False`, original repositories won't be backed up. |
+| ovirt_repositories_repos_backup_path | /tmp/repo-backup-{{timestamp}} | Directory to back up the original repositories configuration. |
+| ovirt_repositories_force_register | False | If True, register the system even if it is already registered. |
+| ovirt_repositories_rhsm_server_hostname | UNDEF | Hostname of the RHSM server. By default it is taken from the RHSM configuration. |
+| ovirt_repositories_clear | False | If True, all repositories are unregistered before new ones are registered. |
+
+Example Playbook
+----------------
+
+```yaml
+---
+- name: Setup repositories using oVirt release package
+ hosts: localhost
+
+ vars:
+ ovirt_repositories_ovirt_release_rpm: http://resources.ovirt.org/pub/yum-repo/ovirt-master-release.rpm
+
+ roles:
+ - repositories
+ collections:
+ - ovirt.ovirt
+
+- name: Setup repositories using Subscription Manager
+ hosts: localhost
+
+ vars_files:
+ # Contains encrypted `ovirt_repositories_rh_username` and
+ # `ovirt_repositories_rh_password` variables using ansible-vault
+ - passwords.yml
+
+ vars:
+ ovirt_repositories_use_subscription_manager: True
+ ovirt_repositories_force_register: True
+ ovirt_repositories_rh_username: "{{ovirt_repositories_rh_username}}"
+ ovirt_repositories_rh_password: "{{ovirt_repositories_rh_password}}"
+ # The following pool IDs are not valid and should be replaced.
+ ovirt_repositories_pool_ids:
+ - 0123456789abcdef0123456789abcdef
+ - 1123456789abcdef0123456789abcdef
+
+ roles:
+ - repositories
+ collections:
+ - ovirt.ovirt
+
+
+- name: Setup repositories using Subscription Manager pool name
+ hosts: localhost
+
+ vars:
+ ovirt_repositories_use_subscription_manager: True
+ ovirt_repositories_force_register: True
+ ovirt_repositories_rh_username: "{{ovirt_repositories_rh_username}}"
+ ovirt_repositories_rh_password: "{{ovirt_repositories_rh_password}}"
+ ovirt_repositories_pools:
+ - "Red Hat Cloud Infrastructure, Premium (2-sockets)"
+
+ roles:
+ - repositories
+ collections:
+ - ovirt.ovirt
+```
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/defaults/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/defaults/main.yml
new file mode 100644
index 00000000..d49c4215
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/defaults/main.yml
@@ -0,0 +1,13 @@
+---
+ovirt_repositories_repos_backup: true
+ovirt_repositories_repos_backup_path: "/tmp/repo-backup-{{ '%Y-%m-%d-%H:%M:%S' | strftime(ansible_date_time.epoch) }}"
+ovirt_repositories_use_subscription_manager: False
+ovirt_repositories_force_register: False
+ovirt_repositories_clear: False
+ovirt_repositories_ovirt_version: 4.4
+ovirt_repositories_target_host: engine
+ovirt_repositories_subscription_manager_repos: []
+ovirt_repositories_ovirt_dnf_modules: ["pki-deps", "postgresql:12", "javapackages-tools"]
+ovirt_repositories_rh_dnf_modules: ["pki-deps", "postgresql:12"]
+ovirt_repositories_ovirt_release_rpm_gpg: https://plain.resources.ovirt.org/pub/keys/RPM-GPG-ovirt-v2
+ovirt_repositories_disable_gpg_check: "{{ True if 'master.rpm' in ovirt_repositories_ovirt_release_rpm else False }}"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/examples/ovirt_repositories_release_rpm.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/examples/ovirt_repositories_release_rpm.yml
new file mode 100644
index 00000000..b5aa62bc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/examples/ovirt_repositories_release_rpm.yml
@@ -0,0 +1,11 @@
+---
+- name: Setup repositories using oVirt release package
+ hosts: localhost
+
+ vars:
+ ovirt_repositories_ovirt_release_rpm: http://resources.ovirt.org/pub/yum-repo/ovirt-master-release.rpm
+
+ roles:
+ - repositories
+ collections:
+ - ovirt.ovirt
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/examples/ovirt_repositories_subscription_manager.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/examples/ovirt_repositories_subscription_manager.yml
new file mode 100644
index 00000000..c796702d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/examples/ovirt_repositories_subscription_manager.yml
@@ -0,0 +1,22 @@
+---
+- name: Setup repositories using Subscription Manager
+ hosts: localhost
+
+ vars_files:
+ # Contains encrypted `ovirt_repositories_rh_username`
+ # and `ovirt_repositories_rh_password` variables using ansible-vault
+ - passwords.yml
+
+ vars:
+ ovirt_repositories_use_subscription_manager: "True"
+ ovirt_repositories_rh_username: "{{ovirt_repositories_rh_username}}"
+ ovirt_repositories_rh_password: "{{ovirt_repositories_rh_password}}"
+ # The following pool IDs are not valid and should be replaced.
+ ovirt_repositories_pool_ids:
+ - 0123456789abcdef0123456789abcdef
+ - 1123456789abcdef0123456789abcdef
+
+ roles:
+ - repositories
+ collections:
+ - ovirt.ovirt
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/examples/passwords.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/examples/passwords.yml
new file mode 100644
index 00000000..98a551d7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/examples/passwords.yml
@@ -0,0 +1,13 @@
+---
+# As an example this file is kept in plaintext; if you want to
+# encrypt this file, execute the following command:
+#
+# $ ansible-vault encrypt passwords.yml
+#
+# It will ask you for a password, which you must then pass to
+# ansible interactively when executing the playbook.
+#
+# $ ansible-playbook myplaybook.yml --ask-vault-pass
+#
+ovirt_repositories_rh_username: "myuser"
+ovirt_repositories_rh_password: "mypass"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/tasks/backup-repos.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/tasks/backup-repos.yml
new file mode 100644
index 00000000..6c85e4d1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/tasks/backup-repos.yml
@@ -0,0 +1,26 @@
+---
+- name: "Find repo files to backup"
+ find:
+ paths: "/etc/yum.repos.d"
+ patterns: "*.repo"
+ tags: backup
+ register: files_to_backup
+
+- name: Creating folder to backup repositories files
+ file:
+ path: "{{ ovirt_repositories_repos_backup_path }}"
+ state: directory
+ mode: 0755
+ tags: backup
+ when: files_to_backup.files
+
+- name: Copy current repositories files to backup folder
+ copy:
+ src: "{{ item.path }}"
+ dest: "{{ ovirt_repositories_repos_backup_path }}"
+ remote_src: yes
+ mode: preserve
+ with_items: "{{ files_to_backup.files }}"
+ tags:
+ - skip_ansible_lint
+ - backup
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/tasks/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/tasks/main.yml
new file mode 100644
index 00000000..5f6729e6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/tasks/main.yml
@@ -0,0 +1,19 @@
+---
+- name: Check if Subscription Manager is about to be used on a non-RHEL system
+ fail:
+ msg: "Subscription Manager can be used only on Red Hat Enterprise Linux"
+ when:
+ - ovirt_repositories_use_subscription_manager | bool
+ - ansible_distribution != 'RedHat'
+
+- name: Backup current repositories
+ include_tasks: backup-repos.yml
+ when: ovirt_repositories_repos_backup
+
+- name: Setup repositories using Subscription Manager
+ include_tasks: rh-subscription.yml
+ when: ovirt_repositories_use_subscription_manager | bool
+
+- name: Setup repositories using oVirt release package
+ include_tasks: rpm.yml
+ when: not ovirt_repositories_use_subscription_manager | bool
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/tasks/rh-subscription.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/tasks/rh-subscription.yml
new file mode 100644
index 00000000..59ac9573
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/tasks/rh-subscription.yml
@@ -0,0 +1,66 @@
+---
+- name: Check if mandatory variables are set
+ fail:
+ msg: "Either ovirt_repositories_pool_ids or ovirt_repositories_pools must be defined."
+ when:
+ - "ovirt_repositories_pool_ids is not defined"
+ - "ovirt_repositories_pools is not defined or ovirt_repositories_pools | list | length == 0"
+
+- name: Check if mandatory variables are set
+ fail:
+ msg: "Both ovirt_repositories_pool_ids and ovirt_repositories_pools can't be defined, only one of them."
+ when:
+ - "ovirt_repositories_pool_ids is defined"
+ - "ovirt_repositories_pools is defined"
+
+- name: Ensure subscription-manager package is installed
+ package:
+ name: subscription-manager
+ state: present
+
+- name: Register and subscribe to multiple pool IDs
+ redhat_subscription:
+ state: present
+ force_register: "{{ ovirt_repositories_force_register }}"
+ username: "{{ ovirt_repositories_rh_username | mandatory }}"
+ password: "{{ ovirt_repositories_rh_password | mandatory }}"
+ pool_ids: "{{ ovirt_repositories_pool_ids }}"
+ server_hostname: "{{ ovirt_repositories_rhsm_server_hostname | default(omit) }}"
+ when: ovirt_repositories_pool_ids is defined
+
+- name: Register to and subscribe to pool
+ redhat_subscription:
+ state: present
+ force_register: "{{ ovirt_repositories_force_register }}"
+ username: "{{ ovirt_repositories_rh_username | mandatory }}"
+ password: "{{ ovirt_repositories_rh_password | mandatory }}"
+ pool: "^({{ ovirt_repositories_pools | list | map('regex_escape') | join(')$|^(') }})$"
+ server_hostname: "{{ ovirt_repositories_rhsm_server_hostname | default(omit) }}"
+ when: ovirt_repositories_pools is defined
+
+- name: "Include {{ ovirt_repositories_target_host }}_{{ ovirt_repositories_ovirt_version }}.yml variables"
+ include_vars: "{{ ovirt_repositories_target_host }}_{{ ovirt_repositories_ovirt_version }}.yml"
+ when: ovirt_repositories_subscription_manager_repos | list | length == 0
+
+- name: Disable all repositories
+ rhsm_repository:
+ state: absent
+ name: "*"
+ when: ovirt_repositories_clear
+
+- name: Enable required repositories
+ rhsm_repository:
+ name: "{{ item }}"
+ state: "enabled"
+ with_items: "{{ ovirt_repositories_subscription_manager_repos }}"
+
+- name: Enable dnf modules
+ command: "dnf module enable -y {{ ovirt_repositories_rh_dnf_modules | join(' ') }}"
+ args:
+ warn: False
+ when:
+ - ovirt_repositories_ovirt_version|string >= '4.4'
+ - ovirt_repositories_target_host == 'engine'
+
+- name: Set ovirt_repositories_subscription_manager_repos to an empty list for the next run
+ include_vars: default.yml
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/tasks/rpm.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/tasks/rpm.yml
new file mode 100644
index 00000000..d5db9cbc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/tasks/rpm.yml
@@ -0,0 +1,20 @@
+---
+- name: Use oVirt GPG key
+ rpm_key:
+ state: present
+ key: "{{ ovirt_repositories_ovirt_release_rpm_gpg }}"
+ when: not ovirt_repositories_disable_gpg_check
+
+- name: Install oVirt release package
+ package:
+ name: "{{ ovirt_repositories_ovirt_release_rpm | mandatory }}"
+ state: present
+ disable_gpg_check: "{{ ovirt_repositories_disable_gpg_check }}"
+
+- name: Enable dnf modules
+ command: "dnf module enable -y {{ ovirt_repositories_ovirt_dnf_modules | join(' ') }}"
+ args:
+ warn: False
+ when:
+ - ovirt_repositories_ovirt_version|string >= '4.4'
+ - ovirt_repositories_target_host == 'engine'
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/default.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/default.yml
new file mode 100644
index 00000000..b15ff855
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/default.yml
@@ -0,0 +1 @@
+ovirt_repositories_subscription_manager_repos: []
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.1.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.1.yml
new file mode 100644
index 00000000..802be7a0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.1.yml
@@ -0,0 +1,6 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhel-7-server-rpms
+ - rhel-7-server-supplementary-rpms
+ - rhel-7-server-rhv-4.1-rpms
+ - rhel-7-server-rhv-4-tools-rpms
+ - jb-eap-7-for-rhel-7-server-rpms
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.2.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.2.yml
new file mode 100644
index 00000000..991edf08
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.2.yml
@@ -0,0 +1,7 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhel-7-server-rpms
+ - rhel-7-server-supplementary-rpms
+ - rhel-7-server-rhv-4.2-manager-rpms
+ - rhel-7-server-rhv-4-manager-tools-rpms
+ - rhel-7-server-ansible-2-rpms
+ - jb-eap-7.2-for-rhel-7-server-rpms
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.3.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.3.yml
new file mode 100644
index 00000000..314c5fbc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.3.yml
@@ -0,0 +1,7 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhel-7-server-rpms
+ - rhel-7-server-supplementary-rpms
+ - rhel-7-server-rhv-4.3-manager-rpms
+ - rhel-7-server-rhv-4-manager-tools-rpms
+ - rhel-7-server-ansible-2.9-rpms
+ - jb-eap-7.2-for-rhel-7-server-rpms
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.4.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.4.yml
new file mode 100644
index 00000000..fa365edd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/engine_4.4.yml
@@ -0,0 +1,7 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhel-8-for-x86_64-baseos-rpms
+ - rhel-8-for-x86_64-appstream-rpms
+ - rhv-4.4-manager-for-rhel-8-x86_64-rpms
+ - ansible-2.9-for-rhel-8-x86_64-rpms
+ - jb-eap-7.3-for-rhel-8-x86_64-rpms
+ - fast-datapath-for-rhel-8-x86_64-rpms
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.1.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.1.yml
new file mode 100644
index 00000000..47b23337
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.1.yml
@@ -0,0 +1,3 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhel-7-server-rpms
+ - rhel-7-server-rhv-4-mgmt-agent-rpms
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.2.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.2.yml
new file mode 100644
index 00000000..edb0956d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.2.yml
@@ -0,0 +1,4 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhel-7-server-rpms
+ - rhel-7-server-rhv-4-mgmt-agent-rpms
+ - rhel-7-server-ansible-2-rpms
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.3.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.3.yml
new file mode 100644
index 00000000..fe51604d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.3.yml
@@ -0,0 +1,4 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhel-7-server-rpms
+ - rhel-7-server-rhv-4-mgmt-agent-rpms
+ - rhel-7-server-ansible-2.9-rpms
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.4.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.4.yml
new file mode 100644
index 00000000..90e73e6e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/host_4.4.yml
@@ -0,0 +1,7 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhel-8-for-x86_64-baseos-rpms
+ - rhel-8-for-x86_64-appstream-rpms
+ - rhv-4-mgmt-agent-for-rhel-8-x86_64-rpms
+ - ansible-2.9-for-rhel-8-x86_64-rpms
+ - advanced-virt-for-rhel-8-x86_64-rpms
+ - fast-datapath-for-rhel-8-x86_64-rpms
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.1.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.1.yml
new file mode 100644
index 00000000..edaae59a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.1.yml
@@ -0,0 +1,2 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhel-7-server-rhvh-4-rpms
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.2.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.2.yml
new file mode 100644
index 00000000..edaae59a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.2.yml
@@ -0,0 +1,2 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhel-7-server-rhvh-4-rpms
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.3.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.3.yml
new file mode 100644
index 00000000..edaae59a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.3.yml
@@ -0,0 +1,2 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhel-7-server-rhvh-4-rpms
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.4.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.4.yml
new file mode 100644
index 00000000..aaf695bd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/repositories/vars/rhvh_4.4.yml
@@ -0,0 +1,2 @@
+ovirt_repositories_subscription_manager_repos:
+ - rhvh-4-for-rhel-8-x86_64-rpms
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/shutdown_env/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/shutdown_env/README.md
new file mode 100644
index 00000000..8d16f8f8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/shutdown_env/README.md
@@ -0,0 +1,53 @@
+oVirt environment shutdown
+=========
+
+The `shutdown_env` role iterates through all the entities (VMs and hosts) in an oVirt/RHV cluster and performs a clean and ordered shutdown.
+It also automatically detects and handles Hosted-Engine and hyper-converged GlusterFS environments as special cases.
+The role is intended to be run only against the engine machine.
+Please note that host shutdown is asynchronous, so the playbook terminates before the HE hosts are actually down.
+
+In a Hosted-Engine environment, global maintenance mode will be set:
+the user has to exit it manually in order to get the engine VM automatically powered up when needed.
+
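+To exit global maintenance mode manually, run the following on one of the Hosted-Engine hosts (the same command this role issues when unsetting maintenance in startup mode):
+
+```console
+# hosted-engine --set-maintenance --mode=none
+```
+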
+A startup mode is also available:
+in startup mode the role brings up all hosts with power management configured and
+unsets global maintenance mode if in a Hosted-Engine environment.
+Startup mode is executed only if the 'startup' tag is applied; shutdown mode is the default.
+Startup mode requires the engine to be already up:
+power it on if it runs on a dedicated host, or power on at least one of the HE hosts (two in a hyper-converged environment) and exit global maintenance mode, or manually start the engine VM with `hosted-engine --vm-start`. See the invocation sketch below.
+
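+A minimal invocation sketch for startup mode, assuming the example playbook shipped with this role is saved as `shutdown_env.yml` (the startup tasks are tagged `never`, so the tag must be requested explicitly):
+
+```console
+$ ansible-playbook shutdown_env.yml --tags "all,startup"
+```
+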
+Depending on the host power-on order, the engine may elect a new SPM host or reconstruct the master storage domain.
+The environment can take up to 10 minutes to come back to a stable condition.
+Possible improvements are tracked here: https://bugzilla.redhat.com/1609029
+
+Example Playbook
+----------------
+
+```yaml
+---
+- name: oVirt shutdown environment
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars:
+ engine_url: https://ovirt-engine.example.com/ovirt-engine/api
+ engine_user: admin@internal
+ engine_password: 123456
+ engine_cafile: /etc/pki/ovirt-engine/ca.pem
+
+ roles:
+ - role: shutdown_env
+ collections:
+ - ovirt.ovirt
+```
+
+Demo
+----
+Here is a demo showing a clean and ordered shutdown of a hyper-converged Hosted-Engine environment with 3 hosts and 3 regular VMs plus the HE one.
+[![asciicast](https://asciinema.org/a/261501.svg)](https://asciinema.org/a/261501)
+
+License
+-------
+
+Apache License 2.0
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/shutdown_env/defaults/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/shutdown_env/defaults/main.yml
new file mode 100644
index 00000000..87f80142
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/shutdown_env/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+host_names:
+ - '*'
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/shutdown_env/examples/passwords.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/shutdown_env/examples/passwords.yml
new file mode 100644
index 00000000..92c7613c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/shutdown_env/examples/passwords.yml
@@ -0,0 +1,12 @@
+---
+# As an example this file is kept in plaintext. If you want to
+# encrypt this file, please execute the following command:
+#
+# $ ansible-vault encrypt passwords.yml
+#
+# It will ask you for a password, which you must then pass to
+# ansible interactively when executing the playbook.
+#
+# $ ansible-playbook myplaybook.yml --ask-vault-pass
+#
+engine_password: 123456
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/shutdown_env/examples/shutdown_env.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/shutdown_env/examples/shutdown_env.yml
new file mode 100644
index 00000000..26340f80
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/shutdown_env/examples/shutdown_env.yml
@@ -0,0 +1,18 @@
+---
+- name: oVirt shutdown env
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars_files:
+ # Contains the `engine_password` variable, encrypted using ansible-vault
+ - passwords.yml
+
+ vars:
+ engine_url: https://ovirt.example.com/ovirt-engine/api
+ engine_user: admin@internal
+
+ roles:
+ - role: shutdown_env
+ collections:
+ - ovirt.ovirt
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/shutdown_env/tasks/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/shutdown_env/tasks/main.yml
new file mode 100644
index 00000000..6d51bfcc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/shutdown_env/tasks/main.yml
@@ -0,0 +1,226 @@
+---
+- block:
+
+ - name: Populate service facts
+ service_facts:
+
+ - name: Enforce ovirt-engine machine
+ fail:
+ msg: >
+ This role has been designed to be run only against the machine
+ where ovirt-engine is running.
+ when: '"ovirt-engine.service" not in ansible_facts.services'
+
+ - name: Enforce ovirt-engine status
+ fail:
+ msg: >
+ ovirt-engine is required to be enabled and running in order
+ to correctly run this role.
+ when: ansible_facts.services["ovirt-engine.service"].state != 'running'
+
+ - name: Login to oVirt
+ ovirt_auth:
+ url: "{{ engine_url | default(lookup('env','OVIRT_URL')) | default(omit) }}"
+ username: "{{ engine_user | default(lookup('env','OVIRT_USERNAME')) | default(omit) }}"
+ password: "{{ engine_password | default(lookup('env','OVIRT_PASSWORD')) | default(omit) }}"
+ ca_file: "{{ engine_cafile | default(lookup('env','OVIRT_CAFILE')) | default(omit) }}"
+ token: "{{ engine_token | default(lookup('env','OVIRT_TOKEN')) | default(omit) }}"
+ insecure: "{{ engine_insecure | default(true) }}"
+ when: ovirt_auth is undefined or not ovirt_auth
+ register: loggedin
+ tags:
+ - always
+
+ - name: Get hosts
+ ovirt_host_info:
+ auth: "{{ ovirt_auth }}"
+ all_content: true
+ register: hosts_result
+
+ - name: Default startup mode to false
+ set_fact:
+ startup: false
+
+ - name: Enable startup mode when the startup tag is applied
+ set_fact:
+ startup: true
+ tags: ['never', 'startup']
+
+ - name: Define a query for HE hosts
+ set_fact:
+ he_hosts: >-
+ {{ hosts_result.ovirt_hosts | selectattr('hosted_engine', 'defined') | selectattr('hosted_engine.configured') | list }}
+
+ - name: Define a query for non HE hosts
+ set_fact:
+ non_he_hosts: >-
+ {{ hosts_result.ovirt_hosts | difference(he_hosts) }}
+
+ - name: Define a query for non HE hosts with power management
+ set_fact:
+ non_he_hosts_ipmi: >-
+ {{ non_he_hosts | selectattr('power_management', 'defined') |
+ selectattr('power_management.enabled') | list }}
+
+ - name: Define a query for non HE hosts without power management
+ set_fact:
+ non_he_hosts_noipmi: "{{ non_he_hosts | difference(non_he_hosts_ipmi) }}"
+
+ - name: Define a query for hosts with power management
+ set_fact:
+ hosts_ipmi: >-
+ {{ hosts_result.ovirt_hosts | selectattr('power_management', 'defined') | selectattr('power_management.enabled') | list }}
+
+ - name: Define commands
+ set_fact:
+ he_shutdown_cmd: >-
+ while hosted-engine --vm-status | grep "\"vm\": \"up\"" >/dev/null;
+ do sleep 5;
+ done;
+ sanlock client shutdown -f 1;
+ shutdown -h now
+ non_he_noipmi_shutdown_cmd: >-
+ while pgrep qemu-kvm >/dev/null; do sleep 5; done; shutdown -h now
+ gmaintenance_mode_cmd: >-
+ hosted-engine --set-maintenance --mode=global
+ ugmaintenance_mode_cmd: >-
+ hosted-engine --set-maintenance --mode=none
+
+ - name: Get VM list
+ ovirt_vm_info:
+ auth: "{{ ovirt_auth }}"
+ all_content: true
+ register: vm_result
+
+ - block:
+ - name: Shutdown all VMs, except HostedEngine
+ ovirt_vm:
+ state: stopped
+ name: "{{ item.name }}"
+ auth: "{{ ovirt_auth }}"
+ wait: true
+ when: "item.origin != 'managed_hosted_engine'"
+ with_items:
+ - "{{ vm_result.ovirt_vms }}"
+ ignore_errors: true
+
+ - name: Refresh VM list
+ ovirt_vm_info:
+ auth: "{{ ovirt_auth }}"
+ all_content: true
+ register: vm_result
+
+ - name: Forcefully shutdown remaining VMs, except HostedEngine
+ ovirt_vm:
+ state: stopped
+ name: "{{ item.name }}"
+ auth: "{{ ovirt_auth }}"
+ wait: true
+ force: true
+ when: "item.origin != 'managed_hosted_engine' and item.status != 'down'"
+ with_items:
+ - "{{ vm_result.ovirt_vms }}"
+
+ - name: Shutdown hosts, except HE ones, via IPMI (if configured)
+ ovirt_host:
+ state: stopped
+ name: "{{ item.name }}"
+ auth: "{{ ovirt_auth }}"
+ with_items:
+ - "{{ non_he_hosts_ipmi }}"
+
+ - name: Shutdown remaining non HE hosts
+ command: >-
+ ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
+ -i /etc/pki/ovirt-engine/keys/engine_id_rsa
+ -p {{ item.ssh.port }}
+ -t root@{{ item.address }}
+ '{{ non_he_noipmi_shutdown_cmd }}'
+ async: 1000
+ poll: 0
+ with_items:
+ - "{{ non_he_hosts_noipmi }}"
+ ignore_errors: true
+
+ - name: Set global maintenance mode
+ command: >-
+ ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
+ -i /etc/pki/ovirt-engine/keys/engine_id_rsa
+ -p {{ item.ssh.port }} -t root@{{ item.address }}
+ '{{ gmaintenance_mode_cmd }}'
+ with_items:
+ - "{{ he_hosts }}"
+ ignore_errors: true
+ register: globalmm
+
+ - name: Count hosts where global maintenance mode was set
+ set_fact:
+ globalmm_set: "{{ globalmm.results | rejectattr('failed') | list | length }}"
+ when: globalmm is defined and globalmm.results is defined
+
+ - name: Enforce global maintenance mode
+ fail:
+ msg: >
+ Failed setting global maintenance mode.
+ when: he_hosts|length > 0 and globalmm_set|int == 0
+
+ - name: Warn about HE global maintenance mode
+ debug:
+ msg: >
+ HE global maintenance mode has been set; you have to exit it to get the engine VM started when needed
+ when: globalmm_set|int > 0
+
+ - name: Shutdown of HE hosts
+ command: >-
+ ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
+ -i /etc/pki/ovirt-engine/keys/engine_id_rsa -p {{ item.ssh.port }}
+ -t root@{{ item.address }} '{{ he_shutdown_cmd }}'
+ async: 1000
+ poll: 0
+ with_items:
+ - "{{ he_hosts }}"
+
+ - name: Shutdown engine host/VM
+ command: shutdown -h now
+ async: 1000
+ poll: 0
+
+ when: not startup
+
+ - block:
+ - name: Power-on IPMI configured hosts
+ ovirt_host:
+ state: started
+ name: "{{ item.name }}"
+ auth: "{{ ovirt_auth }}"
+ with_items:
+ - "{{ hosts_ipmi }}"
+
+ - name: Unset global maintenance mode
+ command: >-
+ ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
+ -i /etc/pki/ovirt-engine/keys/engine_id_rsa -p {{ item.ssh.port }}
+ -t root@{{ item.address }} '{{ ugmaintenance_mode_cmd }}'
+ with_items:
+ - "{{ he_hosts }}"
+ ignore_errors: true
+ register: uglobalmm
+
+ - name: Count hosts where global maintenance mode was unset
+ set_fact:
+ globalmm_set: "{{ uglobalmm.results | rejectattr('failed') | list | length }}"
+ when: uglobalmm is defined and uglobalmm.results is defined
+
+ - name: Enforce no global maintenance mode
+ fail:
+ msg: >
+ Failed unsetting global maintenance mode.
+ when: he_hosts|length > 0 and globalmm_set|int == 0
+ when: startup
+
+ always:
+ - name: Logout from oVirt
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
+ when: not loggedin.skipped | default(false)
+ tags:
+ - always
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/README.md b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/README.md
new file mode 100644
index 00000000..807cb232
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/README.md
@@ -0,0 +1,324 @@
+oVirt Virtual Machine Infrastructure
+====================================
+
+The `vm_infra` role manages the virtual machine infrastructure in oVirt.
+This role also creates an inventory of the virtual machines it defines, provided
+`wait_for_ip` is set to `true` and the state of the virtual machine is `running`.
+All defined virtual machines are part of the `ovirt_vm` inventory group.
+The role also creates an `ovirt_tag_{tag_name}` group for every
+tag assigned to a virtual machine and places all virtual machines with that tag
+into that inventory group.
+
+Consider the following variable structure:
+
+```yaml
+vms:
+ - name: myvm1
+ tag: mytag1
+ profile: myprofile
+
+ - name: myvm2
+ tag: mytag2
+ profile: myprofile
+```
+
+The role will create inventory group `ovirt_vm` with both of the virtual
+machines - `myvm1` and `myvm2`. The role also creates inventory group `ovirt_tag_mytag1`
+with virtual machine `myvm1` and inventory group `ovirt_tag_mytag2` with virtual
+machine `myvm2`.
+
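+A minimal sketch of a follow-up play consuming one of those groups (this requires `wait_for_ip: true` and VM state `running`, so the inventory actually gets populated):
+
+```yaml
+- name: Ping all VMs tagged mytag1
+  hosts: ovirt_tag_mytag1
+  tasks:
+    - name: Verify connectivity
+      ping:
+```
+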
+Limitations
+-----------
+
+ * Does not support Ansible Check Mode (Dry Run).
+
+Role Variables
+--------------
+
+| Name | Default value | Description |
+|--------------------------------|---------------|----------------------------------------------|
+| vms | UNDEF | List of dictionaries with virtual machine specifications. |
+| affinity_groups | UNDEF | List of dictionaries with affinity groups specifications. |
+| wait_for_ip | false | If true, the playbook should wait for the virtual machine IP reported by the guest agent. |
+| wait_for_ip_version | v4 | Specify which IP version should be waited for. Either v4 or v6. |
+| wait_for_ip_range | 0.0.0.0/0 | Specify the CIDR of the virtual machine IP which should be reported. Works only for IPv4. |
+| debug_vm_create | false | If true, logs the tasks of the virtual machine being created. The log can contain passwords. |
+| vm_infra_create_single_timeout | 180 | Time in seconds to wait for VM to be created and started (if state is running). |
+| vm_infra_create_poll_interval | 15 | Polling interval. Time in seconds to wait between checks of the VM state. |
+| vm_infra_create_all_timeout | vm_infra_create_single_timeout * (vms.length) | Total time to wait for all VMs to be created/started. |
+| vm_infra_wait_for_ip_retries | 5 | Number of retries to check if the VM is reporting its IP address. |
+| vm_infra_wait_for_ip_delay | 5 | Polling interval of the IP address. Time in seconds to wait between checks whether the VM reports its IP address. |
+
+
+The `vms` and `profile` variables can contain the following attributes; note that if you define the same attribute in both, the value in `vms` takes precedence (see the sketch after the table):
+
+| Name | Default value | Description |
+|--------------------|-----------------------|--------------------------------------------|
+| name | UNDEF | Name of the virtual machine to create. |
+| tag | UNDEF | Name of the tag to assign to the virtual machine. Only administrator users can use this attribute. |
+| cloud_init | UNDEF | Dictionary with values for Unix-like Virtual Machine initialization using cloud-init. See the <i>cloud_init</i> section below for a more detailed description. |
+| cloud_init_nics | UNDEF | List of dictionaries representing network interfaces to be set up by cloud-init. See the <i>cloud_init_nics</i> section below for a more detailed description. |
+| sysprep | UNDEF | Dictionary with values for Windows Virtual Machine initialization using sysprep. See the <i>sysprep</i> section below for a more detailed description. |
+| profile | UNDEF | Dictionary specifying the virtual machine hardware. See the table below. |
+| state | present | Should the Virtual Machine be stopped, present or running. Takes precedence over the state value in profile. |
+| nics | UNDEF | List of dictionaries specifying the NICs of the virtual machine. See below for more detailed description. |
+| cluster | UNDEF | Name of the cluster where the virtual machine will be created. |
+| clone | No | If yes then the disks of the created virtual machine will be cloned and independent of the template. This parameter is used only when state is running or present and VM didn't exist before. |
+| template | Blank | Name of template that the virtual machine should be based on. |
+| template_version | UNDEF | Version number of the template to be used for VM. By default the latest available version of the template is used. |
+| memory | UNDEF | Amount of virtual machine memory. |
+| memory_max | UNDEF | Upper bound of virtual machine memory up to which memory hot-plug can be performed. |
+| memory_guaranteed | UNDEF | Amount of minimal guaranteed memory of the Virtual Machine. Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB). <i>memory_guaranteed</i> parameter can't be lower than <i>memory</i> parameter. |
+| cores | UNDEF | Number of CPU cores used by the virtual machine. |
+| sockets | UNDEF | Number of virtual CPUs sockets of the Virtual Machine. |
+| cpu_shares | UNDEF | Set the CPU shares for this Virtual Machine. |
+| cpu_threads | UNDEF | Set the number of CPU threads for this Virtual Machine. |
+| disks | UNDEF | List of dictionaries specifying the additional virtual machine disks. See below for more detailed description. |
+| nics | UNDEF | List of dictionaries specifying the NICs of the virtual machine. See below for more detailed description. |
+| custom_properties | UNDEF | Properties sent to VDSM to configure various hooks.<br/> Custom properties is a list of dictionaries which can have the following values: <br/><i>name</i> - Name of the custom property. For example: hugepages, vhost, sap_agent, etc.<br/><i>regexp</i> - Regular expression to set for custom property.<br/><i>value</i> - Value to set for custom property. |
+| high_availability | UNDEF | Whether or not the virtual machine should be set highly available. |
+| high_availability_priority | UNDEF | Indicates the priority of the virtual machine inside the run and migration queues. Virtual machines with higher priorities will be started and migrated before virtual machines with lower priorities. The value is an integer between 0 and 100. The higher the value, the higher the priority. If no value is passed, default value is set by oVirt/RHV engine. |
+| io_threads | UNDEF | Number of IO threads used by virtual machine. 0 means IO threading disabled. |
+| description | UNDEF | Description of the Virtual Machine. |
+| operating_system | UNDEF | Operating system of the Virtual Machine. For example: rhel_7x64 |
+| type | UNDEF | Type of the Virtual Machine. Possible values: desktop, server or high_performance |
+| graphical_console | UNDEF | Assign a graphical console to the virtual machine.<br/>Graphical console is a dictionary which can have the following values:<br/><i>headless_mode</i> - If true disable the graphics console for this virtual machine.<br/><i>protocol</i> - 'VNC', 'Spice' or both. |
+| storage_domain | UNDEF | Name of the storage domain where all virtual machine disks should be created. Considered only when template is provided.|
+| state | present | Should the Virtual Machine be stopped, present or running.|
+| ssh_key | UNDEF | SSH key to be deployed to the virtual machine. This parameter is kept for backward compatibility and takes precedence over <i>authorized_ssh_keys</i> in the <i>cloud_init</i> dictionary. |
+| domain | UNDEF | The domain of the virtual machine. This parameter is kept for backward compatibility and takes precedence over <i>host_name</i> in the <i>cloud_init</i> or <i>sysprep</i> dictionary.|
+| lease | UNDEF | Name of the storage domain this virtual machine lease resides on. |
+| root_password | UNDEF | The root password of the virtual machine. This parameter is kept for backward compatibility and takes precedence over <i>root_password</i> in the <i>cloud_init</i> or <i>sysprep</i> dictionary.|
+| host | UNDEF | If you need to set cpu_mode to host_passthrough, use this parameter to define the host to use, along with placement_policy set to pinned. |
+| cpu_mode | UNDEF | CPU mode of the virtual machine. It can be some of the following: host_passthrough, host_model or custom. |
+| placement_policy | UNDEF | The configuration of the virtual machine's placement policy. |
+| boot_devices | UNDEF | List of boot devices which should be used to boot. Valid entries are `cdrom`, `hd`, `network`. |
+| serial_console | UNDEF | True to enable the VirtIO serial console, False to disable it. By default the value is chosen by the oVirt/RHV engine. |
+| serial_policy | UNDEF | Specify a serial number policy for the Virtual Machine. Following options are supported. <br/><i>vm</i> - Sets the Virtual Machine's UUID as its serial number. <br/><i>host</i> - Sets the host's UUID as the Virtual Machine's serial number. <br/><i>custom</i> - Allows you to specify a custom serial number in serial_policy_value. |
+| serial_policy_value | UNDEF | Allows you to specify a custom serial number. This parameter is used only when <i>serial_policy</i> is custom. |
+| comment | UNDEF | Comment of the virtual Machine. |
+
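+A minimal sketch of that precedence, assuming a hypothetical profile dictionary `myprofile` (the `memory` set directly on the VM entry wins over the profile value):
+
+```yaml
+myprofile:
+  cluster: production
+  memory: 2GiB
+
+vms:
+  - name: myvm1
+    profile: "{{ myprofile }}"
+    memory: 4GiB  # overrides the profile's 2GiB
+```
+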
+The item in `disks` list of `profile` dictionary can contain following attributes:
+
+| Name | Default value | Description |
+|--------------------|----------------|----------------------------------------------|
+| size | UNDEF | The size of the additional disk. |
+| name | UNDEF | The name of the additional disk. |
+| id | UNDEF | ID of the disk. If you pass both the id and the name of the disk, the disk will be looked up by id and its name will be updated if it differs from the name parameter. |
+| storage_domain | UNDEF | The name of storage domain where disk should be created. |
+| interface | UNDEF | The interface of the disk. |
+| name_prefix | True | If true, the name of the VM will be used as a prefix of the disk name. If false, only the disk name will be used; this can be useful when creating a VM from a template with a custom disk size. |
+| format | UNDEF | Specify the format of the disk. <ul><li>cow - If set, the disk will be created as a sparse disk, so space will be allocated for the volume as needed. This format is also known as thin provisioned disks.</li><li>raw - If set, disk space will be allocated right away. This format is also known as preallocated disks.</li></ul> |
+| bootable | UNDEF | True if the disk should be bootable. |
+| activate | UNDEF | True if the disk should be activated |
+
+The item in `nics` list of `profile` dictionary can contain following attributes:
+
+| Name | Default value | Description |
+|--------------------|----------------|----------------------------------------------|
+| name | UNDEF | The name of the network interface. |
+| interface | UNDEF | Type of the network interface. |
+| mac_address | UNDEF | Custom MAC address of the network interface, by default it's obtained from MAC pool. |
+| network | UNDEF | Logical network which the VM network interface should use. If network is not specified, then Empty network is used. |
+| profile | UNDEF | Virtual network interface profile to be attached to VM network interface. |
+
+The `affinity_groups` list can contain following attributes:
+
+| Name | Default value | Description |
+|--------------------|---------------------|----------------------------------------------|
+| cluster | UNDEF (Required) | Name of the cluster of the affinity group. |
+| description | UNDEF | Human readable description. |
+| host_enforcing | false | <ul><li>true - VM cannot start on host if it does not satisfy the `host_rule`.</li><li>false - VM will follow `host_rule` with soft enforcement.</li></ul>|
+| host_rule | UNDEF | <ul><li>positive - VMs in this group must run on this host.</li> <li>negative - VMs in this group may not run on this host.</li></ul> |
+| hosts | UNDEF | List of host names assigned to this group. |
+| name | UNDEF (Required) | Name of affinity group. |
+| state | UNDEF | Whether group should be present or absent. |
+| vm_enforcing | false | <ul><li>true - VM cannot start if it cannot satisfy the `vm_rule`.</li><li>false - VM will follow `vm_rule` with soft enforcement.</li></ul> |
+| vm_rule | UNDEF | <ul><li>positive - all vms in this group try to run on the same host.</li><li>negative - all vms in this group try to run on separate hosts.</li><li>disabled - this affinity group does not take effect.</li></ul> |
+| vms | UNDEF | List of VMs to be assigned to this affinity group. |
+| wait | true | If true, the module will wait for the desired state. |
+
+The `affinity_labels` list can contain following attributes:
+
+| Name | Default value | Description |
+|--------------------|---------------------|----------------------------------------------|
+| cluster | UNDEF (Required) | Name of the cluster of the affinity label group. |
+| hosts | UNDEF | List of host names assigned to this label. |
+| name | UNDEF (Required) | Name of affinity label. |
+| state | UNDEF | Whether label should be present or absent. |
+| vms | UNDEF | List of VMs to be assigned to this affinity label. |
+
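+A minimal sketch of an `affinity_labels` entry, using hypothetical host and VM names:
+
+```yaml
+affinity_labels:
+  - name: mylabel
+    cluster: production
+    hosts:
+      - myhost1
+    vms:
+      - myvm1
+    state: present
+```
+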
+The `cloud_init` dictionary can contain following attributes:
+
+| Name | Description |
+|---------------------|------------------------------------------------------|
+| host_name | Hostname to be set to Virtual Machine when deployed. |
+| timezone | Timezone to be set to Virtual Machine when deployed. |
+| user_name | Username to be used to set password to Virtual Machine when deployed. |
+| root_password | Password to be set for user specified by user_name parameter. By default it's set for root user. |
+| authorized_ssh_keys | SSH keys used to log in to the Virtual Machine. |
+| regenerate_ssh_keys | If True SSH keys will be regenerated on Virtual Machine. |
+| custom_script | Cloud-init script which will be executed on Virtual Machine when deployed. This is appended to the end of the cloud-init script generated by any other options. |
+| dns_servers | DNS servers to be configured on Virtual Machine. |
+| dns_search | DNS search domains to be configured on Virtual Machine. |
+| nic_boot_protocol | Set boot protocol of the network interface of Virtual Machine. Can be one of none, dhcp or static. |
+| nic_ip_address | If boot protocol is static, set this IP address to network interface of Virtual Machine. |
+| nic_netmask | If boot protocol is static, set this netmask to network interface of Virtual Machine. |
+| nic_gateway | If boot protocol is static, set this gateway to network interface of Virtual Machine. |
+| nic_name | Set name to network interface of Virtual Machine. |
+| nic_on_boot | If True network interface will be set to start on boot. |
+
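+A minimal sketch of a `cloud_init` dictionary on a VM entry (hypothetical values; `myprofile` stands for an assumed profile dictionary):
+
+```yaml
+vms:
+  - name: myvm1
+    profile: "{{ myprofile }}"
+    cloud_init:
+      host_name: myvm1.example.com
+      user_name: root
+      root_password: mypassword
+      dns_servers: 8.8.8.8
+      nic_name: eth0
+      nic_boot_protocol: dhcp
+      nic_on_boot: true
+```
+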
+The `sysprep` dictionary can contain following attributes:
+
+| Name | Description |
+|---------------------|------------------------------------------------------|
+| host_name | Hostname to be set to Virtual Machine when deployed. |
+| active_directory_ou | Active Directory Organizational Unit to be used for user login. |
+| org_name | Organization name to be set to Windows Virtual Machine. |
+| user_name | Username to be used to set the password on the Windows Virtual Machine. |
+| root_password | Password to be set for user specified by user_name parameter. By default it's set for root user. |
+| windows_license_key | License key to be set to Windows Virtual Machine. |
+| input_locale | Input localization of the Windows Virtual Machine. |
+| system_locale | System localization of the Windows Virtual Machine. |
+| ui_language | UI language of the Windows Virtual Machine. |
+| domain | Domain to be set to Windows Virtual Machine. |
+| timezone | Timezone to be set to Windows Virtual Machine. |
+
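+A minimal sketch of a `sysprep` dictionary for a Windows VM entry (hypothetical values; `windows_profile` stands for an assumed profile dictionary):
+
+```yaml
+vms:
+  - name: winvm1
+    profile: "{{ windows_profile }}"
+    sysprep:
+      host_name: winvm1.example.com
+      user_name: Administrator
+      root_password: mypassword
+      domain: example.com
+      timezone: Eastern Standard Time
+```
+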
+The `cloud_init_nics` variable is a list of dictionaries representing network interfaces to be set up by cloud-init. This option is used when the user needs to set up more than one network interface via cloud-init.
+If one network interface is enough, the user should use the cloud_init nic_* parameters instead; the cloud_init nic_* parameters are merged with the cloud_init_nics parameters. Each dictionary can contain the following values.
+
+| Name | Description |
+|---------------------|------------------------------------------------------|
+| nic_boot_protocol | Set boot protocol of the network interface of Virtual Machine. Can be one of none, dhcp or static. |
+| nic_ip_address | If boot protocol is static, set this IP address to network interface of Virtual Machine. |
+| nic_netmask | If boot protocol is static, set this netmask to network interface of Virtual Machine. |
+| nic_gateway | If boot protocol is static, set this gateway to network interface of Virtual Machine. |
+| nic_name | Set name to network interface of Virtual Machine. |
+| nic_on_boot | If True network interface will be set to start on boot. |
+
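+A minimal sketch of `cloud_init_nics` configuring a second, static interface (hypothetical addresses; `myprofile` stands for an assumed profile dictionary):
+
+```yaml
+vms:
+  - name: myvm1
+    profile: "{{ myprofile }}"
+    cloud_init:
+      host_name: myvm1.example.com
+      nic_name: eth0
+      nic_boot_protocol: dhcp
+      nic_on_boot: true
+    cloud_init_nics:
+      - nic_name: eth1
+        nic_boot_protocol: static
+        nic_ip_address: 192.168.1.10
+        nic_netmask: 255.255.255.0
+        nic_gateway: 192.168.1.1
+        nic_on_boot: true
+```
+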
+Example Playbook
+----------------
+
+```yaml
+---
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars_files:
+ # Contains the `engine_password` variable, encrypted using ansible-vault
+ - passwords.yml
+
+ vars:
+ engine_fqdn: ovirt-engine.example.com
+ engine_user: admin@internal
+ engine_cafile: /etc/pki/ovirt-engine/ca.pem
+
+ httpd_vm:
+ cluster: production
+ domain: example.com
+ template: rhel7
+ memory: 2GiB
+ cores: 2
+ ssh_key: ssh-rsa AAA...LGx user@fqdn
+ disks:
+ - size: 10GiB
+ name: data
+ storage_domain: mynfsstorage
+ interface: virtio
+
+ db_vm:
+ cluster: production
+ domain: example.com
+ template: rhel7
+ memory: 4GiB
+ cores: 1
+ ssh_key: ssh-rsa AAA...LGx user@fqdn
+ disks:
+ - size: 50GiB
+ name: data
+ storage_domain: mynfsstorage
+ interface: virtio
+ nics:
+ - name: ovirtmgmt
+ network: ovirtmgmt
+ profile: ovirtmgmt
+
+ vms:
+ - name: postgresql-vm-0
+ tag: postgresql_vm
+ profile: "{{ db_vm }}"
+ - name: postgresql-vm-1
+ tag: postgresql_vm
+ profile: "{{ db_vm }}"
+ - name: apache-vm
+ tag: httpd_vm
+ profile: "{{ httpd_vm }}"
+
+ affinity_groups:
+ - name: db-ag
+ cluster: production
+ vm_enforcing: true
+ vm_rule: negative
+ vms:
+ - postgresql-vm-0
+ - postgresql-vm-1
+
+ roles:
+ - vm_infra
+ collections:
+ - ovirt.ovirt
+```
+
+The example below shows how to use the inventory created by the `vm_infra` role in a follow-up play.
+
+```yaml
+---
+- name: Deploy apache VM
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars_files:
+ # Contains the `engine_password` variable, encrypted using ansible-vault
+ - passwords.yml
+
+ vars:
+ wait_for_ip: true
+
+ httpd_vm:
+ cluster: production
+ state: running
+ domain: example.com
+ template: rhel7
+ memory: 2GiB
+ cores: 2
+ ssh_key: ssh-rsa AAA...LGx user@fqdn
+ disks:
+ - size: 10GiB
+ name: data
+ storage_domain: mynfsstorage
+ interface: virtio
+
+ vms:
+ - name: apache-vm
+ tag: apache
+ profile: "{{ httpd_vm }}"
+
+ roles:
+ - vm_infra
+ collections:
+ - ovirt.ovirt
+
+- name: Deploy apache on VM
+ hosts: ovirt_tag_apache
+
+ vars_files:
+ - apache_vars.yml
+
+ roles:
+ - geerlingguy.apache
+```
+
+[![asciicast](https://asciinema.org/a/111662.png)](https://asciinema.org/a/111662)
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/defaults/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/defaults/main.yml
new file mode 100644
index 00000000..1a68cd04
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/defaults/main.yml
@@ -0,0 +1,15 @@
+---
+debug_vm_create: false
+wait_for_ip: false
+wait_for_ip_version: v4
+wait_for_ip_range: "0.0.0.0/0"
+
+# Create VMs timeouts:
+vm_infra_create_single_timeout: 180
+vm_infra_create_poll_interval: 15
+vm_infra_create_all_timeout: "{{ vm_infra_create_single_timeout * (vms | length)|int }}"
+
+# Wait for IPs timeouts:
+vm_infra_wait_for_ip_retries: 5
+vm_infra_wait_for_ip_delay: 5
+vms_passwords: []
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/examples/ovirt_vm_infra.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/examples/ovirt_vm_infra.yml
new file mode 100644
index 00000000..8069bd79
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/examples/ovirt_vm_infra.yml
@@ -0,0 +1,48 @@
+---
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars_files:
+ # Contains the `engine_password` variable, encrypted using ansible-vault
+ - passwords.yml
+
+ vars:
+ engine_fqdn: ovirt-engine.example.com
+ engine_user: admin@internal
+ engine_cafile: /etc/pki/ovirt-engine/ca.pem
+
+ debug_vm_create: yes
+ db_vm:
+ state: running
+ cluster: mycluster
+ template: centos7
+ memory: 1GiB
+ memory_max: 2GiB
+ cores: 1
+ tag:
+ - db
+ - dbvm
+ disks:
+ - size: 1GiB
+ name: data
+ storage_domain: data
+ interface: virtio
+
+ vms:
+ - name: postgresql-vm-0
+ memory: 2GiB
+ cloud_init:
+ host_name: ps.example.com
+ root_password: 'mypassword'
+ authorized_ssh_keys: ssh-rsa A...LGx ondra@ondra
+ profile: "{{ db_vm }}"
+ tag:
+ - pgsql
+ - httpd
+
+ roles:
+ - vm_infra
+ collections:
+ - ovirt.ovirt
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/examples/ovirt_vm_infra_inv.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/examples/ovirt_vm_infra_inv.yml
new file mode 100644
index 00000000..354145a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/examples/ovirt_vm_infra_inv.yml
@@ -0,0 +1,72 @@
+---
+- name: oVirt infra
+ hosts: localhost
+ connection: local
+ gather_facts: false
+
+ vars_files:
+ # Contains the `engine_password` variable, encrypted using ansible-vault
+ - passwords.yml
+
+ vars:
+ engine_fqdn: ovirt-engine.example.com
+ engine_user: admin@internal
+ engine_cafile: /etc/pki/ovirt-engine/ca.pem
+
+ # Must be set to true to create inventory
+ wait_for_ip: true
+ httpd_vm:
+ # Must be set to running to create inventory
+ state: running
+ cluster: mycluster
+ template: mytemplate
+ memory: 1GiB
+ memory_max: 2GiB
+ cores: 1
+ root_password: '123456'
+
+ db_vm:
+ # Must be set to running to create inventory
+ state: running
+ cluster: mycluster
+ template: mytemplate
+ memory: 1GiB
+ memory_max: 2GiB
+ cores: 1
+ tag: db
+ ssh_key: ssh-rsa AAAAB...Gx ondra@ondra
+
+ vms:
+ - name: httpd-vm-1
+ profile: httpd_vm
+ tag: httpd
+ - name: db-vm-1
+ profile: db_vm
+ - name: db-vm-2
+ profile: db_vm
+
+ roles:
+ - vm_infra
+ collections:
+ - ovirt.ovirt
+
+
+# This role also creates an inventory of the virtual machines it defines if wait_for_ip is set to true
+# and the VM state is running. All defined virtual machines are part of the ovirt_vm inventory group. The role
+# also creates ovirt_tag_{tag_name} groups if there are any tags assigned to a virtual machine and places
+# all virtual machines with that tag into that inventory group.
+- name: Print info about httpd VM
+ hosts: ovirt_tag_httpd
+
+ tasks:
+ - name: Print info about httpd VM
+ debug:
+ msg: "{{ hostvars }}"
+
+- name: Print info about db VMs
+ hosts: ovirt_tag_db
+
+ tasks:
+ - name: Print info about db VM
+ debug:
+ msg: "{{ hostvars }}"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/examples/passwords.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/examples/passwords.yml
new file mode 100644
index 00000000..92c7613c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/examples/passwords.yml
@@ -0,0 +1,12 @@
+---
+# As an example this file is kept in plaintext. If you want to
+# encrypt this file, please execute the following command:
+#
+# $ ansible-vault encrypt passwords.yml
+#
+# It will ask you for a password, which you must then pass to
+# ansible interactively when executing the playbook.
+#
+# $ ansible-playbook myplaybook.yml --ask-vault-pass
+#
+engine_password: 123456
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/affinity_groups.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/affinity_groups.yml
new file mode 100644
index 00000000..c95e3978
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/affinity_groups.yml
@@ -0,0 +1,22 @@
+---
+#########################################
+# Affinity groups
+#########################################
+- name: Create affinity groups
+ ovirt_affinity_group:
+ auth: "{{ ovirt_auth }}"
+ cluster: "{{ item.cluster | default(omit) }}"
+ description: "{{ item.description | default(omit) }}"
+ host_enforcing: "{{ item.host_enforcing | default(omit) }}"
+ host_rule: "{{ item.host_rule | default(omit) }}"
+ hosts: "{{ item.hosts | default(omit) }}"
+ name: "{{ item.name }}"
+ state: "{{ item.state | default(omit) }}"
+ vm_enforcing: "{{ item.vm_enforcing | default(omit) }}"
+ vm_rule: "{{ item.vm_rule | default(omit) }}"
+ vms: "{{ item.vms | default([]) }}"
+ wait: "{{ item.wait | default(omit) }}"
+ with_items:
+ - "{{ affinity_groups | default([]) }}"
+ tags:
+ - affinity_groups
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/affinity_labels.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/affinity_labels.yml
new file mode 100644
index 00000000..cf945dba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/affinity_labels.yml
@@ -0,0 +1,13 @@
+---
+- name: Create affinity labels
+ ovirt_affinity_label:
+ auth: "{{ ovirt_auth }}"
+ cluster: "{{ item.cluster | default(omit) }}"
+ hosts: "{{ item.hosts | default(omit) }}"
+ name: "{{ item.name }}"
+ state: "{{ item.state | default(omit) }}"
+ vms: "{{ item.vms | default([]) }}"
+ with_items:
+ - "{{ affinity_labels | default([]) }}"
+ tags:
+ - affinity_labels
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/create_inventory.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/create_inventory.yml
new file mode 100644
index 00000000..abdb5911
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/create_inventory.yml
@@ -0,0 +1,44 @@
+---
+- name: Fetch created VMs
+ ovirt_vm_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "name={{ vms | map(attribute='name') | join(' or name=') }}"
+ fetch_nested: true
+ nested_attributes:
+ - ips
+ - name
+ register: created_vms_info
+
+- name: Set ovirt_vms
+ set_fact:
+ ovirt_vms: "{{ created_vms_info.ovirt_vms }}"
+
+- name: Create inventory of VMs IPv4
+ no_log: true
+ add_host:
+ name: "{{ item.name }}"
+ ansible_host: "{{ item | ovirt.ovirt.ovirtvmipv4(network_ip=wait_for_ip_range) }}"
+ groups: "{{ (['ovirt_tag_'] * item.tags | length) | zip(item.tags | map(attribute='name') | list) | map('join') | list + ['ovirt_vm'] }}"
+ ansible_user: root
+ ansible_password: "{{ vms_passwords | ovirt.ovirt.filtervalue('name', item.name) | map(attribute='root_password') | first | default(omit) }}"
+ ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+ with_items: "{{ ovirt_vms }}"
+ changed_when: false
+ when: "wait_for_ip_version == 'v4'"
+ loop_control:
+ label: "{{ item.name }}"
+
+- name: Create inventory of VMs IPv6
+ no_log: true
+ add_host:
+ name: "{{ item.name }}"
+ ansible_host: "{{ item | ovirt.ovirt.ovirtvmipv6 }}"
+ groups: "{{ (['ovirt_tag_'] * item.tags | length) | zip(item.tags | map(attribute='name') | list) | map('join') | list + ['ovirt_vm'] }}"
+ ansible_user: root
+ ansible_password: "{{ vms_passwords | ovirt.ovirt.filtervalue('name', item.name) | map(attribute='root_password') | first | default(omit) }}"
+ ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+ with_items: "{{ ovirt_vms }}"
+ changed_when: false
+ when: "wait_for_ip_version == 'v6'"
+ loop_control:
+ label: "{{ item.name }}"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/create_vms.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/create_vms.yml
new file mode 100644
index 00000000..2dc9f50c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/create_vms.yml
@@ -0,0 +1,48 @@
+---
+- name: "Create VM {{ current_vm.name }}"
+ ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ state: "present"
+ name: "{{ current_vm.name }}"
+ clone: "{{ current_vm.clone | default(current_vm.profile.clone) | default(omit) }}"
+ cluster: "{{ current_vm.cluster | default(current_vm.profile.cluster) | default(omit) }}"
+ template: "{{ current_vm.template | default(current_vm.profile.template) | default(omit) }}"
+ template_version: "{{ current_vm.template_version | default(current_vm.profile.template_version) | default(omit) }}"
+ ballooning_enabled: "{{ current_vm.ballooning_enabled | default(current_vm.profile.ballooning_enabled) | default(omit) }}"
+ host: "{{ current_vm.host | default(current_vm.profile.host) | default(omit) }}"
+ memory: "{{ current_vm.memory | default(current_vm.profile.memory) | default(omit) }}"
+ memory_max: "{{ current_vm.memory_max | default(current_vm.profile.memory_max) | default(omit) }}"
+ memory_guaranteed: "{{ current_vm.memory_guaranteed | default(current_vm.profile.memory_guaranteed) | default(omit) }}"
+ cpu_cores: "{{ current_vm.cores | default(current_vm.profile.cores) | default(omit) }}"
+ cpu_sockets: "{{ current_vm.sockets | default(current_vm.profile.sockets) | default(omit) }}"
+ cpu_shares: "{{ current_vm.cpu_shares | default(current_vm.profile.cpu_shares) | default(omit) }}"
+ cpu_threads: "{{ current_vm.cpu_threads | default(current_vm.profile.cpu_threads) | default(omit) }}"
+ cpu_mode: "{{ current_vm.cpu_mode | default(current_vm.profile.cpu_mode) | default(omit) }}"
+ boot_devices: "{{ current_vm.boot_devices | default(current_vm.profile.boot_devices) | default(omit) }}"
+ placement_policy: "{{ 'user_migratable'
+ if ((current_vm.profile.cpu_mode is defined and current_vm.profile.cpu_mode == 'host_passthrough')
+ or (current_vm.cpu_mode is defined and current_vm.cpu_mode == 'host_passthrough'))
+ else current_vm.placement_policy | default(current_vm.profile.placement_policy) | default(omit) }}"
+ custom_properties: "{{ current_vm.custom_properties | default(current_vm.profile.custom_properties) | default(omit) }}"
+ description: "{{ current_vm.description | default(current_vm.profile.description) | default(omit) }}"
+ operating_system: "{{ current_vm.operating_system | default(current_vm.profile.operating_system) | default(omit) }}"
+ type: "{{ current_vm.type | default(current_vm.profile.type) | default(omit) }}"
+ high_availability: "{{ current_vm.high_availability | default(current_vm.profile.high_availability) | default(omit) }}"
+ high_availability_priority: "{{ current_vm.high_availability_priority | default(current_vm.profile.high_availability_priority) | default(omit) }}"
+ io_threads: "{{ current_vm.io_threads | default(current_vm.profile.io_threads ) | default(omit) }}"
+ storage_domain: "{{ current_vm.storage_domain | default(current_vm.profile.storage_domain) | default(omit) }}"
+ disk_format: "{{ current_vm.disk_format | default(current_vm.profile.disk_format) | default(omit) }}"
+ lease: "{{ current_vm.lease | default(current_vm.profile.lease) | default(omit) }}"
+ serial_console: "{{ current_vm.serial_console | default(current_vm.profile.serial_console) | default(omit) }}"
+ serial_policy: "{{ current_vm.serial_policy | default(current_vm.profile.serial_policy) | default(omit) }}"
+ serial_policy_value: "{{ current_vm.serial_policy_value | default(current_vm.profile.serial_policy_value) | default(omit) }}"
+ timeout: "{{ vm_infra_create_single_timeout }}"
+ comment: "{{ current_vm.comment | default(current_vm.profile.comment) | default(omit) }}"
+ changed_when: false
+ async: "{{ vm_infra_create_single_timeout }}"
+ poll: 0
+ register: added_vm
+
+- name: "Add created vm to all_vms"
+ set_fact:
+ all_vms: "{{ all_vms | default([]) + [added_vm] }}"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/main.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/main.yml
new file mode 100644
index 00000000..63f97bbb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/main.yml
@@ -0,0 +1,62 @@
+---
+- name: Set sensitive vms
+ set_fact:
+ sensitive_vms: "{{ vms }}"
+ no_log: true
+
+- name: Remove sensitive data from vms
+ set_fact:
+ vms: "{{ vms | ovirt.ovirt.removesensitivevmdata }}"
+
+- block:
+ - name: Login to oVirt
+ ovirt_auth:
+ url: "{{ engine_url | default(lookup('env','OVIRT_URL')) | default(omit) }}"
+ hostname: "{{ engine_fqdn | default(lookup('env','OVIRT_HOSTNAME')) | default(omit) }}"
+ username: "{{ engine_user | default(lookup('env','OVIRT_USERNAME')) | default(omit) }}"
+ password: "{{ engine_password | default(lookup('env','OVIRT_PASSWORD')) | default(omit) }}"
+ ca_file: "{{ engine_cafile | default(lookup('env','OVIRT_CAFILE')) | default(omit) }}"
+ insecure: "{{ engine_insecure | default(true) }}"
+ when: ovirt_auth is undefined or not ovirt_auth
+ register: loggedin
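+    # A minimal sketch of the env-based defaults above (illustrative
+    # placeholder values) that lookup('env', ...) picks up when the engine_*
+    # variables are not set:
+    #   export OVIRT_URL=https://engine.example.com/ovirt-engine/api
+    #   export OVIRT_USERNAME=admin@internal
+    #   export OVIRT_PASSWORD=secret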
+
+ - name: Split list of VMs
+ set_fact:
+ create_vms: "{{ create_vms | default([]) + [item] }}"
+ with_items: "{{ vms }}"
+ when: (item.state is undefined and item.profile.state is defined and item.profile.state != 'absent')
+ or (item.state is defined and item.state != 'absent') or (item.state is undefined and item.profile.state is undefined)
+    # Uses the item's own state first; when it is undefined, falls back to the profile state.
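+    # For illustration (hypothetical inventory shape), given:
+    #   vms:
+    #     - name: web01
+    #       profile: { cluster: production, state: running }
+    #     - name: db01
+    #       state: absent
+    # only web01 lands in create_vms; db01 is handled by the Delete VM task below.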
+
+ - name: Split list of sensitive VMs
+ set_fact:
+ create_sensitive_vms: "{{ create_sensitive_vms | default([]) + [item] }}"
+ with_items: "{{ sensitive_vms }}"
+    no_log: true
+ when: (item.state is undefined and item.profile.state is defined and item.profile.state != 'absent')
+ or (item.state is defined and item.state != 'absent') or (item.state is undefined and item.profile.state is undefined)
+    # Uses the item's own state first; when it is undefined, falls back to the profile state.
+
+ - name: Delete VM
+ include_tasks: vm_state_absent.yml
+ with_items: "{{ vms }}"
+ loop_control:
+ loop_var: current_vm
+ when: (current_vm.state is defined and current_vm.state == 'absent')
+ or (current_vm.profile.state is defined and current_vm.state is undefined and current_vm.profile.state == 'absent')
+
+ - name: Include create VM
+ include_tasks: vm_state_present.yml
+ when: create_vms is defined
+
+ always:
+ - name: Unset facts
+ set_fact:
+ create_vms: []
+ create_sensitive_vms: []
+
+ - name: Logout from oVirt
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
+ when: not loggedin.skipped | default(false)
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/manage_state.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/manage_state.yml
new file mode 100644
index 00000000..8c248809
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/manage_state.yml
@@ -0,0 +1,80 @@
+---
+########################################################################
+# Legacy modification of cloud_init:
+########################################################################
+- block:
+ - name: Set cloud_init fact
+ set_fact:
+ cloud_init: "{{ current_vm.cloud_init | default(current_vm.profile.cloud_init) | default({}) }}"
+
+ - block:
+ - set_fact:
+ cloud_init: "{{ cloud_init | combine({'user_name': cloud_init.user_name | default('root') }) }}"
+
+ - set_fact:
+ cloud_init: "{{ cloud_init | combine({'authorized_ssh_keys': current_vm.ssh_key | default(current_vm.profile.ssh_key)}) }}"
+ when: current_vm.profile.ssh_key is defined or current_vm.ssh_key is defined
+
+ - set_fact:
+ cloud_init: "{{ cloud_init | combine({'root_password': current_vm.root_password | default(current_vm.profile.root_password)}) }}"
+ when: current_vm.profile.root_password is defined or current_vm.root_password is defined
+
+ - set_fact:
+ cloud_init: "{{ cloud_init | combine({'host_name': current_vm.name~'.'~current_vm.domain | default(current_vm.profile.domain)}) }}"
+ when: current_vm.profile.domain is defined or current_vm.domain is defined
+
+ - name: Define vm/password dictionary
+ set_fact:
+ vms_passwords: "{{ vms_passwords + [{'name': current_vm.name, 'root_password': cloud_init.root_password}] }}"
+ when: "'root_password' in cloud_init"
+ when: current_vm.cloud_init is defined or current_vm.profile.cloud_init is defined
+ no_log: true
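+# A worked example of the merge above (illustrative values): for
+#   current_vm: { name: web01, domain: example.com, root_password: secret,
+#                 profile: { cloud_init: { nic_boot_protocol: dhcp } } }
+# the block yields
+#   cloud_init: { nic_boot_protocol: dhcp, user_name: root,
+#                 root_password: secret, host_name: web01.example.com }
+# and vms_passwords gains { name: web01, root_password: secret }.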
+
+########################################################################
+# Legacy modification of sysprep:
+########################################################################
+
+- block:
+ - name: Set sysprep fact
+ set_fact:
+ sysprep: "{{ current_vm.sysprep | default(current_vm.profile.sysprep) | default({}) }}"
+
+ - block:
+ - set_fact:
+ sysprep: "{{ sysprep | combine({'user_name': sysprep.user_name | default('Administrator') }) }}"
+
+ - set_fact:
+ sysprep: "{{ sysprep | combine({'root_password': current_vm.root_password | default(current_vm.profile.root_password)}) }}"
+ when: current_vm.profile.root_password is defined or current_vm.root_password is defined
+
+ - set_fact:
+ sysprep: "{{ sysprep | combine({'host_name': current_vm.name~'.'~current_vm.domain | default(current_vm.profile.domain)}) }}"
+ when: current_vm.profile.domain is defined or current_vm.domain is defined
+
+ - name: Define vm/password dictionary
+ set_fact:
+ vms_passwords: "{{ vms_passwords + [{'name': current_vm.name, 'root_password': sysprep.root_password}] }}"
+ when: "'root_password' in sysprep"
+ when: current_vm.sysprep is defined or current_vm.profile.sysprep is defined
+ no_log: true
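+# The sysprep merge mirrors the cloud_init example above, with 'Administrator'
+# as the default user_name and no authorized_ssh_keys handling.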
+########################################################################
+########################################################################
+
+- name: "Manage VM '{{ current_vm.name }}' state"
+ ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ state: "{{ current_vm.state | default(current_vm.profile.state) | default('present') }}"
+ name: "{{ current_vm.name }}"
+ sysprep: "{{ (sysprep | length > 0) | ternary(sysprep, omit) }}"
+ cloud_init: "{{ (cloud_init | length > 0) | ternary(cloud_init, omit) }}"
+ cloud_init_persist: "{{ current_vm.cloud_init_persist | default(current_vm.profile.cloud_init_persist) | default(omit) }}"
+ cloud_init_nics: "{{ current_vm.cloud_init_nics | default(current_vm.profile.cloud_init_nics) | default(omit) }}"
+ timeout: "{{ vm_infra_create_single_timeout }}"
+ changed_when: false
+ async: "{{ vm_infra_create_single_timeout }}"
+ poll: 0
+ register: started_vm
+
+- name: Set started_vms list
+ set_fact:
+ started_vms: "{{ started_vms | default([]) + [started_vm] }}"
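+# Note: the ovirt_vm task above runs async with poll: 0, so it only queues the
+# state change and registers ansible_job_id; started_vms collects the handles
+# that vm_state_present.yml reaps via async_status, which is also why
+# changed_when is forced to false.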
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/vm_state_absent.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/vm_state_absent.yml
new file mode 100644
index 00000000..386344a4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/vm_state_absent.yml
@@ -0,0 +1,6 @@
+---
+- name: "Remove VM '{{ current_vm.name }}'"
+ ovirt_vm:
+ auth: "{{ ovirt_auth }}"
+ state: "absent"
+ name: "{{ current_vm.name }}"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/vm_state_present.yml b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/vm_state_present.yml
new file mode 100644
index 00000000..3ee0725d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/roles/vm_infra/tasks/vm_state_present.yml
@@ -0,0 +1,157 @@
+---
+- name: Create VMs
+ include_tasks: create_vms.yml
+ with_items: "{{ create_vms }}"
+ loop_control:
+ loop_var: "current_vm"
+
+- name: Wait for VMs to be added
+  async_status:
+    jid: "{{ item.ansible_job_id }}"
+ register: job_result
+ with_items: "{{ all_vms }}"
+ when: all_vms is defined
+ until: job_result.finished
+ retries: "{{ (vm_infra_create_all_timeout|int // vm_infra_create_poll_interval) + 1 }}"
+ delay: "{{ vm_infra_create_poll_interval }}"
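+  # Worked example of the retry budget: with the role defaults (assumed here)
+  # vm_infra_create_all_timeout: 600 and vm_infra_create_poll_interval: 15,
+  # this polls 600 // 15 + 1 = 41 times, i.e. the batch gets roughly 600s.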
+
+- name: Apply any Affinity Groups
+ import_tasks: affinity_groups.yml
+
+- name: Apply any Affinity Labels
+ import_tasks: affinity_labels.yml
+
+- name: Manage profile disks
+ ovirt_disk:
+ auth: "{{ ovirt_auth }}"
+ name: "{% if item.1.name_prefix | default(true) %}{{ item.0.name }}_{% endif %}{{ item.1.name }}"
+ vm_name: "{{ item.0.name }}"
+ id: "{{ item.1.id | default(omit) }}"
+ size: "{{ item.1.size | default(omit) }}"
+ format: "{{ item.1.format | default(omit) }}"
+ storage_domain: "{{ item.1.storage_domain | default(omit) }}"
+ interface: "{{ item.1.interface | default(omit) }}"
+ bootable: "{{ item.1.bootable | default(omit) }}"
+ activate: "{{ item.1.activate | default(omit) }}"
+ wait: true
+  # When the profile defines no disks, this task is skipped and the VM's own disks (next task) apply.
+ with_subelements:
+ - "{{ create_vms }}"
+ - "profile.disks"
+ - flags:
+ skip_missing: true
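+  # Disk shape assumed by this task and the next (illustrative values):
+  #   profile:
+  #     disks:
+  #       - { name: data, size: 10GiB, interface: virtio, bootable: false }
+  # With name_prefix at its default (true), the disk is created as
+  # "<vm name>_data".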
+
+- name: Manage virtual machines disks
+ ovirt_disk:
+ auth: "{{ ovirt_auth }}"
+ name: "{% if item.1.name_prefix | default(true) %}{{ item.0.name }}_{% endif %}{{ item.1.name }}"
+ vm_name: "{{ item.0.name }}"
+ id: "{{ item.1.id | default(omit) }}"
+ size: "{{ item.1.size | default(omit) }}"
+ format: "{{ item.1.format | default(omit) }}"
+ storage_domain: "{{ item.1.storage_domain | default(omit) }}"
+ interface: "{{ item.1.interface | default(omit) }}"
+ bootable: "{{ item.1.bootable | default(omit) }}"
+ activate: "{{ item.1.activate | default(omit) }}"
+ wait: true
+  # When the profile already defines disks, disks listed directly on the VM update/override them here.
+ with_subelements:
+ - "{{ create_vms }}"
+ - "disks"
+ - flags:
+ skip_missing: true
+
+- name: Manage profile NICs
+ ovirt_nic:
+ auth: "{{ ovirt_auth }}"
+ vm: "{{ item.0.name }}"
+ name: "{{ item.1.name | default(omit) }}"
+ interface: "{{ item.1.interface | default(omit) }}"
+ mac_address: "{{ item.1.mac_address | default(omit) }}"
+ profile: "{{ item.1.profile | default(omit) }}"
+ network: "{{ item.1.network | default(omit) }}"
+  # When the profile defines no NICs, this task is skipped and the VM's own NICs (next task) apply.
+ with_subelements:
+ - "{{ create_vms }}"
+ - "profile.nics"
+ - flags:
+ skip_missing: true
+
+- name: Manage virtual machines NICs
+ ovirt_nic:
+ auth: "{{ ovirt_auth }}"
+ vm: "{{ item.0.name }}"
+ name: "{{ item.1.name | default(omit) }}"
+ interface: "{{ item.1.interface | default(omit) }}"
+ mac_address: "{{ item.1.mac_address | default(omit) }}"
+ profile: "{{ item.1.profile | default(omit) }}"
+ network: "{{ item.1.network | default(omit) }}"
+  # When the profile already defines NICs, NICs listed directly on the VM update/override them here.
+ with_subelements:
+ - "{{ create_vms }}"
+ - "nics"
+ - flags:
+ skip_missing: true
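+  # NIC shape assumed by the two NIC tasks above (illustrative values):
+  #   nics:
+  #     - { name: nic1, profile: ovirtmgmt, network: ovirtmgmt, interface: virtio }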
+
+- name: Manage VMs state
+ include_tasks: manage_state.yml
+ with_items: "{{ create_sensitive_vms }}"
+ loop_control:
+ loop_var: "current_vm"
+
+- name: Wait for VMs to be started
+ no_log: "{{ not debug_vm_create }}"
+  async_status:
+    jid: "{{ item.ansible_job_id }}"
+ register: job_result
+ with_items: "{{ started_vms }}"
+ when: started_vms is defined
+ until: job_result.finished
+ retries: "{{ (vm_infra_create_all_timeout|int // vm_infra_create_poll_interval) + 1 }}"
+ delay: "{{ vm_infra_create_poll_interval }}"
+
+# to_json|from_json on vms is a workaround for: https://github.com/ansible/ansible/issues/27299
+- name: Apply tags from VM profiles
+ ovirt_tag:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ item }}"
+ vms: "{{ create_vms|to_json|from_json|json_query(query)|map(attribute='name') | list }}"
+ with_items: "{{ create_vms | selectattr('tag', 'defined') | map(attribute='tag') | list | unique }}"
+ vars:
+ query: "[?contains(tag, '{{ item }}')]"
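+  # Example of the JMESPath selection (hypothetical data): with
+  #   create_vms: [{ name: web01, tag: [web, prod] }, { name: db01, tag: [db] }]
+  # the loop visits web, prod and db; for item 'web' the query
+  # "[?contains(tag, 'web')]" keeps only web01, whose name is then tagged.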
+
+- name: Apply tags from VMs
+ ovirt_tag:
+ auth: "{{ ovirt_auth }}"
+ name: "{{ item }}"
+ vms: "{{ create_vms|to_json|from_json|json_query(query)|map(attribute='name') | list }}"
+ with_items: "{{ create_vms | selectattr('profile', 'defined') | map(attribute='profile')
+ | selectattr('tag', 'defined') | map(attribute='tag') | list | unique }}"
+ vars:
+ query: "[?contains(profile.tag, '{{ item }}')]"
+ defined_vms: "{{ create_vms | selectattr('profile.tag', 'defined') | list | unique }}"
+
+
+- block:
+ - name: Filter ovirt_vms to get IP
+ set_fact:
+ ip_cond: "vm_info.ovirt_vms | ovirt.ovirt.ovirtvmip{{ wait_for_ip_version }}(network_ip='{{ wait_for_ip_range }}') | length > 0"
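+    # The filter name is assembled from wait_for_ip_version: with the assumed
+    # role defaults (wait_for_ip_version: v4, wait_for_ip_range: 0.0.0.0/0)
+    # this renders ovirt.ovirt.ovirtvmipv4(network_ip='0.0.0.0/0'), matching
+    # any v4 address reported for the VM.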
+
+ - name: Wait for VMs IP
+ ovirt_vm_info:
+ auth: "{{ ovirt_auth }}"
+ pattern: "name={{ item.name }}"
+ fetch_nested: true
+ nested_attributes: ips
+ with_items:
+ - "{{ create_vms }}"
+ until: "ip_cond"
+ retries: "{{ vm_infra_wait_for_ip_retries }}"
+ register: vm_info
+ delay: "{{ vm_infra_wait_for_ip_delay }}"
+ when: "(item.state is undefined and item.profile.state is defined and item.profile.state != 'stopped')
+ and (item.state is defined and item.state != 'stopped') | default('present') != 'stopped'"
+ # FIXME: Refactor the condition
+
+ - name: Create inventory
+ include_tasks: create_inventory.yml
+
+ when: "wait_for_ip"
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/tests/.gitignore b/collections-debian-merged/ansible_collections/ovirt/ovirt/tests/.gitignore
new file mode 100644
index 00000000..ea1472ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/tests/.gitignore
@@ -0,0 +1 @@
+output/
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.10.txt b/collections-debian-merged/ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.10.txt
new file mode 100644
index 00000000..32467e39
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.10.txt
@@ -0,0 +1,4 @@
+automation/build-artifacts.sh shebang!skip
+automation/build-artifacts-manual.sh shebang!skip
+automation/check-patch.sh shebang!skip
+build.sh shebang!skip
diff --git a/collections-debian-merged/ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.9.txt b/collections-debian-merged/ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.9.txt
new file mode 100644
index 00000000..32467e39
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/ovirt/ovirt/tests/sanity/ignore-2.9.txt
@@ -0,0 +1,4 @@
+automation/build-artifacts.sh shebang!skip
+automation/build-artifacts-manual.sh shebang!skip
+automation/check-patch.sh shebang!skip
+build.sh shebang!skip